author     Bryan Ischo <bryan@ischo.com>    2008-12-30 12:29:54 +0000
committer  Bryan Ischo <bryan@ischo.com>    2008-12-30 12:29:54 +0000
commit     203d33fdb7778080865879fb173f52009652fbd7 (patch)
tree       60bf716f3cf955ae2897483aa7f27eb15a7090bd
parent     37d90995389c2aed1d37b7b3aa6215759e14b94a (diff)
* Undid my last change. Now I realize why you should only ever use spaces
  for code indentation (it's impossible to get everything to line up when
  tabs of arbitrary width are used), so I'm reverting back to spaces for
  everything.
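
To illustrate the alignment problem the commit message describes, here is a
small standalone C snippet (not part of this commit; the names are made up).
A continuation line indented with spaces lines up under the opening
parenthesis for every reader, while a tab-indented continuation only lines
up at one particular tab width:

    /* Continuation indented with spaces only: the second parameter lines up
     * under the '(' no matter how the reader's editor renders whitespace. */
    static int parse_error(const char *buffer, int bufferSize,
                           int *bytesConsumed);

    /* The same declaration with a tab-indented continuation (tab shown as
     * \t).  At a tab width of 8 it happens to land under the '('; at a
     * width of 4 it drifts left, which is why tab-indented code cannot be
     * made to line up for everyone:
     *
     *     static int parse_error(const char *buffer, int bufferSize,
     *     \t\t\t   int *bytesConsumed);
     */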
-rw-r--r--  inc/error_parser.h              |   44
-rw-r--r--  inc/libs3.h                     | 1914
-rw-r--r--  inc/mingw/pthread.h             |    2
-rw-r--r--  inc/mingw/sys/utsname.h         |    4
-rw-r--r--  inc/request.h                   |  158
-rw-r--r--  inc/request_context.h           |    4
-rw-r--r--  inc/response_headers_handler.h  |   28
-rw-r--r--  inc/simplexml.h                 |   16
-rw-r--r--  inc/string_buffer.h             |  100
-rw-r--r--  inc/util.h                      |   18
-rw-r--r--  src/acl.c                       |  482
-rw-r--r--  src/bucket.c                    | 1074
-rw-r--r--  src/error_parser.c              |  362
-rw-r--r--  src/general.c                   |  804
-rw-r--r--  src/mingw_functions.c           |  114
-rw-r--r--  src/mingw_s3_functions.c        |    6
-rw-r--r--  src/object.c                    |  482
-rw-r--r--  src/request.c                   | 2282
-rw-r--r--  src/request_context.c           |  260
-rw-r--r--  src/response_headers_handler.c  |  322
-rw-r--r--  src/s3.c                        | 4642
-rw-r--r--  src/service.c                   |  240
-rw-r--r--  src/service_access_logging.c    |  862
-rw-r--r--  src/simplexml.c                 |  218
-rw-r--r--  src/testsimplexml.c             |   76
-rw-r--r--  src/util.c                      |  790
26 files changed, 7652 insertions(+), 7652 deletions(-)
diff --git a/inc/error_parser.h b/inc/error_parser.h
index ad75225..8785201 100644
--- a/inc/error_parser.h
+++ b/inc/error_parser.h
@@ -36,34 +36,34 @@
typedef struct ErrorParser
{
- // This is the S3ErrorDetails that this ErrorParser fills in from the
- // data that it parses
- S3ErrorDetails s3ErrorDetails;
+ // This is the S3ErrorDetails that this ErrorParser fills in from the
+ // data that it parses
+ S3ErrorDetails s3ErrorDetails;
- // This is the error XML parser
- SimpleXml errorXmlParser;
+ // This is the error XML parser
+ SimpleXml errorXmlParser;
- // Set to 1 after the first call to add
- int errorXmlParserInitialized;
+ // Set to 1 after the first call to add
+ int errorXmlParserInitialized;
- // Used to buffer the S3 Error Code as it is read in
- string_buffer(code, 1024);
+ // Used to buffer the S3 Error Code as it is read in
+ string_buffer(code, 1024);
- // Used to buffer the S3 Error Message as it is read in
- string_buffer(message, 1024);
+ // Used to buffer the S3 Error Message as it is read in
+ string_buffer(message, 1024);
- // Used to buffer the S3 Error Resource as it is read in
- string_buffer(resource, 1024);
+ // Used to buffer the S3 Error Resource as it is read in
+ string_buffer(resource, 1024);
- // Used to buffer the S3 Error Further Details as it is read in
- string_buffer(furtherDetails, 1024);
-
- // The extra details; we support up to EXTRA_DETAILS_SIZE of them
- S3NameValue extraDetails[EXTRA_DETAILS_SIZE];
+ // Used to buffer the S3 Error Further Details as it is read in
+ string_buffer(furtherDetails, 1024);
+
+ // The extra details; we support up to EXTRA_DETAILS_SIZE of them
+ S3NameValue extraDetails[EXTRA_DETAILS_SIZE];
- // This is the buffer from which the names and values used in extraDetails
- // are allocated
- string_multibuffer(extraDetailsNamesValues, EXTRA_DETAILS_SIZE * 1024);
+ // This is the buffer from which the names and values used in extraDetails
+ // are allocated
+ string_multibuffer(extraDetailsNamesValues, EXTRA_DETAILS_SIZE * 1024);
} ErrorParser;
@@ -71,7 +71,7 @@ typedef struct ErrorParser
void error_parser_initialize(ErrorParser *errorParser);
S3Status error_parser_add(ErrorParser *errorParser, char *buffer,
- int bufferSize);
+ int bufferSize);
void error_parser_convert_status(ErrorParser *errorParser, S3Status *status);
diff --git a/inc/libs3.h b/inc/libs3.h
index 8f9b199..89fbcc4 100644
--- a/inc/libs3.h
+++ b/inc/libs3.h
@@ -44,31 +44,31 @@ extern "C" {
* http://s3.amazonaws.com). Its design goals are:
*
* - To provide a simple and straightforward API for accessing all of S3's
- * functionality
+ * functionality
* - To not require the developer using libs3 to need to know anything about:
- * - HTTP
- * - XML
- * - SSL
- * In other words, this API is meant to stand on its own, without requiring
- * any implicit knowledge of how S3 services are accessed using HTTP
- * protocols.
+ * - HTTP
+ * - XML
+ * - SSL
+ * In other words, this API is meant to stand on its own, without requiring
+ * any implicit knowledge of how S3 services are accessed using HTTP
+ * protocols.
* - To be usable from multithreaded code
* - To be usable by code which wants to process multiple S3 requests
- * simultaneously from a single thread
+ * simultaneously from a single thread
* - To be usable in the simple, straightforward way using sequentialized
- * blocking requests
+ * blocking requests
*
* The general usage pattern of libs3 is:
*
* - Initialize libs3 once per program by calling S3_initialize() at program
- * start up time
+ * start up time
* - Make any number of requests to S3 for getting, putting, or listing
- * S3 buckets or objects, or modifying the ACLs associated with buckets
- * or objects, using one of three general approaches:
- * 1. Simple blocking requests, one at a time
- * 2. Multiple threads each making simple blocking requests
- * 3. From a single thread, managing multiple S3 requests simultaneously
- * using file descriptors and a select()/poll() loop
+ * S3 buckets or objects, or modifying the ACLs associated with buckets
+ * or objects, using one of three general approaches:
+ * 1. Simple blocking requests, one at a time
+ * 2. Multiple threads each making simple blocking requests
+ * 3. From a single thread, managing multiple S3 requests simultaneously
+ * using file descriptors and a select()/poll() loop
* - Shut down libs3 at program exit time by calling S3_deinitialize()
*
* In order to use libs3 with multiple threads, your program must provide
@@ -92,27 +92,27 @@ extern "C" {
*
* NOTE: Because HTTP and the S3 REST protocol are highly under-specified,
* libs3 must make some assumptions about the maximum length of certain HTTP
- * elements (such as headers) that it will accept. While efforts have been
+ * elements (such as headers) that it will accept. While efforts have been
* made to enforce maximums which are beyond that expected to be needed by any
* user of S3, it is always possible that these maximums may be too low in
- * some rare circumstances. Bug reports should this unlikely situation occur
+ * some rare circumstances. Bug reports should this unlikely situation occur
* would be most appreciated.
*
* Threading Rules
* ---------------
*
* 1. All arguments passed to any function must not be modified directly until
- * the function returns.
+ * the function returns.
* 2. All S3RequestContext and S3Request arguments passed to all functions may
- * not be passed to any other libs3 function by any other thread until the
- * function returns.
+ * not be passed to any other libs3 function by any other thread until the
+ * function returns.
* 3. All functions may be called simultaneously by multiple threads as long
- * as (1) and (2) are observed, EXCEPT for S3_initialize(), which must be
- * called from one thread at a time only.
+ * as (1) and (2) are observed, EXCEPT for S3_initialize(), which must be
+ * called from one thread at a time only.
* 4. All callbacks will be made in the thread of the caller of the function
- * which invoked them, so the caller of all libs3 functions should not hold
- * locks that it would try to re-acquire in a callback, as this may
- * deadlock.
+ * which invoked them, so the caller of all libs3 functions should not hold
+ * locks that it would try to re-acquire in a callback, as this may
+ * deadlock.
************************************************************************** **/
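
As a minimal sketch of the blocking usage pattern documented in the hunk
above (initialize once, issue requests, deinitialize at exit), assuming only
declarations that appear elsewhere in this header: S3_initialize(), the
S3ResponseHandler struct with its two callbacks, and S3_deinitialize(),
whose void prototype is described but not shown here. The actual request
call is omitted because no request function is declared in this excerpt:

    #include <stdio.h>
    #include "libs3.h"

    /* Called once the response properties (headers) have been received. */
    static S3Status propertiesCallback(const S3ResponseProperties *properties,
                                       void *callbackData)
    {
        (void) properties; (void) callbackData;
        return S3StatusOK;                  /* keep processing the request */
    }

    /* Always the last callback made for a request, success or failure. */
    static void completeCallback(S3Status status,
                                 const S3ErrorDetails *errorDetails,
                                 void *callbackData)
    {
        (void) errorDetails; (void) callbackData;
        if (status != S3StatusOK) {
            fprintf(stderr, "request failed: %d\n", (int) status);
        }
    }

    int main(void)
    {
        /* Initialize once per program; S3_INIT_ALL also covers winsock on
         * Microsoft Windows platforms. */
        if (S3_initialize("example/1.0", S3_INIT_ALL) != S3StatusOK) {
            return 1;
        }

        S3ResponseHandler handler = { &propertiesCallback, &completeCallback };
        (void) handler;   /* would be passed to a blocking request call here */

        /* Shut down libs3, matching the S3_initialize() call. */
        S3_deinitialize();
        return 0;
    }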
@@ -125,32 +125,32 @@ extern "C" {
* style requests will prepend the bucket name to this host name, and
* path-style requests will use this hostname directly
**/
-#define S3_HOSTNAME "s3.amazonaws.com"
+#define S3_HOSTNAME "s3.amazonaws.com"
/**
* S3_MAX_BUCKET_NAME_SIZE is the maximum size of a bucket name.
**/
-#define S3_MAX_BUCKET_NAME_SIZE 255
+#define S3_MAX_BUCKET_NAME_SIZE 255
/**
* S3_MAX_KEY_SIZE is the maximum size of keys that Amazon S3 supports.
**/
-#define S3_MAX_KEY_SIZE 1024
+#define S3_MAX_KEY_SIZE 1024
/**
* S3_MAX_METADATA_SIZE is the maximum number of bytes allowed for
* x-amz-meta header names and values in any request passed to Amazon S3
**/
-#define S3_MAX_METADATA_SIZE 2048
+#define S3_MAX_METADATA_SIZE 2048
/**
* S3_METADATA_HEADER_NAME_PREFIX is the prefix of an S3 "meta header"
**/
-#define S3_METADATA_HEADER_NAME_PREFIX "x-amz-meta-"
+#define S3_METADATA_HEADER_NAME_PREFIX "x-amz-meta-"
/**
@@ -161,7 +161,7 @@ extern "C" {
* "x-amz-meta-nv".
**/
#define S3_MAX_METADATA_COUNT \
- (S3_MAX_METADATA_SIZE / (sizeof(S3_METADATA_HEADER_NAME_PREFIX "nv") - 1))
+ (S3_MAX_METADATA_SIZE / (sizeof(S3_METADATA_HEADER_NAME_PREFIX "nv") - 1))
/**
@@ -169,7 +169,7 @@ extern "C" {
* set on a bucket or object at one time. It is also the maximum number of
* ACL grants that the XML ACL parsing routine will parse.
**/
-#define S3_MAX_ACL_GRANT_COUNT 100
+#define S3_MAX_ACL_GRANT_COUNT 100
/**
@@ -183,7 +183,7 @@ extern "C" {
* This is the maximum number of characters (including terminating \0) that
* libs3 supports in an ACL grantee user id.
**/
-#define S3_MAX_GRANTEE_USER_ID_SIZE 128
+#define S3_MAX_GRANTEE_USER_ID_SIZE 128
/**
@@ -199,9 +199,9 @@ extern "C" {
* query string
**/
#define S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE \
- (sizeof("https://" S3_HOSTNAME "/") + (S3_MAX_KEY_SIZE * 3) + \
- sizeof("?AWSAccessKeyId=") + 32 + sizeof("&Expires=") + 32 + \
- sizeof("&Signature=") + 28 + 1)
+ (sizeof("https://" S3_HOSTNAME "/") + (S3_MAX_KEY_SIZE * 3) + \
+ sizeof("?AWSAccessKeyId=") + 32 + sizeof("&Expires=") + 32 + \
+ sizeof("&Signature=") + 28 + 1)
/**
@@ -209,14 +209,14 @@ extern "C" {
* the winsock library should be initialized by libs3; only relevent on
* Microsoft Windows platforms.
**/
-#define S3_INIT_WINSOCK 1
+#define S3_INIT_WINSOCK 1
/**
* This convenience constant is used by the S3_initialize() function to
* indicate that all libraries required by libs3 should be initialized.
**/
-#define S3_INIT_ALL (S3_INIT_WINSOCK)
+#define S3_INIT_ALL (S3_INIT_WINSOCK)
/** **************************************************************************
@@ -230,138 +230,138 @@ extern "C" {
**/
typedef enum
{
- S3StatusOK ,
-
- /**
- * Errors that prevent the S3 request from being issued or response from
- * being read
- **/
- S3StatusInternalError ,
- S3StatusOutOfMemory ,
- S3StatusInterrupted ,
- S3StatusInvalidBucketNameTooLong ,
- S3StatusInvalidBucketNameFirstCharacter ,
- S3StatusInvalidBucketNameCharacter ,
- S3StatusInvalidBucketNameCharacterSequence ,
- S3StatusInvalidBucketNameTooShort ,
- S3StatusInvalidBucketNameDotQuadNotation ,
- S3StatusQueryParamsTooLong ,
- S3StatusFailedToInitializeRequest ,
- S3StatusMetaDataHeadersTooLong ,
- S3StatusBadMetaData ,
- S3StatusBadContentType ,
- S3StatusContentTypeTooLong ,
- S3StatusBadMD5 ,
- S3StatusMD5TooLong ,
- S3StatusBadCacheControl ,
- S3StatusCacheControlTooLong ,
- S3StatusBadContentDispositionFilename ,
- S3StatusContentDispositionFilenameTooLong ,
- S3StatusBadContentEncoding ,
- S3StatusContentEncodingTooLong ,
- S3StatusBadIfMatchETag ,
- S3StatusIfMatchETagTooLong ,
- S3StatusBadIfNotMatchETag ,
- S3StatusIfNotMatchETagTooLong ,
- S3StatusHeadersTooLong ,
- S3StatusKeyTooLong ,
- S3StatusUriTooLong ,
- S3StatusXmlParseFailure ,
- S3StatusEmailAddressTooLong ,
- S3StatusUserIdTooLong ,
- S3StatusUserDisplayNameTooLong ,
- S3StatusGroupUriTooLong ,
- S3StatusPermissionTooLong ,
- S3StatusTargetBucketTooLong ,
- S3StatusTargetPrefixTooLong ,
- S3StatusTooManyGrants ,
- S3StatusBadGrantee ,
- S3StatusBadPermission ,
- S3StatusXmlDocumentTooLarge ,
- S3StatusNameLookupError ,
- S3StatusFailedToConnect ,
- S3StatusServerFailedVerification ,
- S3StatusConnectionFailed ,
- S3StatusAbortedByCallback ,
-
- /**
- * Errors from the S3 service
- **/
- S3StatusErrorAccessDenied ,
- S3StatusErrorAccountProblem ,
- S3StatusErrorAmbiguousGrantByEmailAddress ,
- S3StatusErrorBadDigest ,
- S3StatusErrorBucketAlreadyExists ,
- S3StatusErrorBucketAlreadyOwnedByYou ,
- S3StatusErrorBucketNotEmpty ,
- S3StatusErrorCredentialsNotSupported ,
- S3StatusErrorCrossLocationLoggingProhibited ,
- S3StatusErrorEntityTooSmall ,
- S3StatusErrorEntityTooLarge ,
- S3StatusErrorExpiredToken ,
- S3StatusErrorIncompleteBody ,
- S3StatusErrorIncorrectNumberOfFilesInPostRequest ,
- S3StatusErrorInlineDataTooLarge ,
- S3StatusErrorInternalError ,
- S3StatusErrorInvalidAccessKeyId ,
- S3StatusErrorInvalidAddressingHeader ,
- S3StatusErrorInvalidArgument ,
- S3StatusErrorInvalidBucketName ,
- S3StatusErrorInvalidDigest ,
- S3StatusErrorInvalidLocationConstraint ,
- S3StatusErrorInvalidPayer ,
- S3StatusErrorInvalidPolicyDocument ,
- S3StatusErrorInvalidRange ,
- S3StatusErrorInvalidSecurity ,
- S3StatusErrorInvalidSOAPRequest ,
- S3StatusErrorInvalidStorageClass ,
- S3StatusErrorInvalidTargetBucketForLogging ,
- S3StatusErrorInvalidToken ,
- S3StatusErrorInvalidURI ,
- S3StatusErrorKeyTooLong ,
- S3StatusErrorMalformedACLError ,
- S3StatusErrorMalformedXML ,
- S3StatusErrorMaxMessageLengthExceeded ,
- S3StatusErrorMaxPostPreDataLengthExceededError ,
- S3StatusErrorMetadataTooLarge ,
- S3StatusErrorMethodNotAllowed ,
- S3StatusErrorMissingAttachment ,
- S3StatusErrorMissingContentLength ,
- S3StatusErrorMissingSecurityElement ,
- S3StatusErrorMissingSecurityHeader ,
- S3StatusErrorNoLoggingStatusForKey ,
- S3StatusErrorNoSuchBucket ,
- S3StatusErrorNoSuchKey ,
- S3StatusErrorNotImplemented ,
- S3StatusErrorNotSignedUp ,
- S3StatusErrorOperationAborted ,
- S3StatusErrorPermanentRedirect ,
- S3StatusErrorPreconditionFailed ,
- S3StatusErrorRedirect ,
- S3StatusErrorRequestIsNotMultiPartContent ,
- S3StatusErrorRequestTimeout ,
- S3StatusErrorRequestTimeTooSkewed ,
- S3StatusErrorRequestTorrentOfBucketError ,
- S3StatusErrorSignatureDoesNotMatch ,
- S3StatusErrorSlowDown ,
- S3StatusErrorTemporaryRedirect ,
- S3StatusErrorTokenRefreshRequired ,
- S3StatusErrorTooManyBuckets ,
- S3StatusErrorUnexpectedContent ,
- S3StatusErrorUnresolvableGrantByEmailAddress ,
- S3StatusErrorUserKeyMustBeSpecified ,
- S3StatusErrorUnknown ,
-
- /**
- * The following are HTTP errors returned by S3 without enough detail to
- * distinguish any of the above S3StatusError conditions
- **/
- S3StatusHttpErrorMovedTemporarily ,
- S3StatusHttpErrorBadRequest ,
- S3StatusHttpErrorForbidden ,
- S3StatusHttpErrorNotFound ,
- S3StatusHttpErrorConflict ,
- S3StatusHttpErrorUnknown
+ S3StatusOK ,
+
+ /**
+ * Errors that prevent the S3 request from being issued or response from
+ * being read
+ **/
+ S3StatusInternalError ,
+ S3StatusOutOfMemory ,
+ S3StatusInterrupted ,
+ S3StatusInvalidBucketNameTooLong ,
+ S3StatusInvalidBucketNameFirstCharacter ,
+ S3StatusInvalidBucketNameCharacter ,
+ S3StatusInvalidBucketNameCharacterSequence ,
+ S3StatusInvalidBucketNameTooShort ,
+ S3StatusInvalidBucketNameDotQuadNotation ,
+ S3StatusQueryParamsTooLong ,
+ S3StatusFailedToInitializeRequest ,
+ S3StatusMetaDataHeadersTooLong ,
+ S3StatusBadMetaData ,
+ S3StatusBadContentType ,
+ S3StatusContentTypeTooLong ,
+ S3StatusBadMD5 ,
+ S3StatusMD5TooLong ,
+ S3StatusBadCacheControl ,
+ S3StatusCacheControlTooLong ,
+ S3StatusBadContentDispositionFilename ,
+ S3StatusContentDispositionFilenameTooLong ,
+ S3StatusBadContentEncoding ,
+ S3StatusContentEncodingTooLong ,
+ S3StatusBadIfMatchETag ,
+ S3StatusIfMatchETagTooLong ,
+ S3StatusBadIfNotMatchETag ,
+ S3StatusIfNotMatchETagTooLong ,
+ S3StatusHeadersTooLong ,
+ S3StatusKeyTooLong ,
+ S3StatusUriTooLong ,
+ S3StatusXmlParseFailure ,
+ S3StatusEmailAddressTooLong ,
+ S3StatusUserIdTooLong ,
+ S3StatusUserDisplayNameTooLong ,
+ S3StatusGroupUriTooLong ,
+ S3StatusPermissionTooLong ,
+ S3StatusTargetBucketTooLong ,
+ S3StatusTargetPrefixTooLong ,
+ S3StatusTooManyGrants ,
+ S3StatusBadGrantee ,
+ S3StatusBadPermission ,
+ S3StatusXmlDocumentTooLarge ,
+ S3StatusNameLookupError ,
+ S3StatusFailedToConnect ,
+ S3StatusServerFailedVerification ,
+ S3StatusConnectionFailed ,
+ S3StatusAbortedByCallback ,
+
+ /**
+ * Errors from the S3 service
+ **/
+ S3StatusErrorAccessDenied ,
+ S3StatusErrorAccountProblem ,
+ S3StatusErrorAmbiguousGrantByEmailAddress ,
+ S3StatusErrorBadDigest ,
+ S3StatusErrorBucketAlreadyExists ,
+ S3StatusErrorBucketAlreadyOwnedByYou ,
+ S3StatusErrorBucketNotEmpty ,
+ S3StatusErrorCredentialsNotSupported ,
+ S3StatusErrorCrossLocationLoggingProhibited ,
+ S3StatusErrorEntityTooSmall ,
+ S3StatusErrorEntityTooLarge ,
+ S3StatusErrorExpiredToken ,
+ S3StatusErrorIncompleteBody ,
+ S3StatusErrorIncorrectNumberOfFilesInPostRequest ,
+ S3StatusErrorInlineDataTooLarge ,
+ S3StatusErrorInternalError ,
+ S3StatusErrorInvalidAccessKeyId ,
+ S3StatusErrorInvalidAddressingHeader ,
+ S3StatusErrorInvalidArgument ,
+ S3StatusErrorInvalidBucketName ,
+ S3StatusErrorInvalidDigest ,
+ S3StatusErrorInvalidLocationConstraint ,
+ S3StatusErrorInvalidPayer ,
+ S3StatusErrorInvalidPolicyDocument ,
+ S3StatusErrorInvalidRange ,
+ S3StatusErrorInvalidSecurity ,
+ S3StatusErrorInvalidSOAPRequest ,
+ S3StatusErrorInvalidStorageClass ,
+ S3StatusErrorInvalidTargetBucketForLogging ,
+ S3StatusErrorInvalidToken ,
+ S3StatusErrorInvalidURI ,
+ S3StatusErrorKeyTooLong ,
+ S3StatusErrorMalformedACLError ,
+ S3StatusErrorMalformedXML ,
+ S3StatusErrorMaxMessageLengthExceeded ,
+ S3StatusErrorMaxPostPreDataLengthExceededError ,
+ S3StatusErrorMetadataTooLarge ,
+ S3StatusErrorMethodNotAllowed ,
+ S3StatusErrorMissingAttachment ,
+ S3StatusErrorMissingContentLength ,
+ S3StatusErrorMissingSecurityElement ,
+ S3StatusErrorMissingSecurityHeader ,
+ S3StatusErrorNoLoggingStatusForKey ,
+ S3StatusErrorNoSuchBucket ,
+ S3StatusErrorNoSuchKey ,
+ S3StatusErrorNotImplemented ,
+ S3StatusErrorNotSignedUp ,
+ S3StatusErrorOperationAborted ,
+ S3StatusErrorPermanentRedirect ,
+ S3StatusErrorPreconditionFailed ,
+ S3StatusErrorRedirect ,
+ S3StatusErrorRequestIsNotMultiPartContent ,
+ S3StatusErrorRequestTimeout ,
+ S3StatusErrorRequestTimeTooSkewed ,
+ S3StatusErrorRequestTorrentOfBucketError ,
+ S3StatusErrorSignatureDoesNotMatch ,
+ S3StatusErrorSlowDown ,
+ S3StatusErrorTemporaryRedirect ,
+ S3StatusErrorTokenRefreshRequired ,
+ S3StatusErrorTooManyBuckets ,
+ S3StatusErrorUnexpectedContent ,
+ S3StatusErrorUnresolvableGrantByEmailAddress ,
+ S3StatusErrorUserKeyMustBeSpecified ,
+ S3StatusErrorUnknown ,
+
+ /**
+ * The following are HTTP errors returned by S3 without enough detail to
+ * distinguish any of the above S3StatusError conditions
+ **/
+ S3StatusHttpErrorMovedTemporarily ,
+ S3StatusHttpErrorBadRequest ,
+ S3StatusHttpErrorForbidden ,
+ S3StatusHttpErrorNotFound ,
+ S3StatusHttpErrorConflict ,
+ S3StatusHttpErrorUnknown
} S3Status;
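
A short hedged example of acting on these status values in a
response-complete callback; it assumes the int S3_status_is_retryable(S3Status)
helper that is mentioned later in this header's documentation but not
declared in this excerpt:

    #include <stdio.h>
    #include "libs3.h"

    /* Example complete callback: record whether the failure is one that the
     * caller could reasonably retry (e.g. S3StatusErrorSlowDown or a dropped
     * connection) rather than a hard failure such as
     * S3StatusErrorAccessDenied. */
    static void completeCallback(S3Status status,
                                 const S3ErrorDetails *errorDetails,
                                 void *callbackData)
    {
        int *shouldRetry = (int *) callbackData;
        (void) errorDetails;

        if (status == S3StatusOK) {
            *shouldRetry = 0;
        }
        else {
            *shouldRetry = S3_status_is_retryable(status);
            fprintf(stderr, "request failed with status %d\n", (int) status);
        }
    }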
@@ -371,102 +371,102 @@ typedef enum
*
* In general, HTTPS is greatly preferred (and should be the default of any
* application using libs3) because it protects any data being sent to or
- * from S3 using strong encryption. However, HTTPS is much more CPU intensive
+ * from S3 using strong encryption. However, HTTPS is much more CPU intensive
* than HTTP, and if the caller is absolutely certain that it is OK for the
* data to be viewable by anyone in transit, then HTTP can be used.
**/
typedef enum
{
- S3ProtocolHTTPS = 0,
- S3ProtocolHTTP = 1
+ S3ProtocolHTTPS = 0,
+ S3ProtocolHTTP = 1
} S3Protocol;
/**
* S3UriStyle defines the form that an Amazon S3 URI identifying a bucket or
- * object can take. They are of these forms:
+ * object can take. They are of these forms:
*
* Virtual Host: ${protocol}://${bucket}.s3.amazonaws.com/[${key}]
* Path: ${protocol}://s3.amazonaws.com/${bucket}/[${key}]
*
* It is generally better to use the Virual Host URI form, because it ensures
* that the bucket name used is compatible with normal HTTP GETs and POSTs of
- * data to/from the bucket. However, if DNS lookups for the bucket are too
+ * data to/from the bucket. However, if DNS lookups for the bucket are too
* slow or unreliable for some reason, Path URI form may be used.
**/
typedef enum
{
- S3UriStyleVirtualHost = 0,
- S3UriStylePath = 1
+ S3UriStyleVirtualHost = 0,
+ S3UriStylePath = 1
} S3UriStyle;
/**
* S3GranteeType defines the type of Grantee used in an S3 ACL Grant.
* Amazon Customer By Email - identifies the Grantee using their Amazon S3
- * account email address
+ * account email address
* Canonical User - identifies the Grantee by S3 User ID and Display Name,
- * which can only be obtained by making requests to S3, for example, by
- * listing owned buckets
+ * which can only be obtained by making requests to S3, for example, by
+ * listing owned buckets
* All AWS Users - identifies all authenticated AWS users
* All Users - identifies all users
* Log Delivery - identifies the Amazon group responsible for writing
- * server access logs into buckets
+ * server access logs into buckets
**/
typedef enum
{
- S3GranteeTypeAmazonCustomerByEmail = 0,
- S3GranteeTypeCanonicalUser = 1,
- S3GranteeTypeAllAwsUsers = 2,
- S3GranteeTypeAllUsers = 3,
- S3GranteeTypeLogDelivery = 4
+ S3GranteeTypeAmazonCustomerByEmail = 0,
+ S3GranteeTypeCanonicalUser = 1,
+ S3GranteeTypeAllAwsUsers = 2,
+ S3GranteeTypeAllUsers = 3,
+ S3GranteeTypeLogDelivery = 4
} S3GranteeType;
/**
* This is an individual permission granted to a grantee in an S3 ACL Grant.
* Read permission gives the Grantee the permission to list the bucket, or
- * read the object or its metadata
+ * read the object or its metadata
* Write permission gives the Grantee the permission to create, overwrite, or
- * delete any object in the bucket, and is not supported for objects
+ * delete any object in the bucket, and is not supported for objects
* ReadACP permission gives the Grantee the permission to read the ACP for
- * the bucket or object; the owner of the bucket or object always has
- * this permission implicitly
+ * the bucket or object; the owner of the bucket or object always has
+ * this permission implicitly
* WriteACP permission gives the Grantee the permission to overwrite the ACP
- * for the bucket or object; the owner of the bucket or object always has
- * this permission implicitly
+ * for the bucket or object; the owner of the bucket or object always has
+ * this permission implicitly
* FullControl permission gives the Grantee all permissions specified by the
- * Read, Write, ReadACP, and WriteACP permissions
+ * Read, Write, ReadACP, and WriteACP permissions
**/
typedef enum
{
- S3PermissionRead = 0,
- S3PermissionWrite = 1,
- S3PermissionReadACP = 2,
- S3PermissionWriteACP = 3,
- S3PermissionFullControl = 4
+ S3PermissionRead = 0,
+ S3PermissionWrite = 1,
+ S3PermissionReadACP = 2,
+ S3PermissionWriteACP = 3,
+ S3PermissionFullControl = 4
} S3Permission;
/**
* S3CannedAcl is an ACL that can be specified when an object is created or
- * updated. Each canned ACL has a predefined value when expanded to a full
+ * updated. Each canned ACL has a predefined value when expanded to a full
* set of S3 ACL Grants.
* Private canned ACL gives the owner FULL_CONTROL and no other permissions
- * are issued
+ * are issued
* Public Read canned ACL gives the owner FULL_CONTROL and all users Read
- * permission
+ * permission
* Public Read Write canned ACL gives the owner FULL_CONTROL and all users
- * Read and Write permission
+ * Read and Write permission
* AuthenticatedRead canned ACL gives the owner FULL_CONTROL and authenticated
- * S3 users Read permission
+ * S3 users Read permission
**/
typedef enum
{
- S3CannedAclPrivate = 0, /* private */
- S3CannedAclPublicRead = 1, /* public-read */
- S3CannedAclPublicReadWrite = 2, /* public-read-write */
- S3CannedAclAuthenticatedRead = 3 /* authenticated-read */
+ S3CannedAclPrivate = 0, /* private */
+ S3CannedAclPublicRead = 1, /* public-read */
+ S3CannedAclPublicReadWrite = 2, /* public-read-write */
+ S3CannedAclAuthenticatedRead = 3 /* authenticated-read */
} S3CannedAcl;
@@ -487,296 +487,296 @@ typedef struct S3RequestContext S3RequestContext;
**/
typedef struct S3NameValue
{
- /**
- * The name part of the Name - Value pair
- **/
- const char *name;
-
- /**
- * The value part of the Name - Value pair
- **/
- const char *value;
+ /**
+ * The name part of the Name - Value pair
+ **/
+ const char *name;
+
+ /**
+ * The value part of the Name - Value pair
+ **/
+ const char *value;
} S3NameValue;
/**
* S3ResponseProperties is passed to the properties callback function which is
- * called when the complete response properties have been received. Some of
+ * called when the complete response properties have been received. Some of
* the fields of this structure are optional and may not be provided in the
* response, and some will always be provided in the response.
**/
typedef struct S3ResponseProperties
{
- /**
- * This optional field identifies the request ID and may be used when
- * reporting problems to Amazon.
- **/
- const char *requestId;
-
- /**
- * This optional field identifies the request ID and may be used when
- * reporting problems to Amazon.
- **/
- const char *requestId2;
-
- /**
- * This optional field is the content type of the data which is returned
- * by the request. If not provided, the default can be assumed to be
- * "binary/octet-stream".
- **/
- const char *contentType;
-
- /**
- * This optional field is the content length of the data which is returned
- * in the response. A negative value means that this value was not
- * provided in the response. A value of 0 means that there is no content
- * provided. A positive value gives the number of bytes in the content of
- * the response.
- **/
- uint64_t contentLength;
-
- /**
- * This optional field names the server which serviced the request.
- **/
- const char *server;
-
- /**
- * This optional field provides a string identifying the unique contents
- * of the resource identified by the request, such that the contents can
- * be assumed not to be changed if the same eTag is returned at a later
- * time decribing the same resource. This is an MD5 sum of the contents.
- **/
- const char *eTag;
-
- /**
- * This optional field provides the last modified time, relative to the
- * Unix epoch, of the contents. If this value is < 0, then the last
- * modified time was not provided in the response. If this value is >= 0,
- * then the last modified date of the contents are available as a number
- * of seconds since the UNIX epoch.
- *
- **/
- int64_t lastModified;
-
- /**
- * This is the number of user-provided meta data associated with the
- * resource.
- **/
- int metaDataCount;
-
- /**
- * These are the meta data associated with the resource. In each case,
- * the name will not include any S3-specific header prefixes
- * (i.e. x-amz-meta- will have been removed from the beginning), and
- * leading and trailing whitespace will have been stripped from the value.
- **/
- const S3NameValue *metaData;
+ /**
+ * This optional field identifies the request ID and may be used when
+ * reporting problems to Amazon.
+ **/
+ const char *requestId;
+
+ /**
+ * This optional field identifies the request ID and may be used when
+ * reporting problems to Amazon.
+ **/
+ const char *requestId2;
+
+ /**
+ * This optional field is the content type of the data which is returned
+ * by the request. If not provided, the default can be assumed to be
+ * "binary/octet-stream".
+ **/
+ const char *contentType;
+
+ /**
+ * This optional field is the content length of the data which is returned
+ * in the response. A negative value means that this value was not
+ * provided in the response. A value of 0 means that there is no content
+ * provided. A positive value gives the number of bytes in the content of
+ * the response.
+ **/
+ uint64_t contentLength;
+
+ /**
+ * This optional field names the server which serviced the request.
+ **/
+ const char *server;
+
+ /**
+ * This optional field provides a string identifying the unique contents
+ * of the resource identified by the request, such that the contents can
+ * be assumed not to be changed if the same eTag is returned at a later
+ * time decribing the same resource. This is an MD5 sum of the contents.
+ **/
+ const char *eTag;
+
+ /**
+ * This optional field provides the last modified time, relative to the
+ * Unix epoch, of the contents. If this value is < 0, then the last
+ * modified time was not provided in the response. If this value is >= 0,
+ * then the last modified date of the contents are available as a number
+ * of seconds since the UNIX epoch.
+ *
+ **/
+ int64_t lastModified;
+
+ /**
+ * This is the number of user-provided meta data associated with the
+ * resource.
+ **/
+ int metaDataCount;
+
+ /**
+ * These are the meta data associated with the resource. In each case,
+ * the name will not include any S3-specific header prefixes
+ * (i.e. x-amz-meta- will have been removed from the beginning), and
+ * leading and trailing whitespace will have been stripped from the value.
+ **/
+ const S3NameValue *metaData;
} S3ResponseProperties;
/**
- * S3AclGrant identifies a single grant in the ACL for a bucket or object. An
+ * S3AclGrant identifies a single grant in the ACL for a bucket or object. An
* ACL is composed of any number of grants, which specify a grantee and the
* permissions given to that grantee. S3 does not normalize ACLs in any way,
* so a redundant ACL specification will lead to a redundant ACL stored in S3.
**/
typedef struct S3AclGrant
{
- /**
- * The granteeType gives the type of grantee specified by this grant.
- **/
- S3GranteeType granteeType;
- /**
- * The identifier of the grantee that is set is determined by the
- * granteeType:
- *
- * S3GranteeTypeAmazonCustomerByEmail - amazonCustomerByEmail.emailAddress
- * S3GranteeTypeCanonicalUser - canonicalUser.id, canonicalUser.displayName
- * S3GranteeTypeAllAwsUsers - none
- * S3GranteeTypeAllUsers - none
- **/
- union
- {
- /**
- * This structure is used iff the granteeType is
- * S3GranteeTypeAmazonCustomerByEmail.
- **/
- struct
- {
- /**
- * This is the email address of the Amazon Customer being granted
- * permissions by this S3AclGrant.
- **/
- char emailAddress[S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE];
- } amazonCustomerByEmail;
- /**
- * This structure is used iff the granteeType is
- * S3GranteeTypeCanonicalUser.
- **/
- struct
- {
- /**
- * This is the CanonicalUser ID of the grantee
- **/
- char id[S3_MAX_GRANTEE_USER_ID_SIZE];
- /**
- * This is the display name of the grantee
- **/
- char displayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
- } canonicalUser;
- } grantee;
- /**
- * This is the S3Permission to be granted to the grantee
- **/
- S3Permission permission;
+ /**
+ * The granteeType gives the type of grantee specified by this grant.
+ **/
+ S3GranteeType granteeType;
+ /**
+ * The identifier of the grantee that is set is determined by the
+ * granteeType:
+ *
+ * S3GranteeTypeAmazonCustomerByEmail - amazonCustomerByEmail.emailAddress
+ * S3GranteeTypeCanonicalUser - canonicalUser.id, canonicalUser.displayName
+ * S3GranteeTypeAllAwsUsers - none
+ * S3GranteeTypeAllUsers - none
+ **/
+ union
+ {
+ /**
+ * This structure is used iff the granteeType is
+ * S3GranteeTypeAmazonCustomerByEmail.
+ **/
+ struct
+ {
+ /**
+ * This is the email address of the Amazon Customer being granted
+ * permissions by this S3AclGrant.
+ **/
+ char emailAddress[S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE];
+ } amazonCustomerByEmail;
+ /**
+ * This structure is used iff the granteeType is
+ * S3GranteeTypeCanonicalUser.
+ **/
+ struct
+ {
+ /**
+ * This is the CanonicalUser ID of the grantee
+ **/
+ char id[S3_MAX_GRANTEE_USER_ID_SIZE];
+ /**
+ * This is the display name of the grantee
+ **/
+ char displayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+ } canonicalUser;
+ } grantee;
+ /**
+ * This is the S3Permission to be granted to the grantee
+ **/
+ S3Permission permission;
} S3AclGrant;
/**
- * A context for working with objects within a bucket. A bucket context holds
+ * A context for working with objects within a bucket. A bucket context holds
* all information necessary for working with a bucket, and may be used
* repeatedly over many consecutive (or simultaneous) calls into libs3 bucket
* operation functions.
**/
typedef struct S3BucketContext
{
- /**
- * The name of the bucket to use in the bucket context
- **/
- const char *bucketName;
-
- /**
- * The protocol to use when accessing the bucket
- **/
- S3Protocol protocol;
-
- /**
- * The URI style to use for all URIs sent to Amazon S3 while working with
- * this bucket context
- **/
- S3UriStyle uriStyle;
-
- /**
- * The Amazon Access Key ID to use for access to the bucket
- **/
- const char *accessKeyId;
-
- /**
- * The Amazon Secret Access Key to use for access to the bucket
- **/
- const char *secretAccessKey;
+ /**
+ * The name of the bucket to use in the bucket context
+ **/
+ const char *bucketName;
+
+ /**
+ * The protocol to use when accessing the bucket
+ **/
+ S3Protocol protocol;
+
+ /**
+ * The URI style to use for all URIs sent to Amazon S3 while working with
+ * this bucket context
+ **/
+ S3UriStyle uriStyle;
+
+ /**
+ * The Amazon Access Key ID to use for access to the bucket
+ **/
+ const char *accessKeyId;
+
+ /**
+ * The Amazon Secret Access Key to use for access to the bucket
+ **/
+ const char *secretAccessKey;
} S3BucketContext;
/**
* This is a single entry supplied to the list bucket callback by a call to
- * S3_list_bucket. It identifies a single matching key from the list
+ * S3_list_bucket. It identifies a single matching key from the list
* operation.
**/
typedef struct S3ListBucketContent
{
- /**
- * This is the next key in the list bucket results.
- **/
- const char *key;
-
- /**
- * This is the number of seconds since UNIX epoch of the last modified
- * date of the object identified by the key.
- **/
- int64_t lastModified;
-
- /**
- * This gives a tag which gives a signature of the contents of the object,
- * which is the MD5 of the contents of the object.
- **/
- const char *eTag;
-
- /**
- * This is the size of the object in bytes.
- **/
- uint64_t size;
-
- /**
- * This is the ID of the owner of the key; it is present only if access
- * permissions allow it to be viewed.
- **/
- const char *ownerId;
-
- /**
- * This is the display name of the owner of the key; it is present only if
- * access permissions allow it to be viewed.
- **/
- const char *ownerDisplayName;
+ /**
+ * This is the next key in the list bucket results.
+ **/
+ const char *key;
+
+ /**
+ * This is the number of seconds since UNIX epoch of the last modified
+ * date of the object identified by the key.
+ **/
+ int64_t lastModified;
+
+ /**
+ * This gives a tag which gives a signature of the contents of the object,
+ * which is the MD5 of the contents of the object.
+ **/
+ const char *eTag;
+
+ /**
+ * This is the size of the object in bytes.
+ **/
+ uint64_t size;
+
+ /**
+ * This is the ID of the owner of the key; it is present only if access
+ * permissions allow it to be viewed.
+ **/
+ const char *ownerId;
+
+ /**
+ * This is the display name of the owner of the key; it is present only if
+ * access permissions allow it to be viewed.
+ **/
+ const char *ownerDisplayName;
} S3ListBucketContent;
/**
* S3PutProperties is the set of properties that may optionally be set by the
- * user when putting objects to S3. Each field of this structure is optional
+ * user when putting objects to S3. Each field of this structure is optional
* and may or may not be present.
**/
typedef struct S3PutProperties
{
- /**
- * If present, this is the Content-Type that should be associated with the
- * object. If not provided, S3 defaults to "binary/octet-stream".
- **/
- const char *contentType;
-
- /**
- * If present, this provides the MD5 signature of the contents, and is
- * used to validate the contents. This is highly recommended by Amazon
- * but not required. Its format is as a base64-encoded MD5 sum.
- **/
- const char *md5;
-
- /**
- * If present, this gives a Cache-Control header string to be supplied to
- * HTTP clients which download this
- **/
- const char *cacheControl;
-
- /**
- * If present, this gives the filename to save the downloaded file to,
- * whenever the object is downloaded via a web browser. This is only
- * relevent for objects which are intended to be shared to users via web
- * browsers and which is additionally intended to be downloaded rather
- * than viewed.
- **/
- const char *contentDispositionFilename;
-
- /**
- * If present, this identifies the content encoding of the object. This
- * is only applicable to encoded (usually, compressed) content, and only
- * relevent if the object is intended to be downloaded via a browser.
- **/
- const char *contentEncoding;
-
- /**
- * If >= 0, this gives an expiration date for the content. This
- * information is typically only delivered to users who download the
- * content via a web browser.
- **/
- int64_t expires;
-
- /**
- * This identifies the "canned ACL" that should be used for this object.
- * The default (0) gives only the owner of the object access to it.
- **/
- S3CannedAcl cannedAcl;
-
- /**
- * This is the number of values in the metaData field.
- **/
- int metaDataCount;
-
- /**
- * These are the meta data to pass to S3. In each case, the name part of
- * the Name - Value pair should not include any special S3 HTTP header
- * prefix (i.e., should be of the form 'foo', NOT 'x-amz-meta-foo').
- **/
- const S3NameValue *metaData;
+ /**
+ * If present, this is the Content-Type that should be associated with the
+ * object. If not provided, S3 defaults to "binary/octet-stream".
+ **/
+ const char *contentType;
+
+ /**
+ * If present, this provides the MD5 signature of the contents, and is
+ * used to validate the contents. This is highly recommended by Amazon
+ * but not required. Its format is as a base64-encoded MD5 sum.
+ **/
+ const char *md5;
+
+ /**
+ * If present, this gives a Cache-Control header string to be supplied to
+ * HTTP clients which download this
+ **/
+ const char *cacheControl;
+
+ /**
+ * If present, this gives the filename to save the downloaded file to,
+ * whenever the object is downloaded via a web browser. This is only
+ * relevent for objects which are intended to be shared to users via web
+ * browsers and which is additionally intended to be downloaded rather
+ * than viewed.
+ **/
+ const char *contentDispositionFilename;
+
+ /**
+ * If present, this identifies the content encoding of the object. This
+ * is only applicable to encoded (usually, compressed) content, and only
+ * relevent if the object is intended to be downloaded via a browser.
+ **/
+ const char *contentEncoding;
+
+ /**
+ * If >= 0, this gives an expiration date for the content. This
+ * information is typically only delivered to users who download the
+ * content via a web browser.
+ **/
+ int64_t expires;
+
+ /**
+ * This identifies the "canned ACL" that should be used for this object.
+ * The default (0) gives only the owner of the object access to it.
+ **/
+ S3CannedAcl cannedAcl;
+
+ /**
+ * This is the number of values in the metaData field.
+ **/
+ int metaDataCount;
+
+ /**
+ * These are the meta data to pass to S3. In each case, the name part of
+ * the Name - Value pair should not include any special S3 HTTP header
+ * prefix (i.e., should be of the form 'foo', NOT 'x-amz-meta-foo').
+ **/
+ const S3NameValue *metaData;
} S3PutProperties;
@@ -786,76 +786,76 @@ typedef struct S3PutProperties
**/
typedef struct S3GetConditions
{
- /**
- * The request will be processed if the Last-Modification header of the
- * object is greater than or equal to this value, specified as a number of
- * seconds since Unix epoch. If this value is less than zero, it will not
- * be used in the conditional.
- **/
- int64_t ifModifiedSince;
-
- /**
- * The request will be processed if the Last-Modification header of the
- * object is less than this value, specified as a number of seconds since
- * Unix epoch. If this value is less than zero, it will not be used in
- * the conditional.
- **/
- int64_t ifNotModifiedSince;
-
- /**
- * If non-NULL, this gives an eTag header value which the object must
- * match in order to be returned. Note that altough the eTag is simply an
- * MD5, this must be presented in the S3 eTag form, which typically
- * includes double-quotes.
- **/
- const char *ifMatchETag;
-
- /**
- * If non-NULL, this gives an eTag header value which the object must not
- * match in order to be returned. Note that altough the eTag is simply an
- * MD5, this must be presented in the S3 eTag form, which typically
- * includes double-quotes.
- **/
- const char *ifNotMatchETag;
+ /**
+ * The request will be processed if the Last-Modification header of the
+ * object is greater than or equal to this value, specified as a number of
+ * seconds since Unix epoch. If this value is less than zero, it will not
+ * be used in the conditional.
+ **/
+ int64_t ifModifiedSince;
+
+ /**
+ * The request will be processed if the Last-Modification header of the
+ * object is less than this value, specified as a number of seconds since
+ * Unix epoch. If this value is less than zero, it will not be used in
+ * the conditional.
+ **/
+ int64_t ifNotModifiedSince;
+
+ /**
+ * If non-NULL, this gives an eTag header value which the object must
+ * match in order to be returned. Note that altough the eTag is simply an
+ * MD5, this must be presented in the S3 eTag form, which typically
+ * includes double-quotes.
+ **/
+ const char *ifMatchETag;
+
+ /**
+ * If non-NULL, this gives an eTag header value which the object must not
+ * match in order to be returned. Note that altough the eTag is simply an
+ * MD5, this must be presented in the S3 eTag form, which typically
+ * includes double-quotes.
+ **/
+ const char *ifNotMatchETag;
} S3GetConditions;
/**
- * S3ErrorDetails provides detailed information describing an S3 error. This
+ * S3ErrorDetails provides detailed information describing an S3 error. This
* is only presented when the error is an S3-generated error (i.e. one of the
* S3StatusErrorXXX values).
**/
typedef struct S3ErrorDetails
{
- /**
- * This is the human-readable message that Amazon supplied describing the
- * error
- **/
- const char *message;
-
- /**
- * This identifies the resource for which the error occurred
- **/
- const char *resource;
-
- /**
- * This gives human-readable further details describing the specifics of
- * this error
- **/
- const char *furtherDetails;
-
- /**
- * This gives the number of S3NameValue pairs present in the extraDetails
- * array
- **/
- int extraDetailsCount;
-
- /**
- * S3 can provide extra details in a freeform Name - Value pair format.
- * Each error can have any number of these, and this array provides these
- * additional extra details.
- **/
- S3NameValue *extraDetails;
+ /**
+ * This is the human-readable message that Amazon supplied describing the
+ * error
+ **/
+ const char *message;
+
+ /**
+ * This identifies the resource for which the error occurred
+ **/
+ const char *resource;
+
+ /**
+ * This gives human-readable further details describing the specifics of
+ * this error
+ **/
+ const char *furtherDetails;
+
+ /**
+ * This gives the number of S3NameValue pairs present in the extraDetails
+ * array
+ **/
+ int extraDetailsCount;
+
+ /**
+ * S3 can provide extra details in a freeform Name - Value pair format.
+ * Each error can have any number of these, and this array provides these
+ * additional extra details.
+ **/
+ S3NameValue *extraDetails;
} S3ErrorDetails;
@@ -869,38 +869,38 @@ typedef struct S3ErrorDetails
*
* @param properties are the properties that are available from the response
* @param callbackData is the callback data as specified when the request
- * was issued.
+ * was issued.
* @return S3StatusOK to continue processing the request, anything else to
- * immediately abort the request with a status which will be
- * passed to the S3ResponseCompleteCallback for this request.
- * Typically, this will return either S3StatusOK or
- * S3StatusAbortedByCallback.
+ * immediately abort the request with a status which will be
+ * passed to the S3ResponseCompleteCallback for this request.
+ * Typically, this will return either S3StatusOK or
+ * S3StatusAbortedByCallback.
**/
typedef S3Status (S3ResponsePropertiesCallback)
- (const S3ResponseProperties *properties, void *callbackData);
+ (const S3ResponseProperties *properties, void *callbackData);
/**
* This callback is made when the response has been completely received, or an
* error has occurred which has prematurely aborted the request, or one of the
* other user-supplied callbacks returned a value intended to abort the
- * request. This callback is always made for every request, as the very last
+ * request. This callback is always made for every request, as the very last
* callback made for that request.
*
* @param status gives the overall status of the response, indicating success
- * or failure; use S3_status_is_retryable() as a simple way to detect
- * whether or not the status indicates that the request failed but may
- * be retried.
+ * or failure; use S3_status_is_retryable() as a simple way to detect
+ * whether or not the status indicates that the request failed but may
+ * be retried.
* @param errorDetails if non-NULL, gives details as returned by the S3
- * service, describing the error
+ * service, describing the error
* @param callbackData is the callback data as specified when the request
- * was issued.
+ * was issued.
**/
typedef void (S3ResponseCompleteCallback)(S3Status status,
- const S3ErrorDetails *errorDetails,
- void *callbackData);
+ const S3ErrorDetails *errorDetails,
+ void *callbackData);
-
+
/**
* This callback is made for each bucket resulting from a list service
* operation.
@@ -909,21 +909,21 @@ typedef void (S3ResponseCompleteCallback)(S3Status status,
* @param ownerDisplayName is the owner display name of the owner of the bucket
* @param bucketName is the name of the bucket
* @param creationDateSeconds if < 0 indicates that no creation date was
- * supplied for the bucket; if >= 0 indicates the number of seconds
- * since UNIX Epoch of the creation date of the bucket
+ * supplied for the bucket; if >= 0 indicates the number of seconds
+ * since UNIX Epoch of the creation date of the bucket
* @param callbackData is the callback data as specified when the request
- * was issued.
+ * was issued.
* @return S3StatusOK to continue processing the request, anything else to
- * immediately abort the request with a status which will be
- * passed to the S3ResponseCompleteCallback for this request.
- * Typically, this will return either S3StatusOK or
- * S3StatusAbortedByCallback.
+ * immediately abort the request with a status which will be
+ * passed to the S3ResponseCompleteCallback for this request.
+ * Typically, this will return either S3StatusOK or
+ * S3StatusAbortedByCallback.
**/
typedef S3Status (S3ListServiceCallback)(const char *ownerId,
- const char *ownerDisplayName,
- const char *bucketName,
- int64_t creationDateSeconds,
- void *callbackData);
+ const char *ownerDisplayName,
+ const char *bucketName,
+ int64_t creationDateSeconds,
+ void *callbackData);
/**
@@ -933,37 +933,37 @@ typedef S3Status (S3ListServiceCallback)(const char *ownerId,
* report all items resulting from the list bucket operation.
*
* @param isTruncated is true if the list bucket request was truncated by the
- * S3 service, in which case the remainder of the list may be obtained
- * by querying again using the Marker parameter to start the query
- * after this set of results
+ * S3 service, in which case the remainder of the list may be obtained
+ * by querying again using the Marker parameter to start the query
+ * after this set of results
* @param nextMarker if present, gives the largest (alphabetically) key
- * returned in the response, which, if isTruncated is true, may be used
- * as the marker in a subsequent list buckets operation to continue
- * listing
+ * returned in the response, which, if isTruncated is true, may be used
+ * as the marker in a subsequent list buckets operation to continue
+ * listing
* @param contentsCount is the number of ListBucketContent structures in the
- * contents parameter
+ * contents parameter
* @param contents is an array of ListBucketContent structures, each one
- * describing an object in the bucket
+ * describing an object in the bucket
* @param commonPrefixesCount is the number of common prefixes strings in the
- * commonPrefixes parameter
+ * commonPrefixes parameter
* @param commonPrefixes is an array of strings, each specifing one of the
- * common prefixes as returned by S3
+ * common prefixes as returned by S3
* @param callbackData is the callback data as specified when the request
- * was issued.
+ * was issued.
* @return S3StatusOK to continue processing the request, anything else to
- * immediately abort the request with a status which will be
- * passed to the S3ResponseCompleteCallback for this request.
- * Typically, this will return either S3StatusOK or
- * S3StatusAbortedByCallback.
+ * immediately abort the request with a status which will be
+ * passed to the S3ResponseCompleteCallback for this request.
+ * Typically, this will return either S3StatusOK or
+ * S3StatusAbortedByCallback.
**/
typedef S3Status (S3ListBucketCallback)(int isTruncated,
- const char *nextMarker,
- int contentsCount,
- const S3ListBucketContent *contents,
- int commonPrefixesCount,
- const char **commonPrefixes,
- void *callbackData);
-
+ const char *nextMarker,
+ int contentsCount,
+ const S3ListBucketContent *contents,
+ int commonPrefixesCount,
+ const char **commonPrefixes,
+ void *callbackData);
+
/**
* This callback is made during a put object operation, to obtain the next
@@ -972,19 +972,19 @@ typedef S3Status (S3ListBucketCallback)(int isTruncated,
* write to the service, until a negative or 0 value is returned.
*
* @param bufferSize gives the maximum number of bytes that may be written
- * into the buffer parameter by this callback
+ * into the buffer parameter by this callback
* @param buffer gives the buffer to fill with at most bufferSize bytes of
- * data as the next chunk of data to send to S3 as the contents of this
- * object
+ * data as the next chunk of data to send to S3 as the contents of this
+ * object
* @param callbackData is the callback data as specified when the request
- * was issued.
+ * was issued.
* @return < 0 to abort the request with the S3StatusAbortedByCallback, which
- * will be pased to the response complete callback for this request, or
- * 0 to indicate the end of data, or > 0 to identify the number of
- * bytes that were written into the buffer by this callback
+ * will be pased to the response complete callback for this request, or
+ * 0 to indicate the end of data, or > 0 to identify the number of
+ * bytes that were written into the buffer by this callback
**/
typedef int (S3PutObjectDataCallback)(int bufferSize, char *buffer,
- void *callbackData);
+ void *callbackData);
/**
@@ -998,16 +998,16 @@ typedef int (S3PutObjectDataCallback)(int bufferSize, char *buffer,
* @param bufferSize gives the number of bytes in buffer
* @param buffer is the data being passed into the callback
* @param callbackData is the callback data as specified when the request
- * was issued.
+ * was issued.
* @return S3StatusOK to continue processing the request, anything else to
- * immediately abort the request with a status which will be
- * passed to the S3ResponseCompleteCallback for this request.
- * Typically, this will return either S3StatusOK or
- * S3StatusAbortedByCallback.
+ * immediately abort the request with a status which will be
+ * passed to the S3ResponseCompleteCallback for this request.
+ * Typically, this will return either S3StatusOK or
+ * S3StatusAbortedByCallback.
**/
typedef S3Status (S3GetObjectDataCallback)(int bufferSize, const char *buffer,
- void *callbackData);
-
+ void *callbackData);
+
/** **************************************************************************
* Callback Structures
@@ -1020,20 +1020,20 @@ typedef S3Status (S3GetObjectDataCallback)(int bufferSize, const char *buffer,
**/
typedef struct S3ResponseHandler
{
- /**
- * The propertiesCallback is made when the response properties have
- * successfully been returned from S3. This function may not be called
- * if the response properties were not successfully returned from S3.
- **/
- S3ResponsePropertiesCallback *propertiesCallback;
-
- /**
- * The completeCallback is always called for every request made to S3,
- * regardless of the outcome of the request. It provides the status of
- * the request upon its completion, as well as extra error details in the
- * event of an S3 error.
- **/
- S3ResponseCompleteCallback *completeCallback;
+ /**
+ * The propertiesCallback is made when the response properties have
+ * successfully been returned from S3. This function may not be called
+ * if the response properties were not successfully returned from S3.
+ **/
+ S3ResponsePropertiesCallback *propertiesCallback;
+
+ /**
+ * The completeCallback is always called for every request made to S3,
+ * regardless of the outcome of the request. It provides the status of
+ * the request upon its completion, as well as extra error details in the
+ * event of an S3 error.
+ **/
+ S3ResponseCompleteCallback *completeCallback;
} S3ResponseHandler;
@@ -1043,16 +1043,16 @@ typedef struct S3ResponseHandler
**/
typedef struct S3ListServiceHandler
{
- /**
- * responseHandler provides the properties and complete callback
- **/
- S3ResponseHandler responseHandler;
-
- /**
- * The listServiceCallback is called as items are reported back from S3 as
- * responses to the request
- **/
- S3ListServiceCallback *listServiceCallback;
+ /**
+ * responseHandler provides the properties and complete callback
+ **/
+ S3ResponseHandler responseHandler;
+
+ /**
+ * The listServiceCallback is called as items are reported back from S3 as
+ * responses to the request
+ **/
+ S3ListServiceCallback *listServiceCallback;
} S3ListServiceHandler;
@@ -1062,18 +1062,18 @@ typedef struct S3ListServiceHandler
**/
typedef struct S3ListBucketHandler
{
- /**
- * responseHandler provides the properties and complete callback
- **/
- S3ResponseHandler responseHandler;
-
- /**
- * The listBucketCallback is called as items are reported back from S3 as
- * responses to the request. This may be called more than one time per
- * list bucket request, each time providing more items from the list
- * operation.
- **/
- S3ListBucketCallback *listBucketCallback;
+ /**
+ * responseHandler provides the properties and complete callback
+ **/
+ S3ResponseHandler responseHandler;
+
+ /**
+ * The listBucketCallback is called as items are reported back from S3 as
+ * responses to the request. This may be called more than one time per
+ * list bucket request, each time providing more items from the list
+ * operation.
+ **/
+ S3ListBucketCallback *listBucketCallback;
} S3ListBucketHandler;
@@ -1083,18 +1083,18 @@ typedef struct S3ListBucketHandler
**/
typedef struct S3PutObjectHandler
{
- /**
- * responseHandler provides the properties and complete callback
- **/
- S3ResponseHandler responseHandler;
-
- /**
- * The putObjectDataCallback is called to acquire data to send to S3 as
- * the contents of the put_object request. It is made repeatedly until it
- * returns a negative number (indicating that the request should be
- * aborted), or 0 (indicating that all data has been supplied).
- **/
- S3PutObjectDataCallback *putObjectDataCallback;
+ /**
+ * responseHandler provides the properties and complete callback
+ **/
+ S3ResponseHandler responseHandler;
+
+ /**
+ * The putObjectDataCallback is called to acquire data to send to S3 as
+ * the contents of the put_object request. It is made repeatedly until it
+ * returns a negative number (indicating that the request should be
+ * aborted), or 0 (indicating that all data has been supplied).
+ **/
+ S3PutObjectDataCallback *putObjectDataCallback;
} S3PutObjectHandler;
@@ -1104,19 +1104,19 @@ typedef struct S3PutObjectHandler
**/
typedef struct S3GetObjectHandler
{
- /**
- * responseHandler provides the properties and complete callback
- **/
- S3ResponseHandler responseHandler;
-
- /**
- * The getObjectDataCallback is called as data is read from S3 as the
- * contents of the object being read in the get_object request. It is
- * called repeatedly until there is no more data provided in the request,
- * or until the callback returns an error status indicating that the
- * request should be aborted.
- **/
- S3GetObjectDataCallback *getObjectDataCallback;
+ /**
+ * responseHandler provides the properties and complete callback
+ **/
+ S3ResponseHandler responseHandler;
+
+ /**
+ * The getObjectDataCallback is called as data is read from S3 as the
+ * contents of the object being read in the get_object request. It is
+ * called repeatedly until there is no more data provided in the request,
+ * or until the callback returns an error status indicating that the
+ * request should be aborted.
+ **/
+ S3GetObjectDataCallback *getObjectDataCallback;
} S3GetObjectHandler;
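Illustration only, not part of this patch: a minimal sketch of how these handler structures are typically filled in. The callback signatures follow the typedefs declared earlier in libs3.h; the function names here are placeholders.

    #include "libs3.h"

    /* Called when response headers arrive; returning anything other than
       S3StatusOK aborts the request. */
    static S3Status propertiesCallback(const S3ResponseProperties *properties,
                                       void *callbackData)
    {
        (void) properties; (void) callbackData;
        return S3StatusOK;
    }

    /* Always called exactly once per request, with its final status. */
    static void completeCallback(S3Status status, const S3ErrorDetails *error,
                                 void *callbackData)
    {
        (void) error;
        if (callbackData) {
            *((S3Status *) callbackData) = status;
        }
    }

    /* The simple handler is used on its own by bucket and ACL requests ... */
    static const S3ResponseHandler responseHandler =
    {
        &propertiesCallback, &completeCallback
    };

    /* ... and is embedded as the first member of the composite handlers. */
    S3GetObjectDataCallback getObjectDataCallback;      /* defined elsewhere */

    static const S3GetObjectHandler getObjectHandler =
    {
        { &propertiesCallback, &completeCallback },
        &getObjectDataCallback
    };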
@@ -1128,39 +1128,39 @@ typedef struct S3GetObjectHandler
* Initializes libs3 for use. This function must be called before any other
* libs3 function is called. It may be called multiple times, with the same
* effect as calling it once, as long as S3_deinitialize() is called an
- * equal number of times when the program has finished. This function is NOT
+ * equal number of times when the program has finished. This function is NOT
* thread-safe and must only be called by one thread at a time.
*
* @param userAgentInfo is a string that will be included in the User-Agent
- * header of every request made to the S3 service. You may provide
- * NULL or the empty string if you don't care about this. The value
- * will not be copied by this function and must remain unaltered by the
- * caller until S3_deinitialize() is called.
+ * header of every request made to the S3 service. You may provide
+ * NULL or the empty string if you don't care about this. The value
+ * will not be copied by this function and must remain unaltered by the
+ * caller until S3_deinitialize() is called.
* @param flags is a bitmask of some combination of S3_INIT_XXX flag, or
- * S3_INIT_ALL, indicating which of the libraries that libs3 depends
- * upon should be initialized by S3_initialize(). Only if your program
- * initializes one of these dependency libraries itself should anything
- * other than S3_INIT_ALL be passed in for this bitmask.
+ * S3_INIT_ALL, indicating which of the libraries that libs3 depends
+ * upon should be initialized by S3_initialize(). Only if your program
+ * initializes one of these dependency libraries itself should anything
+ * other than S3_INIT_ALL be passed in for this bitmask.
*
- * You should pass S3_INIT_WINSOCK if and only if your application does
- * not initialize winsock elsewhere. On non-Microsoft Windows
- * platforms it has no effect.
+ * You should pass S3_INIT_WINSOCK if and only if your application does
+ * not initialize winsock elsewhere. On non-Microsoft Windows
+ * platforms it has no effect.
*
- * As a convenience, the macro S3_INIT_ALL is provided, which will do
- * all necessary initialization; however, be warned that things may
- * break if your application re-initializes the dependent libraries
- * later.
+ * As a convenience, the macro S3_INIT_ALL is provided, which will do
+ * all necessary initialization; however, be warned that things may
+ * break if your application re-initializes the dependent libraries
+ * later.
* @return One of:
- * S3StatusOK on success
- * S3StatusInternalError if dependent libraries could not be
- * initialized
- * S3StatusOutOfMemory on failure due to out of memory
+ * S3StatusOK on success
+ * S3StatusInternalError if dependent libraries could not be
+ * initialized
+ * S3StatusOutOfMemory on failure due to out of memory
**/
S3Status S3_initialize(const char *userAgentInfo, int flags);
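For orientation (not part of the header itself), one possible shape of the initialize/deinitialize pairing described above; the user-agent string is an arbitrary placeholder.

    #include <stdio.h>
    #include "libs3.h"

    int main(void)
    {
        /* S3_INIT_ALL asks libs3 to initialize all dependent libraries. */
        S3Status status = S3_initialize("example-app/0.1", S3_INIT_ALL);
        if (status != S3StatusOK) {
            fprintf(stderr, "S3_initialize failed: %s\n",
                    S3_get_status_name(status));
            return 1;
        }

        /* ... make libs3 requests here ... */

        /* One S3_deinitialize() for each successful S3_initialize(). */
        S3_deinitialize();
        return 0;
    }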
/**
- * Must be called once per program for each call to libs3_initialize(). After
+ * Must be called once per program for each call to S3_initialize(). After
* this call is complete, no libs3 function may be called except
* S3_initialize().
**/
@@ -1180,73 +1180,73 @@ const char *S3_get_status_name(S3Status status);
* This function may be used to validate an S3 bucket name as being in the
* correct form for use with the S3 service. Amazon S3 limits the allowed
* characters in S3 bucket names, as well as imposing some additional rules on
- * the length of bucket names and their structure. There are actually two
+ * the length of bucket names and their structure. There are actually two
* limits; one for bucket names used only in path-style URIs, and a more
- * strict limit used for bucket names used in virtual-host-style URIs. It is
+ * strict limit used for bucket names used in virtual-host-style URIs. It is
* advisable to use only bucket names which meet the more strict requirements
 * regardless of how the bucket is expected to be used.
*
* This method does NOT validate that the bucket is available for use in the
* S3 service, so the return value of this function cannot be used to decide
 * whether or not a bucket with the given name already exists in Amazon S3 or
- * is accessible by the caller. It merely validates that the bucket name is
+ * is accessible by the caller. It merely validates that the bucket name is
* valid for use with S3.
*
* @param bucketName is the bucket name to validate
* @param uriStyle gives the URI style to validate the bucket name against.
- * It is advisable to always use S3UriStyleVirtuallHost.
+ * It is advisable to always use S3UriStyleVirtualHost.
* @return One of:
- * S3StatusOK if the bucket name was validates successfully
- * S3StatusInvalidBucketNameTooLong if the bucket name exceeded the
- * length limitation for the URI style, which is 255 bytes for
- * path style URIs and 63 bytes for virtual host type URIs
- * S3StatusInvalidBucketNameTooShort if the bucket name is less than
- * 3 characters
- * S3StatusInvalidBucketNameFirstCharacter if the bucket name as an
- * invalid first character, which is anything other than
- * an alphanumeric character
- * S3StatusInvalidBucketNameCharacterSequence if the bucket name
- * includes an invalid character sequence, which for virtual host
- * style buckets is ".-" or "-."
- * S3StatusInvalidBucketNameCharacter if the bucket name includes an
- * invalid character, which is anything other than alphanumeric,
- * '-', '.', or for path style URIs only, '_'.
- * S3StatusInvalidBucketNameDotQuadNotation if the bucket name is in
- * dot-quad notation, i.e. the form of an IP address, which is
- * not allowed by Amazon S3.
+ * S3StatusOK if the bucket name was validated successfully
+ * S3StatusInvalidBucketNameTooLong if the bucket name exceeded the
+ * length limitation for the URI style, which is 255 bytes for
+ * path style URIs and 63 bytes for virtual host type URIs
+ * S3StatusInvalidBucketNameTooShort if the bucket name is less than
+ * 3 characters
+ * S3StatusInvalidBucketNameFirstCharacter if the bucket name has an
+ * invalid first character, which is anything other than
+ * an alphanumeric character
+ * S3StatusInvalidBucketNameCharacterSequence if the bucket name
+ * includes an invalid character sequence, which for virtual host
+ * style buckets is ".-" or "-."
+ * S3StatusInvalidBucketNameCharacter if the bucket name includes an
+ * invalid character, which is anything other than alphanumeric,
+ * '-', '.', or for path style URIs only, '_'.
+ * S3StatusInvalidBucketNameDotQuadNotation if the bucket name is in
+ * dot-quad notation, i.e. the form of an IP address, which is
+ * not allowed by Amazon S3.
**/
S3Status S3_validate_bucket_name(const char *bucketName, S3UriStyle uriStyle);
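As an aside (not part of the patch), a small usage sketch of the validation call described above; the helper name is hypothetical.

    #include <stdio.h>
    #include "libs3.h"

    /* Returns nonzero if 'name' is acceptable for virtual-host-style URIs,
       the stricter of the two limits described above. */
    static int bucket_name_ok(const char *name)
    {
        S3Status status = S3_validate_bucket_name(name, S3UriStyleVirtualHost);
        if (status != S3StatusOK) {
            fprintf(stderr, "invalid bucket name '%s': %s\n", name,
                    S3_get_status_name(status));
            return 0;
        }
        return 1;
    }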
/**
* Converts an XML representation of an ACL to a libs3 structured
- * representation. This method is not strictly necessary for working with
+ * representation. This method is not strictly necessary for working with
* ACLs using libs3, but may be convenient for users of the library who read
* ACLs from elsewhere in XML format and need to use these ACLs with libs3.
*
- * @param aclXml is the XML representation of the ACL. This must be a
- * zero-terminated character string.
+ * @param aclXml is the XML representation of the ACL. This must be a
+ * zero-terminated character string.
* @param ownerId will be filled in with the Owner ID specified in the XML.
- * At most MAX_GRANTEE_USER_ID_SIZE bytes will be stored at this
- * location.
+ * At most MAX_GRANTEE_USER_ID_SIZE bytes will be stored at this
+ * location.
* @param ownerDisplayName will be filled in with the Owner Display Name
- * specified in the XML. At most MAX_GRANTEE_DISPLAY_NAME_SIZE bytes
- * will be stored at this location.
+ * specified in the XML. At most MAX_GRANTEE_DISPLAY_NAME_SIZE bytes
+ * will be stored at this location.
* @param aclGrantCountReturn returns the number of S3AclGrant structures
- * returned in the aclGrantsReturned array
+ * returned in the aclGrantsReturned array
* @param aclGrants must be passed in as an array of at least S3_ACL_MAXCOUNT
- * structures, and on return from this function, the first
- * aclGrantCountReturn structures will be filled in with the ACLs
- * represented by the input XML.
+ * structures, and on return from this function, the first
+ * aclGrantCountReturn structures will be filled in with the ACLs
+ * represented by the input XML.
* @return One of:
- * S3StatusOK on successful conversion of the ACL
- * S3StatusInternalError on internal error representing a bug in the
- * libs3 library
- * S3StatusXmlParseFailure if the XML document was malformed
+ * S3StatusOK on successful conversion of the ACL
+ * S3StatusInternalError on internal error representing a bug in the
+ * libs3 library
+ * S3StatusXmlParseFailure if the XML document was malformed
**/
S3Status S3_convert_acl(char *aclXml, char *ownerId, char *ownerDisplayName,
- int *aclGrantCountReturn, S3AclGrant *aclGrants);
-
+ int *aclGrantCountReturn, S3AclGrant *aclGrants);
+
/**
* Returns nonzero if the status indicates that the request should be
@@ -1273,15 +1273,15 @@ int S3_status_is_retryable(S3Status status);
* request function has returned.
*
* @param requestContextReturn returns the newly-created S3RequestContext
- * structure, which if successfully returned, must be destroyed via a
- * call to S3_destroy_request_context when it is no longer needed. If
- * an error status is returned from this function, then
- * requestContextReturn will not have been filled in, and
- * S3_destroy_request_context should not be called on it
+ * structure, which if successfully returned, must be destroyed via a
+ * call to S3_destroy_request_context when it is no longer needed. If
+ * an error status is returned from this function, then
+ * requestContextReturn will not have been filled in, and
+ * S3_destroy_request_context should not be called on it
* @return One of:
- * S3StatusOK if the request context was successfully created
- * S3StatusOutOfMemory if the request context could not be created due
- * to an out of memory error
+ * S3StatusOK if the request context was successfully created
+ * S3StatusOutOfMemory if the request context could not be created due
+ * to an out of memory error
**/
S3Status S3_create_request_context(S3RequestContext **requestContextReturn);
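A brief, illustrative sketch of the create/destroy pairing described above (helper name hypothetical):

    #include "libs3.h"

    static S3RequestContext *make_request_context(void)
    {
        S3RequestContext *ctx = 0;
        if (S3_create_request_context(&ctx) != S3StatusOK) {
            /* ctx was not filled in, so it must not be destroyed */
            return 0;
        }
        /* Pass ctx as the requestContext argument of any request function
           to queue requests on it, then run them (see below).  The caller
           must eventually call S3_destroy_request_context(ctx). */
        return ctx;
    }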
@@ -1302,13 +1302,13 @@ void S3_destroy_request_context(S3RequestContext *requestContext);
* or until an error occurs.
*
* @param requestContext is the S3RequestContext to run until all requests
- * within it have completed or until an error occurs
+ * within it have completed or until an error occurs
* @return One of:
- * S3Status if all requests were successfully run to completion
- * S3StatusInternalError if an internal error prevented the
- * S3RequestContext from running one or more requests
- * S3StatusOutOfMemory if requests could not be run to completion
- * due to an out of memory error
+ * S3StatusOK if all requests were successfully run to completion
+ * S3StatusInternalError if an internal error prevented the
+ * S3RequestContext from running one or more requests
+ * S3StatusOutOfMemory if requests could not be run to completion
+ * due to an out of memory error
**/
S3Status S3_runall_request_context(S3RequestContext *requestContext);
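To make the non-blocking pattern concrete, an illustrative sketch (not part of the patch) that queues several HEAD requests on one context and then blocks in S3_runall_request_context; the bucket context and response handler are assumed to be set up elsewhere.

    #include "libs3.h"

    static S3Status head_many(const S3BucketContext *bucketContext,
                              const S3ResponseHandler *handler,
                              const char **keys, int keyCount)
    {
        S3RequestContext *ctx;
        S3Status status = S3_create_request_context(&ctx);
        if (status != S3StatusOK) {
            return status;
        }
        for (int i = 0; i < keyCount; i++) {
            /* A non-NULL requestContext only queues the request here. */
            S3_head_object(bucketContext, keys[i], ctx, handler, 0);
        }
        /* Runs every queued request to completion (or to an error). */
        status = S3_runall_request_context(ctx);
        S3_destroy_request_context(ctx);
        return status;
    }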
@@ -1322,22 +1322,22 @@ S3Status S3_runall_request_context(S3RequestContext *requestContext);
*
* @param requestContext is the S3RequestContext to process
* @param requestsRemainingReturn returns the number of requests remaining
- * and not yet completed within the S3RequestContext after this
- * function returns.
+ * and not yet completed within the S3RequestContext after this
+ * function returns.
* @return One of:
- * S3StatusOK if request processing proceeded without error
- * S3StatusInternalError if an internal error prevented the
- * S3RequestContext from running one or more requests
- * S3StatusOutOfMemory if requests could not be processed due to
- * an out of memory error
+ * S3StatusOK if request processing proceeded without error
+ * S3StatusInternalError if an internal error prevented the
+ * S3RequestContext from running one or more requests
+ * S3StatusOutOfMemory if requests could not be processed due to
+ * an out of memory error
**/
S3Status S3_runonce_request_context(S3RequestContext *requestContext,
- int *requestsRemainingReturn);
+ int *requestsRemainingReturn);
/**
 * This function, in conjunction with S3_runonce_request_context, allows
 * callers to manually manage a set of
- * requests using an S3RequestContext. This function returns the set of file
+ * requests using an S3RequestContext. This function returns the set of file
* descriptors which the caller can watch (typically using select()), along
* with any other file descriptors of interest to the caller, and using
* whatever timeout (if any) the caller wishes, until one or more file
@@ -1347,27 +1347,27 @@ S3Status S3_runonce_request_context(S3RequestContext *requestContext,
*
* @param requestContext is the S3RequestContext to get fd_sets from
* @param readFdSet is a pointer to an fd_set which will have all file
- * descriptors to watch for read events for the requests in the
- * S3RequestContext set into it upon return. Should be zero'd out
- * (using FD_ZERO) before being passed into this function.
+ * descriptors to watch for read events for the requests in the
+ * S3RequestContext set into it upon return. Should be zero'd out
+ * (using FD_ZERO) before being passed into this function.
* @param writeFdSet is a pointer to an fd_set which will have all file
- * descriptors to watch for write events for the requests in the
- * S3RequestContext set into it upon return. Should be zero'd out
- * (using FD_ZERO) before being passed into this function.
+ * descriptors to watch for write events for the requests in the
+ * S3RequestContext set into it upon return. Should be zero'd out
+ * (using FD_ZERO) before being passed into this function.
* @param exceptFdSet is a pointer to an fd_set which will have all file
- * descriptors to watch for exception events for the requests in the
- * S3RequestContext set into it upon return. Should be zero'd out
- * (using FD_ZERO) before being passed into this function.
+ * descriptors to watch for exception events for the requests in the
+ * S3RequestContext set into it upon return. Should be zero'd out
+ * (using FD_ZERO) before being passed into this function.
* @param maxFd returns the highest file descriptor set into any of the
- * fd_sets, or -1 if no file descriptors were set
+ * fd_sets, or -1 if no file descriptors were set
* @return One of:
- * S3StatusOK if all fd_sets were successfully set
- * S3StatusInternalError if an internal error prevented this function
- * from completing successfully
+ * S3StatusOK if all fd_sets were successfully set
+ * S3StatusInternalError if an internal error prevented this function
+ * from completing successfully
**/
S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext,
- fd_set *readFdSet, fd_set *writeFdSet,
- fd_set *exceptFdSet, int *maxFd);
+ fd_set *readFdSet, fd_set *writeFdSet,
+ fd_set *exceptFdSet, int *maxFd);
/**
@@ -1382,7 +1382,7 @@ S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext,
*
* @param requestContext is the S3RequestContext to get the timeout from
* @return the maximum number of milliseconds to select() on fdsets. Callers
- * could wait a shorter time if they wish, but not longer.
+ * could wait a shorter time if they wish, but not longer.
**/
int64_t S3_get_request_context_timeout(S3RequestContext *requestContext);
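Putting the three calls above together, an illustrative select() loop might look like the sketch below (simplified: no EINTR handling and no special treatment of corner-case timeout values).

    #include <sys/select.h>
    #include <stdint.h>
    #include "libs3.h"

    static S3Status drive_requests(S3RequestContext *ctx)
    {
        int remaining = 1;
        while (remaining) {
            fd_set readFds, writeFds, exceptFds;
            int maxFd;
            FD_ZERO(&readFds);
            FD_ZERO(&writeFds);
            FD_ZERO(&exceptFds);
            S3Status status = S3_get_request_context_fdsets
                (ctx, &readFds, &writeFds, &exceptFds, &maxFd);
            if (status != S3StatusOK) {
                return status;
            }
            if (maxFd >= 0) {
                /* Never wait longer than libs3 says is safe. */
                int64_t ms = S3_get_request_context_timeout(ctx);
                struct timeval tv;
                tv.tv_sec = (long) (ms / 1000);
                tv.tv_usec = (long) ((ms % 1000) * 1000);
                select(maxFd + 1, &readFds, &writeFds, &exceptFds, &tv);
            }
            /* Do whatever work is now possible without blocking. */
            status = S3_runonce_request_context(ctx, &remaining);
            if (status != S3StatusOK) {
                return status;
            }
        }
        return S3StatusOK;
    }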
@@ -1398,27 +1398,27 @@ int64_t S3_get_request_context_timeout(S3RequestContext *requestContext);
* of authenticated query string request.
*
* @param buffer is the output buffer for the authenticated query string.
- * It must be at least S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
- * length.
+ * It must be at least S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
+ * length.
* @param bucketContext gives the bucket and associated parameters for the
- * request to generate.
+ * request to generate.
* @param key gives the key which the authenticated request will GET.
* @param expires gives the number of seconds since Unix epoch for the
- * expiration date of the request; after this time, the request will
- * no longer be valid. If this value is negative, the largest
- * expiration date possible is used (currently, Jan 19, 2038).
+ * expiration date of the request; after this time, the request will
+ * no longer be valid. If this value is negative, the largest
+ * expiration date possible is used (currently, Jan 19, 2038).
* @param resource gives a sub-resource to be fetched for the request, or NULL
- * for none. This should be of the form "?<resource>", i.e.
- * "?torrent".
+ * for none. This should be of the form "?<resource>", i.e.
+ * "?torrent".
* @return One of:
- * S3StatusUriTooLong if, due to an internal error, the generated URI
- * is longer than S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
- * length and thus will not fit into the supplied buffer
- * S3StatusOK on success
+ * S3StatusUriTooLong if, due to an internal error, the generated URI
+ * is longer than S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
+ * length and thus will not fit into the supplied buffer
+ * S3StatusOK on success
**/
S3Status S3_generate_authenticated_query_string
- (char *buffer, const S3BucketContext *bucketContext,
- const char *key, int64_t expires, const char *resource);
+ (char *buffer, const S3BucketContext *bucketContext,
+ const char *key, int64_t expires, const char *resource);
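An illustrative use of this call (not part of the patch), generating a query string for a GET that stays valid for one hour; the bucket context is assumed to be initialized elsewhere and the helper name is hypothetical.

    #include <stdio.h>
    #include <time.h>
    #include "libs3.h"

    static void print_signed_get(const S3BucketContext *bucketContext,
                                 const char *key)
    {
        char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];
        S3Status status = S3_generate_authenticated_query_string
            (buffer, bucketContext, key, time(0) + 3600, 0 /* no resource */);
        if (status == S3StatusOK) {
            printf("%s\n", buffer);
        }
    }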
/** **************************************************************************
@@ -1430,23 +1430,23 @@ S3Status S3_generate_authenticated_query_string
*
* @param protocol gives the protocol to use for this request
* @param accessKeyId gives the Amazon Access Key ID for which to list owned
- * buckets
+ * buckets
* @param secretAccessKey gives the Amazon Secret Access Key for which to list
- * owned buckets
+ * owned buckets
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_list_service(S3Protocol protocol, const char *accessKeyId,
- const char *secretAccessKey,
- S3RequestContext *requestContext,
- const S3ListServiceHandler *handler,
- void *callbackData);
-
+ const char *secretAccessKey,
+ S3RequestContext *requestContext,
+ const S3ListServiceHandler *handler,
+ void *callbackData);
+
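For illustration (not part of this patch), a synchronous use of S3_list_service. The callback bodies are assumed to be defined elsewhere; declaring them through the libs3 typedefs avoids restating their signatures here, and the credentials are placeholders.

    #include "libs3.h"

    S3ResponsePropertiesCallback propertiesCallback;    /* defined elsewhere */
    S3ResponseCompleteCallback completeCallback;        /* defined elsewhere */
    S3ListServiceCallback listServiceCallback;          /* defined elsewhere */

    static const S3ListServiceHandler listServiceHandler =
    {
        { &propertiesCallback, &completeCallback },
        &listServiceCallback
    };

    /* A NULL requestContext makes the call synchronous. */
    static void list_owned_buckets(const char *accessKeyId,
                                   const char *secretAccessKey)
    {
        S3_list_service(S3ProtocolHTTPS, accessKeyId, secretAccessKey,
                        0 /* requestContext */, &listServiceHandler,
                        0 /* callbackData */);
    }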
/** **************************************************************************
* Bucket Functions
@@ -1459,60 +1459,60 @@ void S3_list_service(S3Protocol protocol, const char *accessKeyId,
* @param protocol gives the protocol to use for this request
* @param uriStyle gives the URI style to use for this request
* @param accessKeyId gives the Amazon Access Key ID for which to list owned
- * buckets
+ * buckets
* @param secretAccessKey gives the Amazon Secret Access Key for which to list
- * owned buckets
+ * owned buckets
* @param bucketName is the bucket name to test
* @param locationConstraintReturnSize gives the number of bytes in the
- * locationConstraintReturn parameter
+ * locationConstraintReturn parameter
* @param locationConstraintReturn provides the location into which to write
- * the name of the location constraint naming the geographic location
- * of the S3 bucket. This must have at least as many characters in it
- * as specified by locationConstraintReturn, and should start out
- * NULL-terminated. On successful completion of this request, this
- * will be set to the name of the geographic location of S3 bucket, or
- * will be left as a zero-length string if no location was available.
+ * the name of the location constraint naming the geographic location
+ * of the S3 bucket. This must have at least as many characters in it
+ * as specified by locationConstraintReturnSize, and should start out
+ * NULL-terminated. On successful completion of this request, this
+ * will be set to the name of the geographic location of the S3 bucket, or
+ * will be left as a zero-length string if no location was available.
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
- const char *accessKeyId, const char *secretAccessKey,
- const char *bucketName, int locationConstraintReturnSize,
- char *locationConstraintReturn,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ const char *accessKeyId, const char *secretAccessKey,
+ const char *bucketName, int locationConstraintReturnSize,
+ char *locationConstraintReturn,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
-
+
/**
* Creates a new bucket.
*
* @param protocol gives the protocol to use for this request
* @param accessKeyId gives the Amazon Access Key ID for which to list owned
- * buckets
+ * buckets
* @param secretAccessKey gives the Amazon Secret Access Key for which to list
- * owned buckets
+ * owned buckets
* @param bucketName is the name of the bucket to be created
* @param cannedAcl gives the "REST canned ACL" to use for the created bucket
* @param locationConstraint if non-NULL, gives the geographic location for
- * the bucket to create.
+ * the bucket to create.
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
- const char *secretAccessKey, const char *bucketName,
- S3CannedAcl cannedAcl, const char *locationConstraint,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ const char *secretAccessKey, const char *bucketName,
+ S3CannedAcl cannedAcl, const char *locationConstraint,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
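A sketch of a synchronous bucket creation (illustrative only; the credentials and bucket name are placeholders, and the callbacks are assumed to be wired up as in the earlier sketches):

    #include "libs3.h"

    S3ResponsePropertiesCallback propertiesCallback;    /* defined elsewhere */
    S3ResponseCompleteCallback completeCallback;        /* defined elsewhere */

    static const S3ResponseHandler responseHandler =
    {
        &propertiesCallback, &completeCallback
    };

    static void create_bucket_example(void)
    {
        /* NULL locationConstraint: default location; NULL requestContext:
           perform the request synchronously. */
        S3_create_bucket(S3ProtocolHTTPS, "ACCESS_KEY_ID", "SECRET_ACCESS_KEY",
                         "my-example-bucket", S3CannedAclPrivate, 0, 0,
                         &responseHandler, 0 /* callbackData */);
    }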
/**
@@ -1522,49 +1522,49 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
* @param protocol gives the protocol to use for this request
* @param uriStyle gives the URI style to use for this request
* @param accessKeyId gives the Amazon Access Key ID for which to list owned
- * buckets
+ * buckets
* @param secretAccessKey gives the Amazon Secret Access Key for which to list
- * owned buckets
+ * owned buckets
* @param bucketName is the name of the bucket to be deleted
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
- const char *accessKeyId, const char *secretAccessKey,
- const char *bucketName, S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ const char *accessKeyId, const char *secretAccessKey,
+ const char *bucketName, S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
/**
* Lists keys within a bucket.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param prefix if present, gives a prefix for matching keys
 * @param marker if present, only keys occurring after this value will be
- * listed
+ * listed
* @param delimiter if present, causes keys that contain the same string
- * between the prefix and the first occurrence of the delimiter to be
- * rolled up into a single result element
+ * between the prefix and the first occurrence of the delimiter to be
+ * rolled up into a single result element
* @param maxkeys is the maximum number of keys to return
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_list_bucket(const S3BucketContext *bucketContext,
- const char *prefix, const char *marker,
- const char *delimiter, int maxkeys,
- S3RequestContext *requestContext,
- const S3ListBucketHandler *handler, void *callbackData);
+ const char *prefix, const char *marker,
+ const char *delimiter, int maxkeys,
+ S3RequestContext *requestContext,
+ const S3ListBucketHandler *handler, void *callbackData);
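Illustrative only: listing up to 100 keys under a prefix, synchronously. The bucket context and the callback bodies are assumed to exist elsewhere.

    #include "libs3.h"

    S3ResponsePropertiesCallback propertiesCallback;    /* defined elsewhere */
    S3ResponseCompleteCallback completeCallback;        /* defined elsewhere */
    S3ListBucketCallback listBucketCallback;            /* defined elsewhere */

    static const S3ListBucketHandler listBucketHandler =
    {
        { &propertiesCallback, &completeCallback },
        &listBucketCallback
    };

    static void list_photos(const S3BucketContext *bucketContext)
    {
        /* "/" as the delimiter rolls common prefixes up into single
           results, as described above. */
        S3_list_bucket(bucketContext, "photos/", 0 /* marker */, "/", 100,
                       0 /* requestContext */, &listBucketHandler,
                       0 /* callbackData */);
    }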
/** **************************************************************************
@@ -1572,31 +1572,31 @@ void S3_list_bucket(const S3BucketContext *bucketContext,
************************************************************************** **/
/**
- * Puts object data to S3. This overwrites any existing object at that key;
- * note that S3 currently only supports full-object upload. The data to
+ * Puts object data to S3. This overwrites any existing object at that key;
+ * note that S3 currently only supports full-object upload. The data to
* upload will be acquired by calling the handler's putObjectDataCallback.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param key is the key of the object to put to
* @param contentLength is required and gives the total number of bytes that
- * will be put
+ * will be put
* @param putProperties optionally provides additional properties to apply to
- * the object that is being put to
+ * the object that is being put to
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_put_object(const S3BucketContext *bucketContext, const char *key,
- uint64_t contentLength,
- const S3PutProperties *putProperties,
- S3RequestContext *requestContext,
- const S3PutObjectHandler *handler, void *callbackData);
-
+ uint64_t contentLength,
+ const S3PutProperties *putProperties,
+ S3RequestContext *requestContext,
+ const S3PutObjectHandler *handler, void *callbackData);
+
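For illustration (not part of the patch): uploading the contents of an open FILE via the putObjectDataCallback. The bucket context is assumed to be initialized elsewhere, and contentLength must match the number of bytes the callback will supply.

    #include <stdio.h>
    #include <stdint.h>
    #include "libs3.h"

    S3ResponsePropertiesCallback propertiesCallback;    /* defined elsewhere */
    S3ResponseCompleteCallback completeCallback;        /* defined elsewhere */

    /* Supplies the object contents one chunk at a time; returning 0 tells
       libs3 that all of the data has been provided. */
    static int putObjectDataCallback(int bufferSize, char *buffer,
                                     void *callbackData)
    {
        return (int) fread(buffer, 1, bufferSize, (FILE *) callbackData);
    }

    static const S3PutObjectHandler putObjectHandler =
    {
        { &propertiesCallback, &completeCallback },
        &putObjectDataCallback
    };

    static void put_file(const S3BucketContext *bucketContext,
                         const char *key, FILE *in, uint64_t contentLength)
    {
        S3_put_object(bucketContext, key, contentLength,
                      0 /* putProperties */, 0 /* requestContext */,
                      &putObjectHandler, in /* callbackData */);
    }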
/**
* Copies an object from one location to another. The object may be copied
@@ -1604,106 +1604,106 @@ void S3_put_object(const S3BucketContext *bucketContext, const char *key,
* the object.
*
* @param bucketContext gives the source bucket and associated parameters for
- * this request
+ * this request
* @param key is the source key
* @param destinationBucket gives the destination bucket into which to copy
- * the object. If NULL, the source bucket will be used.
+ * the object. If NULL, the source bucket will be used.
* @param destinationKey gives the destination key into which to copy the
- * object. If NULL, the source key will be used.
+ * object. If NULL, the source key will be used.
* @param putProperties optionally provides properties to apply to the object
- * that is being put to. If not supplied (i.e. NULL is passed in),
- * then the copied object will retain the metadata of the copied
- * object.
+ * that is being put to. If not supplied (i.e. NULL is passed in),
+ * then the copied object will retain the metadata of the source
+ * object.
* @param lastModifiedReturn returns the last modified date of the copied
- * object
+ * object
* @param eTagReturnSize specifies the number of bytes provided in the
- * eTagReturn buffer
+ * eTagReturn buffer
* @param eTagReturn is a buffer into which the resulting eTag of the copied
- * object will be written
+ * object will be written
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_copy_object(const S3BucketContext *bucketContext,
- const char *key, const char *destinationBucket,
- const char *destinationKey,
- const S3PutProperties *putProperties,
- int64_t *lastModifiedReturn, int eTagReturnSize,
- char *eTagReturn, S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ const char *key, const char *destinationBucket,
+ const char *destinationKey,
+ const S3PutProperties *putProperties,
+ int64_t *lastModifiedReturn, int eTagReturnSize,
+ char *eTagReturn, S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
/**
- * Gets an object from S3. The contents of the object are returned in the
+ * Gets an object from S3. The contents of the object are returned in the
* handler's getObjectDataCallback.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param key is the key of the object to get
* @param getConditions if non-NULL, gives a set of conditions which must be
- * met in order for the request to succeed
+ * met in order for the request to succeed
* @param startByte gives the start byte for the byte range of the contents
- * to be returned
+ * to be returned
* @param byteCount gives the number of bytes to return; a value of 0
- * indicates that the contents up to the end should be returned
+ * indicates that the contents up to the end should be returned
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_get_object(const S3BucketContext *bucketContext, const char *key,
- const S3GetConditions *getConditions,
- uint64_t startByte, uint64_t byteCount,
- S3RequestContext *requestContext,
- const S3GetObjectHandler *handler, void *callbackData);
+ const S3GetConditions *getConditions,
+ uint64_t startByte, uint64_t byteCount,
+ S3RequestContext *requestContext,
+ const S3GetObjectHandler *handler, void *callbackData);
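An illustrative counterpart to the upload sketch above: downloading a whole object into an open FILE. The bucket context is assumed to be initialized elsewhere; any status other than S3StatusOK returned from the data callback aborts the transfer.

    #include <stdio.h>
    #include "libs3.h"

    S3ResponsePropertiesCallback propertiesCallback;    /* defined elsewhere */
    S3ResponseCompleteCallback completeCallback;        /* defined elsewhere */

    /* Receives the object contents one chunk at a time. */
    static S3Status getObjectDataCallback(int bufferSize, const char *buffer,
                                          void *callbackData)
    {
        size_t written = fwrite(buffer, 1, bufferSize, (FILE *) callbackData);
        /* any non-OK status aborts the request */
        return (written == (size_t) bufferSize) ? S3StatusOK
                                                : S3StatusInternalError;
    }

    static const S3GetObjectHandler getObjectHandler =
    {
        { &propertiesCallback, &completeCallback },
        &getObjectDataCallback
    };

    static void get_to_file(const S3BucketContext *bucketContext,
                            const char *key, FILE *out)
    {
        /* startByte 0 and byteCount 0 request the entire object. */
        S3_get_object(bucketContext, key, 0 /* getConditions */, 0, 0,
                      0 /* requestContext */, &getObjectHandler, out);
    }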
/**
* Gets the response properties for the object, but not the object contents.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param key is the key of the object to get the properties of
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_head_object(const S3BucketContext *bucketContext, const char *key,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
-
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
+
/**
* Deletes an object from S3.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param key is the key of the object to delete
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
/** **************************************************************************
@@ -1714,65 +1714,65 @@ void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
* Gets the ACL for the given bucket or object.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param key is the key of the object to get the ACL of; or NULL to get the
- * ACL of the bucket
+ * ACL of the bucket
* @param ownerId must be supplied as a buffer of at least
- * S3_MAX_GRANTEE_USER_ID_SIZE bytes, and will be filled in with the
- * owner ID of the object/bucket
+ * S3_MAX_GRANTEE_USER_ID_SIZE bytes, and will be filled in with the
+ * owner ID of the object/bucket
* @param ownerDisplayName must be supplied as a buffer of at least
- * S3_MAX_GRANTEE_DISPLAY_NAME_SIZE bytes, and will be filled in with
- * the display name of the object/bucket
+ * S3_MAX_GRANTEE_DISPLAY_NAME_SIZE bytes, and will be filled in with
+ * the display name of the object/bucket
* @param aclGrantCountReturn returns the number of S3AclGrant structures
- * returned in the aclGrants parameter
+ * returned in the aclGrants parameter
* @param aclGrants must be passed in as an array of at least
- * S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, which will be filled
- * in with the grant information for the ACL
+ * S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, which will be filled
+ * in with the grant information for the ACL
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_get_acl(const S3BucketContext *bucketContext, const char *key,
- char *ownerId, char *ownerDisplayName,
- int *aclGrantCountReturn, S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ char *ownerId, char *ownerDisplayName,
+ int *aclGrantCountReturn, S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
/**
* Sets the ACL for the given bucket or object.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request
+ * request
* @param key is the key of the object to set the ACL for; or NULL to set the
- * ACL for the bucket
- * @param ownerId is the owner ID of the object/bucket. Unfortunately, S3
- * requires this to be valid and thus it must have been fetched by a
- * previous S3 request, such as a list_buckets request.
+ * ACL for the bucket
+ * @param ownerId is the owner ID of the object/bucket. Unfortunately, S3
+ * requires this to be valid and thus it must have been fetched by a
+ * previous S3 request, such as a list_buckets request.
* @param ownerDisplayName is the owner display name of the object/bucket.
- * Unfortunately, S3 requires this to be valid and thus it must have
- * been fetched by a previous S3 request, such as a list_buckets
- * request.
+ * Unfortunately, S3 requires this to be valid and thus it must have
+ * been fetched by a previous S3 request, such as a list_buckets
+ * request.
* @param aclGrantCount is the number of ACL grants to set for the
- * object/bucket
+ * object/bucket
* @param aclGrants are the ACL grants to set for the object/bucket
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
- const char *ownerId, const char *ownerDisplayName,
- int aclGrantCount, const S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData);
+ const char *ownerId, const char *ownerDisplayName,
+ int aclGrantCount, const S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData);
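An illustrative round trip (not part of the patch): read an object's ACL and write the same grants back. The buffer-size macros and S3AclGrant are the ones referenced above; the bucket context and the callback bodies are assumed to exist elsewhere, and status checking via the complete callback is omitted.

    #include "libs3.h"

    S3ResponsePropertiesCallback propertiesCallback;    /* defined elsewhere */
    S3ResponseCompleteCallback completeCallback;        /* defined elsewhere */

    static const S3ResponseHandler responseHandler =
    {
        &propertiesCallback, &completeCallback
    };

    static void copy_acl_back(const S3BucketContext *bucketContext,
                              const char *key)
    {
        char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
        char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
        S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
        int aclGrantCount = 0;

        S3_get_acl(bucketContext, key, ownerId, ownerDisplayName,
                   &aclGrantCount, aclGrants, 0 /* requestContext */,
                   &responseHandler, 0 /* callbackData */);

        S3_set_acl(bucketContext, key, ownerId, ownerDisplayName,
                   aclGrantCount, aclGrants, 0 /* requestContext */,
                   &responseHandler, 0 /* callbackData */);
    }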
/** **************************************************************************
@@ -1786,44 +1786,44 @@ void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
* settings controlling how these logs will be written.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request; this is the bucket for which service access logging is
- * being requested
+ * request; this is the bucket for which service access logging is
+ * being requested
* @param targetBucketReturn must be passed in as a buffer of at least
- * (S3_MAX_BUCKET_NAME_SIZE + 1) bytes in length, and will be filled
- * in with the target bucket name for access logging for the given
- * bucket, which is the bucket into which access logs for the specified
- * bucket will be written. This is returned as an empty string if
- * service access logging is not enabled for the given bucket.
+ * (S3_MAX_BUCKET_NAME_SIZE + 1) bytes in length, and will be filled
+ * in with the target bucket name for access logging for the given
+ * bucket, which is the bucket into which access logs for the specified
+ * bucket will be written. This is returned as an empty string if
+ * service access logging is not enabled for the given bucket.
* @param targetPrefixReturn must be passed in as a buffer of at least
- * (S3_MAX_KEY_SIZE + 1) bytes in length, and will be filled in
- * with the key prefix for server access logs for the given bucket,
- * or the empty string if no such prefix is specified.
+ * (S3_MAX_KEY_SIZE + 1) bytes in length, and will be filled in
+ * with the key prefix for server access logs for the given bucket,
+ * or the empty string if no such prefix is specified.
* @param aclGrantCountReturn returns the number of ACL grants that are
- * associated with the server access logging for the given bucket.
+ * associated with the server access logging for the given bucket.
* @param aclGrants must be passed in as an array of at least
- * S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, and these will be
- * filled in with the target grants associated with the server access
- * logging for the given bucket, whose number is returned in the
- * aclGrantCountReturn parameter. These grants will be applied to the
- * ACL of any server access logging log files generated by the S3
- * service for the given bucket.
+ * S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, and these will be
+ * filled in with the target grants associated with the server access
+ * logging for the given bucket, whose number is returned in the
+ * aclGrantCountReturn parameter. These grants will be applied to the
+ * ACL of any server access logging log files generated by the S3
+ * service for the given bucket.
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_get_server_access_logging(const S3BucketContext *bucketContext,
- char *targetBucketReturn,
- char *targetPrefixReturn,
- int *aclGrantCountReturn,
- S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler,
- void *callbackData);
-
+ char *targetBucketReturn,
+ char *targetPrefixReturn,
+ int *aclGrantCountReturn,
+ S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler,
+ void *callbackData);
+
/**
* Sets the service access logging settings for a bucket. The service access
@@ -1832,36 +1832,36 @@ void S3_get_server_access_logging(const S3BucketContext *bucketContext,
* settings controlling how these logs will be written.
*
* @param bucketContext gives the bucket and associated parameters for this
- * request; this is the bucket for which service access logging is
- * being set
+ * request; this is the bucket for which service access logging is
+ * being set
* @param targetBucket gives the target bucket name for access logging for the
- * given bucket, which is the bucket into which access logs for the
- * specified bucket will be written.
+ * given bucket, which is the bucket into which access logs for the
+ * specified bucket will be written.
 * @param targetPrefix is an optional parameter which specifies the key prefix
- * for server access logs for the given bucket, or NULL if no such
- * prefix is to be used.
+ * for server access logs for the given bucket, or NULL if no such
+ * prefix is to be used.
* @param aclGrantCount specifies the number of ACL grants that are to be
- * associated with the server access logging for the given bucket.
+ * associated with the server access logging for the given bucket.
* @param aclGrants is as an array of S3AclGrant structures, whose number is
- * given by the aclGrantCount parameter. These grants will be applied
- * to the ACL of any server access logging log files generated by the
- * S3 service for the given bucket.
+ * given by the aclGrantCount parameter. These grants will be applied
+ * to the ACL of any server access logging log files generated by the
+ * S3 service for the given bucket.
* @param requestContext if non-NULL, gives the S3RequestContext to add this
- * request to, and does not perform the request immediately. If NULL,
- * performs the request immediately and synchronously.
+ * request to, and does not perform the request immediately. If NULL,
+ * performs the request immediately and synchronously.
* @param handler gives the callbacks to call as the request is processed and
- * completed
+ * completed
* @param callbackData will be passed in as the callbackData parameter to
- * all callbacks for this request
+ * all callbacks for this request
**/
void S3_set_server_access_logging(const S3BucketContext *bucketContext,
- const char *targetBucket,
- const char *targetPrefix, int aclGrantCount,
- const S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler,
- void *callbackData);
-
+ const char *targetBucket,
+ const char *targetPrefix, int aclGrantCount,
+ const S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler,
+ void *callbackData);
+
#ifdef __cplusplus
}
diff --git a/inc/mingw/pthread.h b/inc/mingw/pthread.h
index 44a029a..674a62a 100644
--- a/inc/mingw/pthread.h
+++ b/inc/mingw/pthread.h
@@ -34,7 +34,7 @@ unsigned long pthread_self();
typedef struct
{
- CRITICAL_SECTION criticalSection;
+ CRITICAL_SECTION criticalSection;
} pthread_mutex_t;
int pthread_mutex_init(pthread_mutex_t *mutex, void *);
diff --git a/inc/mingw/sys/utsname.h b/inc/mingw/sys/utsname.h
index 1501b84..1e6b470 100644
--- a/inc/mingw/sys/utsname.h
+++ b/inc/mingw/sys/utsname.h
@@ -32,8 +32,8 @@
struct utsname
{
- const char *sysname;
- const char *machine;
+ const char *sysname;
+ const char *machine;
};
int uname(struct utsname *);
diff --git a/inc/request.h b/inc/request.h
index cbfaf67..afb4929 100644
--- a/inc/request.h
+++ b/inc/request.h
@@ -35,71 +35,71 @@
// Describes a type of HTTP request (these are our supported HTTP "verbs")
typedef enum
{
- HttpRequestTypeGET,
- HttpRequestTypeHEAD,
- HttpRequestTypePUT,
- HttpRequestTypeCOPY,
- HttpRequestTypeDELETE
+ HttpRequestTypeGET,
+ HttpRequestTypeHEAD,
+ HttpRequestTypePUT,
+ HttpRequestTypeCOPY,
+ HttpRequestTypeDELETE
} HttpRequestType;
-// This completely describes a request. A RequestParams is not required to be
+// This completely describes a request. A RequestParams is not required to be
// allocated from the heap and its lifetime is not assumed to extend beyond
// the lifetime of the function to which it has been passed.
typedef struct RequestParams
{
- // Request type, affects the HTTP verb used
- HttpRequestType httpRequestType;
+ // Request type, affects the HTTP verb used
+ HttpRequestType httpRequestType;
- // Bucket context for request
- S3BucketContext bucketContext;
+ // Bucket context for request
+ S3BucketContext bucketContext;
- // Key, if any
- const char *key;
+ // Key, if any
+ const char *key;
- // Query params - ready to append to URI (i.e. ?p1=v1?p2=v2)
- const char *queryParams;
+ // Query params - ready to append to URI (e.g. ?p1=v1&p2=v2)
+ const char *queryParams;
- // sub resource, like ?acl, ?location, ?torrent, ?logging
- const char *subResource;
+ // sub resource, like ?acl, ?location, ?torrent, ?logging
+ const char *subResource;
- // If this is a copy operation, this gives the source bucket
- const char *copySourceBucketName;
+ // If this is a copy operation, this gives the source bucket
+ const char *copySourceBucketName;
- // If this is a copy operation, this gives the source key
- const char *copySourceKey;
+ // If this is a copy operation, this gives the source key
+ const char *copySourceKey;
- // Get conditions
- const S3GetConditions *getConditions;
+ // Get conditions
+ const S3GetConditions *getConditions;
- // Start byte
- uint64_t startByte;
+ // Start byte
+ uint64_t startByte;
- // Byte count
- uint64_t byteCount;
+ // Byte count
+ uint64_t byteCount;
- // Put properties
- const S3PutProperties *putProperties;
+ // Put properties
+ const S3PutProperties *putProperties;
- // Callback to be made when headers are available. Might not be called.
- S3ResponsePropertiesCallback *propertiesCallback;
+ // Callback to be made when headers are available. Might not be called.
+ S3ResponsePropertiesCallback *propertiesCallback;
- // Callback to be made to supply data to send to S3. Might not be called.
- S3PutObjectDataCallback *toS3Callback;
+ // Callback to be made to supply data to send to S3. Might not be called.
+ S3PutObjectDataCallback *toS3Callback;
- // Number of bytes total that readCallback will supply
- int64_t toS3CallbackTotalSize;
+ // Number of bytes total that readCallback will supply
+ int64_t toS3CallbackTotalSize;
- // Callback to be made that supplies data read from S3.
- // Might not be called.
- S3GetObjectDataCallback *fromS3Callback;
+ // Callback to be made that supplies data read from S3.
+ // Might not be called.
+ S3GetObjectDataCallback *fromS3Callback;
- // Callback to be made when request is complete. This will *always* be
- // called.
- S3ResponseCompleteCallback *completeCallback;
+ // Callback to be made when request is complete. This will *always* be
+ // called.
+ S3ResponseCompleteCallback *completeCallback;
- // Data passed to the callbacks
- void *callbackData;
+ // Data passed to the callbacks
+ void *callbackData;
} RequestParams;
@@ -107,57 +107,57 @@ typedef struct RequestParams
// (and thus live while a curl_multi is in use).
typedef struct Request
{
- // These put the request on a doubly-linked list of requests in a
- // request context, *if* the request is in a request context (else these
- // will both be 0)
- struct Request *prev, *next;
+ // These put the request on a doubly-linked list of requests in a
+ // request context, *if* the request is in a request context (else these
+ // will both be 0)
+ struct Request *prev, *next;
- // The status of this Request, as will be reported to the user via the
- // complete callback
- S3Status status;
+ // The status of this Request, as will be reported to the user via the
+ // complete callback
+ S3Status status;
- // The HTTP code returned by the S3 server, if it is known. Would rather
- // not have to keep track of this but S3 doesn't always indicate its
- // errors the same way
- int httpResponseCode;
+ // The HTTP code returned by the S3 server, if it is known. Would rather
+ // not have to keep track of this but S3 doesn't always indicate its
+ // errors the same way
+ int httpResponseCode;
- // The HTTP headers to use for the curl request
- struct curl_slist *headers;
+ // The HTTP headers to use for the curl request
+ struct curl_slist *headers;
- // The CURL structure driving the request
- CURL *curl;
+ // The CURL structure driving the request
+ CURL *curl;
- // libcurl requires that the uri be stored outside of the curl handle
- char uri[MAX_URI_SIZE + 1];
+ // libcurl requires that the uri be stored outside of the curl handle
+ char uri[MAX_URI_SIZE + 1];
- // Callback to be made when headers are available. Might not be called.
- S3ResponsePropertiesCallback *propertiesCallback;
+ // Callback to be made when headers are available. Might not be called.
+ S3ResponsePropertiesCallback *propertiesCallback;
- // Callback to be made to supply data to send to S3. Might not be called.
- S3PutObjectDataCallback *toS3Callback;
+ // Callback to be made to supply data to send to S3. Might not be called.
+ S3PutObjectDataCallback *toS3Callback;
- // Number of bytes total that readCallback has left to supply
- int64_t toS3CallbackBytesRemaining;
+ // Number of bytes total that readCallback has left to supply
+ int64_t toS3CallbackBytesRemaining;
- // Callback to be made that supplies data read from S3.
- // Might not be called.
- S3GetObjectDataCallback *fromS3Callback;
+ // Callback to be made that supplies data read from S3.
+ // Might not be called.
+ S3GetObjectDataCallback *fromS3Callback;
- // Callback to be made when request is complete. This will *always* be
- // called.
- S3ResponseCompleteCallback *completeCallback;
+ // Callback to be made when request is complete. This will *always* be
+ // called.
+ S3ResponseCompleteCallback *completeCallback;
- // Data passed to the callbacks
- void *callbackData;
+ // Data passed to the callbacks
+ void *callbackData;
- // Handler of response headers
- ResponseHeadersHandler responseHeadersHandler;
+ // Handler of response headers
+ ResponseHeadersHandler responseHeadersHandler;
- // This is set to nonzero after the properties callback has been made
- int propertiesCallbackMade;
+ // This is set to nonzero after the properties callback has been made
+ int propertiesCallbackMade;
- // Parser of errors
- ErrorParser errorParser;
+ // Parser of errors
+ ErrorParser errorParser;
} Request;
diff --git a/inc/request_context.h b/inc/request_context.h
index 1aa940b..8074c50 100644
--- a/inc/request_context.h
+++ b/inc/request_context.h
@@ -31,9 +31,9 @@
struct S3RequestContext
{
- CURLM *curlm;
+ CURLM *curlm;
- struct Request *requests;
+ struct Request *requests;
};
diff --git a/inc/response_headers_handler.h b/inc/response_headers_handler.h
index 7491035..2813e9a 100644
--- a/inc/response_headers_handler.h
+++ b/inc/response_headers_handler.h
@@ -34,31 +34,31 @@
typedef struct ResponseHeadersHandler
{
- // The structure to pass to the headers callback. This is filled in by
- // the ResponseHeadersHandler from the headers added to it.
- S3ResponseProperties responseProperties;
+ // The structure to pass to the headers callback. This is filled in by
+ // the ResponseHeadersHandler from the headers added to it.
+ S3ResponseProperties responseProperties;
- // Set to 1 after the done call has been made
- int done;
+ // Set to 1 after the done call has been made
+ int done;
- // copied into here. We allow 128 bytes for each header, plus \0 term.
- string_multibuffer(responsePropertyStrings, 5 * 129);
+ // copied into here. We allow 128 bytes for each header, plus \0 term.
+ string_multibuffer(responsePropertyStrings, 5 * 129);
- // responseproperties.metaHeaders strings get copied into here
- string_multibuffer(responseMetaDataStrings,
- COMPACTED_METADATA_BUFFER_SIZE);
+ // responseproperties.metaHeaders strings get copied into here
+ string_multibuffer(responseMetaDataStrings,
+ COMPACTED_METADATA_BUFFER_SIZE);
- // Response meta data
- S3NameValue responseMetaData[S3_MAX_METADATA_COUNT];
+ // Response meta data
+ S3NameValue responseMetaData[S3_MAX_METADATA_COUNT];
} ResponseHeadersHandler;
void response_headers_handler_initialize(ResponseHeadersHandler *handler);
void response_headers_handler_add(ResponseHeadersHandler *handler,
- char *data, int dataLen);
+ char *data, int dataLen);
void response_headers_handler_done(ResponseHeadersHandler *handler,
- CURL *curl);
+ CURL *curl);
#endif /* RESPONSE_HEADERS_HANDLER_H */
diff --git a/inc/simplexml.h b/inc/simplexml.h
index 74445a2..66d5ef9 100644
--- a/inc/simplexml.h
+++ b/inc/simplexml.h
@@ -41,21 +41,21 @@
//
// data is passed in as 0 on end of element
typedef S3Status (SimpleXmlCallback)(const char *elementPath, const char *data,
- int dataLen, void *callbackData);
+ int dataLen, void *callbackData);
typedef struct SimpleXml
{
- void *xmlParser;
+ void *xmlParser;
- SimpleXmlCallback *callback;
+ SimpleXmlCallback *callback;
- void *callbackData;
+ void *callbackData;
- char elementPath[512];
+ char elementPath[512];
- int elementPathLen;
+ int elementPathLen;
- S3Status status;
+ S3Status status;
} SimpleXml;
@@ -64,7 +64,7 @@ typedef struct SimpleXml
// Always call this, even if the simplexml doesn't end up being used
void simplexml_initialize(SimpleXml *simpleXml, SimpleXmlCallback *callback,
- void *callbackData);
+ void *callbackData);
S3Status simplexml_add(SimpleXml *simpleXml, const char *data, int dataLen);
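
SimpleXml feeds a single callback with slash-separated element paths (see listBucketXmlCallback in src/bucket.c further down). A minimal usage sketch, assuming simplexml.h brings in the S3Status declarations it refers to:

#include <stdio.h>
#include <string.h>
#include "simplexml.h"

// Print every piece of character data with its element path; data arrives
// as 0 when an element closes, which this sketch simply ignores.
static S3Status printXmlData(const char *elementPath, const char *data,
                             int dataLen, void *callbackData)
{
    (void) callbackData;
    if (data) {
        printf("%s = %.*s\n", elementPath, dataLen, data);
    }
    return S3StatusOK;
}

static S3Status parseExample(void)
{
    const char *doc = "<Root><Name>example</Name></Root>";
    SimpleXml parser;
    simplexml_initialize(&parser, &printXmlData, 0);
    S3Status status = simplexml_add(&parser, doc, (int) strlen(doc));
    simplexml_deinitialize(&parser);
    return status;
}
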
diff --git a/inc/string_buffer.h b/inc/string_buffer.h
index 1b96b58..eed9bd4 100644
--- a/inc/string_buffer.h
+++ b/inc/string_buffer.h
@@ -31,77 +31,77 @@
// Declare a string_buffer with the given name of the given maximum length
-#define string_buffer(name, len) \
- char name[len + 1]; \
- int name##Len
+#define string_buffer(name, len) \
+ char name[len + 1]; \
+ int name##Len
// Initialize a string_buffer
-#define string_buffer_initialize(sb) \
- do { \
- sb[0] = 0; \
- sb##Len = 0; \
- } while (0)
+#define string_buffer_initialize(sb) \
+ do { \
+ sb[0] = 0; \
+ sb##Len = 0; \
+ } while (0)
// Append [len] bytes of [str] to [sb], setting [all_fit] to 1 if it fit, and
// 0 if it did not
-#define string_buffer_append(sb, str, len, all_fit) \
- do { \
- sb##Len += snprintf(&(sb[sb##Len]), sizeof(sb) - sb##Len - 1, \
- "%.*s", (int) (len), str); \
- if (sb##Len > (int) (sizeof(sb) - 1)) { \
- sb##Len = sizeof(sb) - 1; \
- all_fit = 0; \
- } \
- else { \
- all_fit = 1; \
- } \
- } while (0)
+#define string_buffer_append(sb, str, len, all_fit) \
+ do { \
+ sb##Len += snprintf(&(sb[sb##Len]), sizeof(sb) - sb##Len - 1, \
+ "%.*s", (int) (len), str); \
+ if (sb##Len > (int) (sizeof(sb) - 1)) { \
+ sb##Len = sizeof(sb) - 1; \
+ all_fit = 0; \
+ } \
+ else { \
+ all_fit = 1; \
+ } \
+ } while (0)
// Declare a string multibuffer with the given name of the given maximum size
-#define string_multibuffer(name, size) \
- char name[size]; \
- int name##Size
+#define string_multibuffer(name, size) \
+ char name[size]; \
+ int name##Size
// Initialize a string_multibuffer
-#define string_multibuffer_initialize(smb) \
- do { \
- smb##Size = 0; \
- } while (0)
+#define string_multibuffer_initialize(smb) \
+ do { \
+ smb##Size = 0; \
+ } while (0)
// Evaluates to the current string within the string_multibuffer
-#define string_multibuffer_current(smb) \
- &(smb[smb##Size])
+#define string_multibuffer_current(smb) \
+ &(smb[smb##Size])
// Adds a new string to the string_multibuffer
-#define string_multibuffer_add(smb, str, len, all_fit) \
- do { \
- smb##Size += (snprintf(&(smb[smb##Size]), \
- sizeof(smb) - smb##Size, \
- "%.*s", (int) (len), str) + 1); \
- if (smb##Size > (int) sizeof(smb)) { \
- smb##Size = sizeof(smb); \
- all_fit = 0; \
- } \
- else { \
- all_fit = 1; \
- } \
- } while (0)
-
-
-// Appends to the current string in the string_multibuffer. There must be a
+#define string_multibuffer_add(smb, str, len, all_fit) \
+ do { \
+ smb##Size += (snprintf(&(smb[smb##Size]), \
+ sizeof(smb) - smb##Size, \
+ "%.*s", (int) (len), str) + 1); \
+ if (smb##Size > (int) sizeof(smb)) { \
+ smb##Size = sizeof(smb); \
+ all_fit = 0; \
+ } \
+ else { \
+ all_fit = 1; \
+ } \
+ } while (0)
+
+
+// Appends to the current string in the string_multibuffer. There must be a
// current string, meaning that string_multibuffer_add must have been called
// at least once for this string_multibuffer.
-#define string_multibuffer_append(smb, str, len, all_fit) \
- do { \
- smb##Size--; \
- string_multibuffer_add(smb, str, len, all_fit); \
- } while (0)
+#define string_multibuffer_append(smb, str, len, all_fit) \
+ do { \
+ smb##Size--; \
+ string_multibuffer_add(smb, str, len, all_fit); \
+ } while (0)
#endif /* STRING_BUFFER_H */
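
A short standalone usage sketch of the string_buffer macros above, with an illustrative 15-character capacity:

#include <stdio.h>
#include "string_buffer.h"

int main(void)
{
    // Declares char greeting[16] plus an int greetingLen tracking length.
    string_buffer(greeting, 15);
    string_buffer_initialize(greeting);

    int fit;
    string_buffer_append(greeting, "hello, ", 7, fit);
    string_buffer_append(greeting, "world", 5, fit);

    // 12 characters fit comfortably in 15, so fit is still 1 here.
    printf("%s (len=%d, fit=%d)\n", greeting, greetingLen, fit);

    return 0;
}
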
diff --git a/inc/util.h b/inc/util.h
index e38a5e8..0ed580e 100644
--- a/inc/util.h
+++ b/inc/util.h
@@ -38,7 +38,7 @@
// This is the maximum number of bytes needed in a "compacted meta header"
// buffer, which is a buffer storing all of the compacted meta headers.
#define COMPACTED_METADATA_BUFFER_SIZE \
- (S3_MAX_METADATA_COUNT * sizeof(S3_METADATA_HEADER_NAME_PREFIX "n: v"))
+ (S3_MAX_METADATA_COUNT * sizeof(S3_METADATA_HEADER_NAME_PREFIX "n: v"))
// Maximum url encoded key size; since every single character could require
// URL encoding, it's 3 times the size of a key (since each url encoded
@@ -49,18 +49,18 @@
// https://s3.amazonaws.com/${BUCKET}/${KEY}?acl
// 255 is the maximum bucket length
#define MAX_URI_SIZE \
- ((sizeof("https://" S3_HOSTNAME "/") - 1) + 255 + 1 + \
- MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent" - 1)) + 1)
+ ((sizeof("https://" S3_HOSTNAME "/") - 1) + 255 + 1 + \
+     MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent") - 1) + 1)
// Maximum size of a canonicalized resource
#define MAX_CANONICALIZED_RESOURCE_SIZE \
- (1 + 255 + 1 + MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent") - 1) + 1)
+ (1 + 255 + 1 + MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent") - 1) + 1)
// Utilities -----------------------------------------------------------------
-// URL-encodes a string from [src] into [dest]. [dest] must have at least
-// 3x the number of characters that [source] has. At most [maxSrcSize] bytes
+// URL-encodes a string from [src] into [dest]. [dest] must have at least
+// 3x the number of characters that [source] has. At most [maxSrcSize] bytes
// from [src] are encoded; if more are present in [src], 0 is returned from
// urlEncode, else nonzero is returned.
int urlEncode(char *dest, const char *src, int maxSrcSize);
@@ -70,15 +70,15 @@ int64_t parseIso8601Time(const char *str);
uint64_t parseUnsignedInt(const char *str);
-// base64 encode bytes. The output buffer must have at least
-// ((4 * (inLen + 1)) / 3) bytes in it. Returns the number of bytes written
+// base64 encode bytes. The output buffer must have at least
+// ((4 * (inLen + 1)) / 3) bytes in it. Returns the number of bytes written
// to [out].
int base64Encode(const unsigned char *in, int inLen, char *out);
// Compute HMAC-SHA-1 with key [key] and message [message], storing result
// in [hmac]
void HMAC_SHA1(unsigned char hmac[20], const unsigned char *key, int key_len,
- const unsigned char *message, int message_len);
+ const unsigned char *message, int message_len);
// Compute a 64-bit hash values given a set of bytes
uint64_t hash(const unsigned char *k, int length);
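
The 3x rule in the urlEncode comment above covers the worst case in which every source byte expands to a "%XX" triple, and the strlen(encoded) call in src/bucket.c below shows the output is NUL-terminated. A small sketch applying the rule (the 64-byte key limit and the example key are arbitrary):

#include <stdio.h>
#include "util.h"

static void urlEncodeExample(void)
{
    const char *key = "photos/2008/new year.jpg";

    // Worst case: 3 output bytes per input byte, plus room for the NUL.
    char encoded[(3 * 64) + 1];

    if (urlEncode(encoded, key, 64)) {
        printf("%s\n", encoded);
    }
}
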
diff --git a/src/acl.c b/src/acl.c
index 442a386..2a8272b 100644
--- a/src/acl.c
+++ b/src/acl.c
@@ -37,313 +37,313 @@
typedef struct GetAclData
{
- SimpleXml simpleXml;
+ SimpleXml simpleXml;
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- int *aclGrantCountReturn;
- S3AclGrant *aclGrants;
- char *ownerId;
- char *ownerDisplayName;
- string_buffer(aclXmlDocument, ACL_XML_DOC_MAXSIZE);
+ int *aclGrantCountReturn;
+ S3AclGrant *aclGrants;
+ char *ownerId;
+ char *ownerDisplayName;
+ string_buffer(aclXmlDocument, ACL_XML_DOC_MAXSIZE);
} GetAclData;
static S3Status getAclPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- GetAclData *gaData = (GetAclData *) callbackData;
-
- return (*(gaData->responsePropertiesCallback))
- (responseProperties, gaData->callbackData);
+ GetAclData *gaData = (GetAclData *) callbackData;
+
+ return (*(gaData->responsePropertiesCallback))
+ (responseProperties, gaData->callbackData);
}
static S3Status getAclDataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- GetAclData *gaData = (GetAclData *) callbackData;
+ GetAclData *gaData = (GetAclData *) callbackData;
- int fit;
+ int fit;
- string_buffer_append(gaData->aclXmlDocument, buffer, bufferSize, fit);
-
- return fit ? S3StatusOK : S3StatusXmlDocumentTooLarge;
+ string_buffer_append(gaData->aclXmlDocument, buffer, bufferSize, fit);
+
+ return fit ? S3StatusOK : S3StatusXmlDocumentTooLarge;
}
static void getAclCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- GetAclData *gaData = (GetAclData *) callbackData;
+ GetAclData *gaData = (GetAclData *) callbackData;
- if (requestStatus == S3StatusOK) {
- // Parse the document
- requestStatus = S3_convert_acl
- (gaData->aclXmlDocument, gaData->ownerId, gaData->ownerDisplayName,
- gaData->aclGrantCountReturn, gaData->aclGrants);
- }
+ if (requestStatus == S3StatusOK) {
+ // Parse the document
+ requestStatus = S3_convert_acl
+ (gaData->aclXmlDocument, gaData->ownerId, gaData->ownerDisplayName,
+ gaData->aclGrantCountReturn, gaData->aclGrants);
+ }
- (*(gaData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, gaData->callbackData);
+ (*(gaData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, gaData->callbackData);
- free(gaData);
+ free(gaData);
}
void S3_get_acl(const S3BucketContext *bucketContext, const char *key,
- char *ownerId, char *ownerDisplayName,
- int *aclGrantCountReturn, S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ char *ownerId, char *ownerDisplayName,
+ int *aclGrantCountReturn, S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Create the callback data
- GetAclData *gaData = (GetAclData *) malloc(sizeof(GetAclData));
- if (!gaData) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- gaData->responsePropertiesCallback = handler->propertiesCallback;
- gaData->responseCompleteCallback = handler->completeCallback;
- gaData->callbackData = callbackData;
-
- gaData->aclGrantCountReturn = aclGrantCountReturn;
- gaData->aclGrants = aclGrants;
- gaData->ownerId = ownerId;
- gaData->ownerDisplayName = ownerDisplayName;
- string_buffer_initialize(gaData->aclXmlDocument);
- *aclGrantCountReturn = 0;
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeGET, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- key, // key
- 0, // queryParams
- "acl", // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &getAclPropertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- &getAclDataCallback, // fromS3Callback
- &getAclCompleteCallback, // completeCallback
- gaData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create the callback data
+ GetAclData *gaData = (GetAclData *) malloc(sizeof(GetAclData));
+ if (!gaData) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ gaData->responsePropertiesCallback = handler->propertiesCallback;
+ gaData->responseCompleteCallback = handler->completeCallback;
+ gaData->callbackData = callbackData;
+
+ gaData->aclGrantCountReturn = aclGrantCountReturn;
+ gaData->aclGrants = aclGrants;
+ gaData->ownerId = ownerId;
+ gaData->ownerDisplayName = ownerDisplayName;
+ string_buffer_initialize(gaData->aclXmlDocument);
+ *aclGrantCountReturn = 0;
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeGET, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ key, // key
+ 0, // queryParams
+ "acl", // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &getAclPropertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ &getAclDataCallback, // fromS3Callback
+ &getAclCompleteCallback, // completeCallback
+ gaData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
// set acl -------------------------------------------------------------------
static S3Status generateAclXmlDocument(const char *ownerId,
- const char *ownerDisplayName,
- int aclGrantCount,
- const S3AclGrant *aclGrants,
- int *xmlDocumentLenReturn,
- char *xmlDocument,
- int xmlDocumentBufferSize)
+ const char *ownerDisplayName,
+ int aclGrantCount,
+ const S3AclGrant *aclGrants,
+ int *xmlDocumentLenReturn,
+ char *xmlDocument,
+ int xmlDocumentBufferSize)
{
- *xmlDocumentLenReturn = 0;
-
-#define append(fmt, ...) \
- do { \
- *xmlDocumentLenReturn += snprintf \
- (&(xmlDocument[*xmlDocumentLenReturn]), \
- xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \
- fmt, __VA_ARGS__); \
- if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) { \
- return S3StatusXmlDocumentTooLarge; \
- } \
- } while (0)
-
- append("<AccessControlPolicy><Owner><ID>%s</ID><DisplayName>%s"
- "</DisplayName></Owner><AccessControlList>", ownerId,
- ownerDisplayName);
-
- int i;
- for (i = 0; i < aclGrantCount; i++) {
- append("%s", "<Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/"
- "XMLSchema-instance\" xsi:type=\"");
- const S3AclGrant *grant = &(aclGrants[i]);
- switch (grant->granteeType) {
- case S3GranteeTypeAmazonCustomerByEmail:
- append("AmazonCustomerByEmail\"><EmailAddress>%s</EmailAddress>",
- grant->grantee.amazonCustomerByEmail.emailAddress);
- break;
- case S3GranteeTypeCanonicalUser:
- append("CanonicalUser\"><ID>%s</ID><DisplayName>%s</DisplayName>",
- grant->grantee.canonicalUser.id,
- grant->grantee.canonicalUser.displayName);
- break;
- default: { // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers:
- const char *grantee;
- switch (grant->granteeType) {
- case S3GranteeTypeAllAwsUsers:
- grantee = "http://acs.amazonaws.com/groups/global/"
- "AuthenticatedUsers";
- break;
- case S3GranteeTypeAllUsers:
- grantee = "http://acs.amazonaws.com/groups/global/"
- "AllUsers";
- break;
- default:
- grantee = "http://acs.amazonaws.com/groups/s3/"
- "LogDelivery";
- break;
- }
- append("Group\"><URI>%s</URI>", grantee);
- }
- break;
- }
- append("</Grantee><Permission>%s</Permission></Grant>",
- ((grant->permission == S3PermissionRead) ? "READ" :
- (grant->permission == S3PermissionWrite) ? "WRITE" :
- (grant->permission == S3PermissionReadACP) ? "READ_ACP" :
- (grant->permission == S3PermissionWriteACP) ? "WRITE_ACP" :
- "FULL_CONTROL"));
- }
-
- append("%s", "</AccessControlList></AccessControlPolicy>");
-
- return S3StatusOK;
+ *xmlDocumentLenReturn = 0;
+
+#define append(fmt, ...) \
+ do { \
+ *xmlDocumentLenReturn += snprintf \
+ (&(xmlDocument[*xmlDocumentLenReturn]), \
+ xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \
+ fmt, __VA_ARGS__); \
+ if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) { \
+ return S3StatusXmlDocumentTooLarge; \
+ } \
+ } while (0)
+
+ append("<AccessControlPolicy><Owner><ID>%s</ID><DisplayName>%s"
+ "</DisplayName></Owner><AccessControlList>", ownerId,
+ ownerDisplayName);
+
+ int i;
+ for (i = 0; i < aclGrantCount; i++) {
+ append("%s", "<Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/"
+ "XMLSchema-instance\" xsi:type=\"");
+ const S3AclGrant *grant = &(aclGrants[i]);
+ switch (grant->granteeType) {
+ case S3GranteeTypeAmazonCustomerByEmail:
+ append("AmazonCustomerByEmail\"><EmailAddress>%s</EmailAddress>",
+ grant->grantee.amazonCustomerByEmail.emailAddress);
+ break;
+ case S3GranteeTypeCanonicalUser:
+ append("CanonicalUser\"><ID>%s</ID><DisplayName>%s</DisplayName>",
+ grant->grantee.canonicalUser.id,
+ grant->grantee.canonicalUser.displayName);
+ break;
+ default: { // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers:
+ const char *grantee;
+ switch (grant->granteeType) {
+ case S3GranteeTypeAllAwsUsers:
+ grantee = "http://acs.amazonaws.com/groups/global/"
+ "AuthenticatedUsers";
+ break;
+ case S3GranteeTypeAllUsers:
+ grantee = "http://acs.amazonaws.com/groups/global/"
+ "AllUsers";
+ break;
+ default:
+ grantee = "http://acs.amazonaws.com/groups/s3/"
+ "LogDelivery";
+ break;
+ }
+ append("Group\"><URI>%s</URI>", grantee);
+ }
+ break;
+ }
+ append("</Grantee><Permission>%s</Permission></Grant>",
+ ((grant->permission == S3PermissionRead) ? "READ" :
+ (grant->permission == S3PermissionWrite) ? "WRITE" :
+ (grant->permission == S3PermissionReadACP) ? "READ_ACP" :
+ (grant->permission == S3PermissionWriteACP) ? "WRITE_ACP" :
+ "FULL_CONTROL"));
+ }
+
+ append("%s", "</AccessControlList></AccessControlPolicy>");
+
+ return S3StatusOK;
}
typedef struct SetAclData
{
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- int aclXmlDocumentLen;
- char aclXmlDocument[ACL_XML_DOC_MAXSIZE];
- int aclXmlDocumentBytesWritten;
+ int aclXmlDocumentLen;
+ char aclXmlDocument[ACL_XML_DOC_MAXSIZE];
+ int aclXmlDocumentBytesWritten;
} SetAclData;
static S3Status setAclPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- SetAclData *paData = (SetAclData *) callbackData;
-
- return (*(paData->responsePropertiesCallback))
- (responseProperties, paData->callbackData);
+ SetAclData *paData = (SetAclData *) callbackData;
+
+ return (*(paData->responsePropertiesCallback))
+ (responseProperties, paData->callbackData);
}
static int setAclDataCallback(int bufferSize, char *buffer, void *callbackData)
{
- SetAclData *paData = (SetAclData *) callbackData;
+ SetAclData *paData = (SetAclData *) callbackData;
- int remaining = (paData->aclXmlDocumentLen -
- paData->aclXmlDocumentBytesWritten);
+ int remaining = (paData->aclXmlDocumentLen -
+ paData->aclXmlDocumentBytesWritten);
- int toCopy = bufferSize > remaining ? remaining : bufferSize;
-
- if (!toCopy) {
- return 0;
- }
+ int toCopy = bufferSize > remaining ? remaining : bufferSize;
+
+ if (!toCopy) {
+ return 0;
+ }
- memcpy(buffer, &(paData->aclXmlDocument
- [paData->aclXmlDocumentBytesWritten]), toCopy);
+ memcpy(buffer, &(paData->aclXmlDocument
+ [paData->aclXmlDocumentBytesWritten]), toCopy);
- paData->aclXmlDocumentBytesWritten += toCopy;
+ paData->aclXmlDocumentBytesWritten += toCopy;
- return toCopy;
+ return toCopy;
}
static void setAclCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- SetAclData *paData = (SetAclData *) callbackData;
+ SetAclData *paData = (SetAclData *) callbackData;
- (*(paData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, paData->callbackData);
+ (*(paData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, paData->callbackData);
- free(paData);
+ free(paData);
}
void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
- const char *ownerId, const char *ownerDisplayName,
- int aclGrantCount, const S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ const char *ownerId, const char *ownerDisplayName,
+ int aclGrantCount, const S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) {
- (*(handler->completeCallback))
- (S3StatusTooManyGrants, 0, callbackData);
- return;
- }
-
- SetAclData *data = (SetAclData *) malloc(sizeof(SetAclData));
- if (!data) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- // Convert aclGrants to XML document
- S3Status status = generateAclXmlDocument
- (ownerId, ownerDisplayName, aclGrantCount, aclGrants,
- &(data->aclXmlDocumentLen), data->aclXmlDocument,
- sizeof(data->aclXmlDocument));
- if (status != S3StatusOK) {
- free(data);
- (*(handler->completeCallback))(status, 0, callbackData);
- return;
- }
-
- data->responsePropertiesCallback = handler->propertiesCallback;
- data->responseCompleteCallback = handler->completeCallback;
- data->callbackData = callbackData;
-
- data->aclXmlDocumentBytesWritten = 0;
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypePUT, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- key, // key
- 0, // queryParams
- "acl", // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &setAclPropertiesCallback, // propertiesCallback
- &setAclDataCallback, // toS3Callback
- data->aclXmlDocumentLen, // toS3CallbackTotalSize
- 0, // fromS3Callback
- &setAclCompleteCallback, // completeCallback
- data // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) {
+ (*(handler->completeCallback))
+ (S3StatusTooManyGrants, 0, callbackData);
+ return;
+ }
+
+ SetAclData *data = (SetAclData *) malloc(sizeof(SetAclData));
+ if (!data) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ // Convert aclGrants to XML document
+ S3Status status = generateAclXmlDocument
+ (ownerId, ownerDisplayName, aclGrantCount, aclGrants,
+ &(data->aclXmlDocumentLen), data->aclXmlDocument,
+ sizeof(data->aclXmlDocument));
+ if (status != S3StatusOK) {
+ free(data);
+ (*(handler->completeCallback))(status, 0, callbackData);
+ return;
+ }
+
+ data->responsePropertiesCallback = handler->propertiesCallback;
+ data->responseCompleteCallback = handler->completeCallback;
+ data->callbackData = callbackData;
+
+ data->aclXmlDocumentBytesWritten = 0;
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypePUT, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ key, // key
+ 0, // queryParams
+ "acl", // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &setAclPropertiesCallback, // propertiesCallback
+ &setAclDataCallback, // toS3Callback
+ data->aclXmlDocumentLen, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ &setAclCompleteCallback, // completeCallback
+ data // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
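
Both entry points above follow the same pattern: heap-allocate per-request callback data, translate the caller's handler into internal callbacks, and hand a RequestParams block to request_perform. A usage sketch that reads a key's ACL and writes the same grants straight back (buffer sizes are illustrative guesses rather than constants from libs3.h, and passing 0 for the request context is assumed here to perform the request synchronously):

#include "libs3.h"

static S3Status aclPropertiesCallback(const S3ResponseProperties *properties,
                                      void *callbackData)
{
    (void) properties; (void) callbackData;
    return S3StatusOK;
}

static void aclCompleteCallback(S3Status status,
                                const S3ErrorDetails *errorDetails,
                                void *callbackData)
{
    (void) errorDetails;
    *((S3Status *) callbackData) = status;
}

static void copyAclExample(const S3BucketContext *bucket, const char *key)
{
    char ownerId[256], ownerDisplayName[256];
    int grantCount = 0;
    S3AclGrant grants[S3_MAX_ACL_GRANT_COUNT];
    S3Status result = S3StatusOK;

    S3ResponseHandler handler;
    handler.propertiesCallback = &aclPropertiesCallback;
    handler.completeCallback = &aclCompleteCallback;

    S3_get_acl(bucket, key, ownerId, ownerDisplayName, &grantCount, grants,
               0, &handler, &result);
    if (result != S3StatusOK) {
        return;
    }

    S3_set_acl(bucket, key, ownerId, ownerDisplayName, grantCount, grants,
               0, &handler, &result);
}
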
diff --git a/src/bucket.c b/src/bucket.c
index 379b8ba..9dcc48c 100644
--- a/src/bucket.c
+++ b/src/bucket.c
@@ -34,127 +34,127 @@
typedef struct TestBucketData
{
- SimpleXml simpleXml;
+ SimpleXml simpleXml;
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- int locationConstraintReturnSize;
- char *locationConstraintReturn;
+ int locationConstraintReturnSize;
+ char *locationConstraintReturn;
- string_buffer(locationConstraint, 256);
+ string_buffer(locationConstraint, 256);
} TestBucketData;
static S3Status testBucketXmlCallback(const char *elementPath,
- const char *data, int dataLen,
- void *callbackData)
+ const char *data, int dataLen,
+ void *callbackData)
{
- TestBucketData *tbData = (TestBucketData *) callbackData;
+ TestBucketData *tbData = (TestBucketData *) callbackData;
- int fit;
+ int fit;
- if (data && !strcmp(elementPath, "LocationConstraint")) {
- string_buffer_append(tbData->locationConstraint, data, dataLen, fit);
- }
+ if (data && !strcmp(elementPath, "LocationConstraint")) {
+ string_buffer_append(tbData->locationConstraint, data, dataLen, fit);
+ }
- return S3StatusOK;
+ return S3StatusOK;
}
static S3Status testBucketPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- TestBucketData *tbData = (TestBucketData *) callbackData;
-
- return (*(tbData->responsePropertiesCallback))
- (responseProperties, tbData->callbackData);
+ TestBucketData *tbData = (TestBucketData *) callbackData;
+
+ return (*(tbData->responsePropertiesCallback))
+ (responseProperties, tbData->callbackData);
}
static S3Status testBucketDataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- TestBucketData *tbData = (TestBucketData *) callbackData;
+ TestBucketData *tbData = (TestBucketData *) callbackData;
- return simplexml_add(&(tbData->simpleXml), buffer, bufferSize);
+ return simplexml_add(&(tbData->simpleXml), buffer, bufferSize);
}
static void testBucketCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- TestBucketData *tbData = (TestBucketData *) callbackData;
+ TestBucketData *tbData = (TestBucketData *) callbackData;
- // Copy the location constraint into the return buffer
- snprintf(tbData->locationConstraintReturn,
- tbData->locationConstraintReturnSize, "%s",
- tbData->locationConstraint);
+ // Copy the location constraint into the return buffer
+ snprintf(tbData->locationConstraintReturn,
+ tbData->locationConstraintReturnSize, "%s",
+ tbData->locationConstraint);
- (*(tbData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, tbData->callbackData);
+ (*(tbData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, tbData->callbackData);
- simplexml_deinitialize(&(tbData->simpleXml));
+ simplexml_deinitialize(&(tbData->simpleXml));
- free(tbData);
+ free(tbData);
}
void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
- const char *accessKeyId, const char *secretAccessKey,
- const char *bucketName, int locationConstraintReturnSize,
- char *locationConstraintReturn,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ const char *accessKeyId, const char *secretAccessKey,
+ const char *bucketName, int locationConstraintReturnSize,
+ char *locationConstraintReturn,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Create the callback data
- TestBucketData *tbData =
- (TestBucketData *) malloc(sizeof(TestBucketData));
- if (!tbData) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- simplexml_initialize(&(tbData->simpleXml), &testBucketXmlCallback, tbData);
-
- tbData->responsePropertiesCallback = handler->propertiesCallback;
- tbData->responseCompleteCallback = handler->completeCallback;
- tbData->callbackData = callbackData;
-
- tbData->locationConstraintReturnSize = locationConstraintReturnSize;
- tbData->locationConstraintReturn = locationConstraintReturn;
- string_buffer_initialize(tbData->locationConstraint);
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeGET, // httpRequestType
- { bucketName, // bucketName
- protocol, // protocol
- uriStyle, // uriStyle
- accessKeyId, // accessKeyId
- secretAccessKey }, // secretAccessKey
- 0, // key
- 0, // queryParams
- "location", // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &testBucketPropertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- &testBucketDataCallback, // fromS3Callback
- &testBucketCompleteCallback, // completeCallback
- tbData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create the callback data
+ TestBucketData *tbData =
+ (TestBucketData *) malloc(sizeof(TestBucketData));
+ if (!tbData) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ simplexml_initialize(&(tbData->simpleXml), &testBucketXmlCallback, tbData);
+
+ tbData->responsePropertiesCallback = handler->propertiesCallback;
+ tbData->responseCompleteCallback = handler->completeCallback;
+ tbData->callbackData = callbackData;
+
+ tbData->locationConstraintReturnSize = locationConstraintReturnSize;
+ tbData->locationConstraintReturn = locationConstraintReturn;
+ string_buffer_initialize(tbData->locationConstraint);
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeGET, // httpRequestType
+ { bucketName, // bucketName
+ protocol, // protocol
+ uriStyle, // uriStyle
+ accessKeyId, // accessKeyId
+ secretAccessKey }, // secretAccessKey
+ 0, // key
+ 0, // queryParams
+ "location", // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &testBucketPropertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ &testBucketDataCallback, // fromS3Callback
+ &testBucketCompleteCallback, // completeCallback
+ tbData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
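
S3_test_bucket above issues a GET on the bucket's "location" sub-resource and copies the LocationConstraint into the caller's buffer when the request completes. A usage sketch (S3ProtocolHTTPS is assumed to be the HTTPS enumerator in libs3.h, and the 64-byte buffer is an arbitrary size):

#include "libs3.h"

static void testBucketExample(const char *accessKeyId,
                              const char *secretAccessKey,
                              const char *bucketName,
                              const S3ResponseHandler *handler)
{
    char location[64];

    // location is filled in by the time the complete callback runs.
    S3_test_bucket(S3ProtocolHTTPS, S3UriStylePath, accessKeyId,
                   secretAccessKey, bucketName, sizeof(location), location,
                   0, handler, 0);
}
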
@@ -162,217 +162,217 @@ void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
typedef struct CreateBucketData
{
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- char doc[1024];
- int docLen, docBytesWritten;
-} CreateBucketData;
-
+ char doc[1024];
+ int docLen, docBytesWritten;
+} CreateBucketData;
+
static S3Status createBucketPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- CreateBucketData *cbData = (CreateBucketData *) callbackData;
-
- return (*(cbData->responsePropertiesCallback))
- (responseProperties, cbData->callbackData);
+ CreateBucketData *cbData = (CreateBucketData *) callbackData;
+
+ return (*(cbData->responsePropertiesCallback))
+ (responseProperties, cbData->callbackData);
}
static int createBucketDataCallback(int bufferSize, char *buffer,
- void *callbackData)
+ void *callbackData)
{
- CreateBucketData *cbData = (CreateBucketData *) callbackData;
+ CreateBucketData *cbData = (CreateBucketData *) callbackData;
- if (!cbData->docLen) {
- return 0;
- }
+ if (!cbData->docLen) {
+ return 0;
+ }
- int remaining = (cbData->docLen - cbData->docBytesWritten);
+ int remaining = (cbData->docLen - cbData->docBytesWritten);
- int toCopy = bufferSize > remaining ? remaining : bufferSize;
-
- if (!toCopy) {
- return 0;
- }
+ int toCopy = bufferSize > remaining ? remaining : bufferSize;
+
+ if (!toCopy) {
+ return 0;
+ }
- memcpy(buffer, &(cbData->doc[cbData->docBytesWritten]), toCopy);
+ memcpy(buffer, &(cbData->doc[cbData->docBytesWritten]), toCopy);
- cbData->docBytesWritten += toCopy;
+ cbData->docBytesWritten += toCopy;
- return toCopy;
+ return toCopy;
}
static void createBucketCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- CreateBucketData *cbData = (CreateBucketData *) callbackData;
+ CreateBucketData *cbData = (CreateBucketData *) callbackData;
- (*(cbData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, cbData->callbackData);
+ (*(cbData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, cbData->callbackData);
- free(cbData);
+ free(cbData);
}
void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
- const char *secretAccessKey, const char *bucketName,
- S3CannedAcl cannedAcl, const char *locationConstraint,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ const char *secretAccessKey, const char *bucketName,
+ S3CannedAcl cannedAcl, const char *locationConstraint,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Create the callback data
- CreateBucketData *cbData =
- (CreateBucketData *) malloc(sizeof(CreateBucketData));
- if (!cbData) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- cbData->responsePropertiesCallback = handler->propertiesCallback;
- cbData->responseCompleteCallback = handler->completeCallback;
- cbData->callbackData = callbackData;
-
- if (locationConstraint) {
- cbData->docLen =
- snprintf(cbData->doc, sizeof(cbData->doc),
- "<CreateBucketConfiguration><LocationConstraint>"
- "%s</LocationConstraint></CreateBucketConfiguration>",
- locationConstraint);
- cbData->docBytesWritten = 0;
- }
- else {
- cbData->docLen = 0;
- }
-
- // Set up S3PutProperties
- S3PutProperties properties =
- {
- 0, // contentType
- 0, // md5
- 0, // cacheControl
- 0, // contentDispositionFilename
- 0, // contentEncoding
- 0, // expires
- cannedAcl, // cannedAcl
- 0, // metaDataCount
- 0 // metaData
- };
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypePUT, // httpRequestType
- { bucketName, // bucketName
- protocol, // protocol
- S3UriStylePath, // uriStyle
- accessKeyId, // accessKeyId
- secretAccessKey }, // secretAccessKey
- 0, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- &properties, // putProperties
- &createBucketPropertiesCallback, // propertiesCallback
- &createBucketDataCallback, // toS3Callback
- cbData->docLen, // toS3CallbackTotalSize
- 0, // fromS3Callback
- &createBucketCompleteCallback, // completeCallback
- cbData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create the callback data
+ CreateBucketData *cbData =
+ (CreateBucketData *) malloc(sizeof(CreateBucketData));
+ if (!cbData) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ cbData->responsePropertiesCallback = handler->propertiesCallback;
+ cbData->responseCompleteCallback = handler->completeCallback;
+ cbData->callbackData = callbackData;
+
+ if (locationConstraint) {
+ cbData->docLen =
+ snprintf(cbData->doc, sizeof(cbData->doc),
+ "<CreateBucketConfiguration><LocationConstraint>"
+ "%s</LocationConstraint></CreateBucketConfiguration>",
+ locationConstraint);
+ cbData->docBytesWritten = 0;
+ }
+ else {
+ cbData->docLen = 0;
+ }
+
+ // Set up S3PutProperties
+ S3PutProperties properties =
+ {
+ 0, // contentType
+ 0, // md5
+ 0, // cacheControl
+ 0, // contentDispositionFilename
+ 0, // contentEncoding
+ 0, // expires
+ cannedAcl, // cannedAcl
+ 0, // metaDataCount
+ 0 // metaData
+ };
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypePUT, // httpRequestType
+ { bucketName, // bucketName
+ protocol, // protocol
+ S3UriStylePath, // uriStyle
+ accessKeyId, // accessKeyId
+ secretAccessKey }, // secretAccessKey
+ 0, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ &properties, // putProperties
+ &createBucketPropertiesCallback, // propertiesCallback
+ &createBucketDataCallback, // toS3Callback
+ cbData->docLen, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ &createBucketCompleteCallback, // completeCallback
+ cbData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
-
+
// delete bucket -------------------------------------------------------------
typedef struct DeleteBucketData
{
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
} DeleteBucketData;
static S3Status deleteBucketPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
-
- return (*(dbData->responsePropertiesCallback))
- (responseProperties, dbData->callbackData);
+ DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
+
+ return (*(dbData->responsePropertiesCallback))
+ (responseProperties, dbData->callbackData);
}
static void deleteBucketCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
+ DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
- (*(dbData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, dbData->callbackData);
+ (*(dbData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, dbData->callbackData);
- free(dbData);
+ free(dbData);
}
void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
- const char *accessKeyId, const char *secretAccessKey,
- const char *bucketName,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ const char *accessKeyId, const char *secretAccessKey,
+ const char *bucketName,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Create the callback data
- DeleteBucketData *dbData =
- (DeleteBucketData *) malloc(sizeof(DeleteBucketData));
- if (!dbData) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- dbData->responsePropertiesCallback = handler->propertiesCallback;
- dbData->responseCompleteCallback = handler->completeCallback;
- dbData->callbackData = callbackData;
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeDELETE, // httpRequestType
- { bucketName, // bucketName
- protocol, // protocol
- uriStyle, // uriStyle
- accessKeyId, // accessKeyId
- secretAccessKey }, // secretAccessKey
- 0, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &deleteBucketPropertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- 0, // fromS3Callback
- &deleteBucketCompleteCallback, // completeCallback
- dbData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create the callback data
+ DeleteBucketData *dbData =
+ (DeleteBucketData *) malloc(sizeof(DeleteBucketData));
+ if (!dbData) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ dbData->responsePropertiesCallback = handler->propertiesCallback;
+ dbData->responseCompleteCallback = handler->completeCallback;
+ dbData->callbackData = callbackData;
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeDELETE, // httpRequestType
+ { bucketName, // bucketName
+ protocol, // protocol
+ uriStyle, // uriStyle
+ accessKeyId, // accessKeyId
+ secretAccessKey }, // secretAccessKey
+ 0, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &deleteBucketPropertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ &deleteBucketCompleteCallback, // completeCallback
+ dbData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
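
Create and delete mirror each other; passing 0 for locationConstraint keeps the CreateBucketConfiguration body empty so S3 applies its default placement. A usage sketch (S3ProtocolHTTPS and S3CannedAclPrivate are assumed enumerator names from libs3.h):

#include "libs3.h"

static void bucketLifecycleExample(const char *accessKeyId,
                                   const char *secretAccessKey,
                                   const char *bucketName,
                                   const S3ResponseHandler *handler)
{
    // PUT the bucket with a private canned ACL and default placement...
    S3_create_bucket(S3ProtocolHTTPS, accessKeyId, secretAccessKey,
                     bucketName, S3CannedAclPrivate, 0, 0, handler, 0);

    // ... and DELETE it again.
    S3_delete_bucket(S3ProtocolHTTPS, S3UriStylePath, accessKeyId,
                     secretAccessKey, bucketName, 0, handler, 0);
}
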
@@ -380,23 +380,23 @@ void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
typedef struct ListBucketContents
{
- string_buffer(key, 1024);
- string_buffer(lastModified, 256);
- string_buffer(eTag, 256);
- string_buffer(size, 24);
- string_buffer(ownerId, 256);
- string_buffer(ownerDisplayName, 256);
+ string_buffer(key, 1024);
+ string_buffer(lastModified, 256);
+ string_buffer(eTag, 256);
+ string_buffer(size, 24);
+ string_buffer(ownerId, 256);
+ string_buffer(ownerDisplayName, 256);
} ListBucketContents;
static void initialize_list_bucket_contents(ListBucketContents *contents)
{
- string_buffer_initialize(contents->key);
- string_buffer_initialize(contents->lastModified);
- string_buffer_initialize(contents->eTag);
- string_buffer_initialize(contents->size);
- string_buffer_initialize(contents->ownerId);
- string_buffer_initialize(contents->ownerDisplayName);
+ string_buffer_initialize(contents->key);
+ string_buffer_initialize(contents->lastModified);
+ string_buffer_initialize(contents->eTag);
+ string_buffer_initialize(contents->size);
+ string_buffer_initialize(contents->ownerId);
+ string_buffer_initialize(contents->ownerDisplayName);
}
// We read up to 32 Contents at a time
@@ -406,326 +406,326 @@ static void initialize_list_bucket_contents(ListBucketContents *contents)
typedef struct ListBucketData
{
- SimpleXml simpleXml;
+ SimpleXml simpleXml;
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ListBucketCallback *listBucketCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ListBucketCallback *listBucketCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- string_buffer(isTruncated, 64);
- string_buffer(nextMarker, 1024);
+ string_buffer(isTruncated, 64);
+ string_buffer(nextMarker, 1024);
- int contentsCount;
- ListBucketContents contents[MAX_CONTENTS];
+ int contentsCount;
+ ListBucketContents contents[MAX_CONTENTS];
- int commonPrefixesCount;
- char commonPrefixes[MAX_COMMON_PREFIXES][1024];
- int commonPrefixLens[MAX_COMMON_PREFIXES];
+ int commonPrefixesCount;
+ char commonPrefixes[MAX_COMMON_PREFIXES][1024];
+ int commonPrefixLens[MAX_COMMON_PREFIXES];
} ListBucketData;
static void initialize_list_bucket_data(ListBucketData *lbData)
{
- lbData->contentsCount = 0;
- initialize_list_bucket_contents(lbData->contents);
- lbData->commonPrefixesCount = 0;
- lbData->commonPrefixes[0][0] = 0;
- lbData->commonPrefixLens[0] = 0;
+ lbData->contentsCount = 0;
+ initialize_list_bucket_contents(lbData->contents);
+ lbData->commonPrefixesCount = 0;
+ lbData->commonPrefixes[0][0] = 0;
+ lbData->commonPrefixLens[0] = 0;
}
static S3Status make_list_bucket_callback(ListBucketData *lbData)
{
- int i;
-
- // Convert IsTruncated
- int isTruncated = (!strcmp(lbData->isTruncated, "true") ||
- !strcmp(lbData->isTruncated, "1")) ? 1 : 0;
-
- // Convert the contents
- S3ListBucketContent contents[lbData->contentsCount];
-
- int contentsCount = lbData->contentsCount;
- for (i = 0; i < contentsCount; i++) {
- S3ListBucketContent *contentDest = &(contents[i]);
- ListBucketContents *contentSrc = &(lbData->contents[i]);
- contentDest->key = contentSrc->key;
- contentDest->lastModified =
- parseIso8601Time(contentSrc->lastModified);
- contentDest->eTag = contentSrc->eTag;
- contentDest->size = parseUnsignedInt(contentSrc->size);
- contentDest->ownerId =
- contentSrc->ownerId[0] ?contentSrc->ownerId : 0;
- contentDest->ownerDisplayName = (contentSrc->ownerDisplayName[0] ?
- contentSrc->ownerDisplayName : 0);
- }
-
- // Make the common prefixes array
- int commonPrefixesCount = lbData->commonPrefixesCount;
- char *commonPrefixes[commonPrefixesCount];
- for (i = 0; i < commonPrefixesCount; i++) {
- commonPrefixes[i] = lbData->commonPrefixes[i];
- }
-
- return (*(lbData->listBucketCallback))
- (isTruncated, lbData->nextMarker,
- contentsCount, contents, commonPrefixesCount,
- (const char **) commonPrefixes, lbData->callbackData);
+ int i;
+
+ // Convert IsTruncated
+ int isTruncated = (!strcmp(lbData->isTruncated, "true") ||
+ !strcmp(lbData->isTruncated, "1")) ? 1 : 0;
+
+ // Convert the contents
+ S3ListBucketContent contents[lbData->contentsCount];
+
+ int contentsCount = lbData->contentsCount;
+ for (i = 0; i < contentsCount; i++) {
+ S3ListBucketContent *contentDest = &(contents[i]);
+ ListBucketContents *contentSrc = &(lbData->contents[i]);
+ contentDest->key = contentSrc->key;
+ contentDest->lastModified =
+ parseIso8601Time(contentSrc->lastModified);
+ contentDest->eTag = contentSrc->eTag;
+ contentDest->size = parseUnsignedInt(contentSrc->size);
+ contentDest->ownerId =
+ contentSrc->ownerId[0] ?contentSrc->ownerId : 0;
+ contentDest->ownerDisplayName = (contentSrc->ownerDisplayName[0] ?
+ contentSrc->ownerDisplayName : 0);
+ }
+
+ // Make the common prefixes array
+ int commonPrefixesCount = lbData->commonPrefixesCount;
+ char *commonPrefixes[commonPrefixesCount];
+ for (i = 0; i < commonPrefixesCount; i++) {
+ commonPrefixes[i] = lbData->commonPrefixes[i];
+ }
+
+ return (*(lbData->listBucketCallback))
+ (isTruncated, lbData->nextMarker,
+ contentsCount, contents, commonPrefixesCount,
+ (const char **) commonPrefixes, lbData->callbackData);
}
static S3Status listBucketXmlCallback(const char *elementPath,
- const char *data, int dataLen,
- void *callbackData)
+ const char *data, int dataLen,
+ void *callbackData)
{
- ListBucketData *lbData = (ListBucketData *) callbackData;
-
- int fit;
-
- if (data) {
- if (!strcmp(elementPath, "ListBucketResult/IsTruncated")) {
- string_buffer_append(lbData->isTruncated, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "ListBucketResult/NextMarker")) {
- string_buffer_append(lbData->nextMarker, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "ListBucketResult/Contents/Key")) {
- ListBucketContents *contents =
- &(lbData->contents[lbData->contentsCount]);
- string_buffer_append(contents->key, data, dataLen, fit);
- }
- else if (!strcmp(elementPath,
- "ListBucketResult/Contents/LastModified")) {
- ListBucketContents *contents =
- &(lbData->contents[lbData->contentsCount]);
- string_buffer_append(contents->lastModified, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "ListBucketResult/Contents/ETag")) {
- ListBucketContents *contents =
- &(lbData->contents[lbData->contentsCount]);
- string_buffer_append(contents->eTag, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "ListBucketResult/Contents/Size")) {
- ListBucketContents *contents =
- &(lbData->contents[lbData->contentsCount]);
- string_buffer_append(contents->size, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "ListBucketResult/Contents/Owner/ID")) {
- ListBucketContents *contents =
- &(lbData->contents[lbData->contentsCount]);
- string_buffer_append(contents->ownerId, data, dataLen, fit);
- }
- else if (!strcmp(elementPath,
- "ListBucketResult/Contents/Owner/DisplayName")) {
- ListBucketContents *contents =
- &(lbData->contents[lbData->contentsCount]);
- string_buffer_append
- (contents->ownerDisplayName, data, dataLen, fit);
- }
- else if (!strcmp(elementPath,
- "ListBucketResult/CommonPrefixes/Prefix")) {
- int which = lbData->commonPrefixesCount;
- lbData->commonPrefixLens[which] +=
- snprintf(lbData->commonPrefixes[which],
- sizeof(lbData->commonPrefixes[which]) -
- lbData->commonPrefixLens[which] - 1,
- "%.*s", dataLen, data);
- if (lbData->commonPrefixLens[which] >=
- (int) sizeof(lbData->commonPrefixes[which])) {
- return S3StatusXmlParseFailure;
- }
- }
- }
- else {
- if (!strcmp(elementPath, "ListBucketResult/Contents")) {
- // Finished a Contents
- lbData->contentsCount++;
- if (lbData->contentsCount == MAX_CONTENTS) {
- // Make the callback
- S3Status status = make_list_bucket_callback(lbData);
- if (status != S3StatusOK) {
- return status;
- }
- initialize_list_bucket_data(lbData);
- }
- else {
- // Initialize the next one
- initialize_list_bucket_contents
- (&(lbData->contents[lbData->contentsCount]));
- }
- }
- else if (!strcmp(elementPath,
- "ListBucketResult/CommonPrefixes/Prefix")) {
- // Finished a Prefix
- lbData->commonPrefixesCount++;
- if (lbData->commonPrefixesCount == MAX_COMMON_PREFIXES) {
- // Make the callback
- S3Status status = make_list_bucket_callback(lbData);
- if (status != S3StatusOK) {
- return status;
- }
- initialize_list_bucket_data(lbData);
- }
- else {
- // Initialize the next one
- lbData->commonPrefixes[lbData->commonPrefixesCount][0] = 0;
- lbData->commonPrefixLens[lbData->commonPrefixesCount] = 0;
- }
- }
- }
-
- return S3StatusOK;
+ ListBucketData *lbData = (ListBucketData *) callbackData;
+
+ int fit;
+
+ if (data) {
+ if (!strcmp(elementPath, "ListBucketResult/IsTruncated")) {
+ string_buffer_append(lbData->isTruncated, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "ListBucketResult/NextMarker")) {
+ string_buffer_append(lbData->nextMarker, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "ListBucketResult/Contents/Key")) {
+ ListBucketContents *contents =
+ &(lbData->contents[lbData->contentsCount]);
+ string_buffer_append(contents->key, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath,
+ "ListBucketResult/Contents/LastModified")) {
+ ListBucketContents *contents =
+ &(lbData->contents[lbData->contentsCount]);
+ string_buffer_append(contents->lastModified, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "ListBucketResult/Contents/ETag")) {
+ ListBucketContents *contents =
+ &(lbData->contents[lbData->contentsCount]);
+ string_buffer_append(contents->eTag, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "ListBucketResult/Contents/Size")) {
+ ListBucketContents *contents =
+ &(lbData->contents[lbData->contentsCount]);
+ string_buffer_append(contents->size, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "ListBucketResult/Contents/Owner/ID")) {
+ ListBucketContents *contents =
+ &(lbData->contents[lbData->contentsCount]);
+ string_buffer_append(contents->ownerId, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath,
+ "ListBucketResult/Contents/Owner/DisplayName")) {
+ ListBucketContents *contents =
+ &(lbData->contents[lbData->contentsCount]);
+ string_buffer_append
+ (contents->ownerDisplayName, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath,
+ "ListBucketResult/CommonPrefixes/Prefix")) {
+ int which = lbData->commonPrefixesCount;
+ lbData->commonPrefixLens[which] +=
+ snprintf(lbData->commonPrefixes[which],
+ sizeof(lbData->commonPrefixes[which]) -
+ lbData->commonPrefixLens[which] - 1,
+ "%.*s", dataLen, data);
+ if (lbData->commonPrefixLens[which] >=
+ (int) sizeof(lbData->commonPrefixes[which])) {
+ return S3StatusXmlParseFailure;
+ }
+ }
+ }
+ else {
+ if (!strcmp(elementPath, "ListBucketResult/Contents")) {
+ // Finished a Contents
+ lbData->contentsCount++;
+ if (lbData->contentsCount == MAX_CONTENTS) {
+ // Make the callback
+ S3Status status = make_list_bucket_callback(lbData);
+ if (status != S3StatusOK) {
+ return status;
+ }
+ initialize_list_bucket_data(lbData);
+ }
+ else {
+ // Initialize the next one
+ initialize_list_bucket_contents
+ (&(lbData->contents[lbData->contentsCount]));
+ }
+ }
+ else if (!strcmp(elementPath,
+ "ListBucketResult/CommonPrefixes/Prefix")) {
+ // Finished a Prefix
+ lbData->commonPrefixesCount++;
+ if (lbData->commonPrefixesCount == MAX_COMMON_PREFIXES) {
+ // Make the callback
+ S3Status status = make_list_bucket_callback(lbData);
+ if (status != S3StatusOK) {
+ return status;
+ }
+ initialize_list_bucket_data(lbData);
+ }
+ else {
+ // Initialize the next one
+ lbData->commonPrefixes[lbData->commonPrefixesCount][0] = 0;
+ lbData->commonPrefixLens[lbData->commonPrefixesCount] = 0;
+ }
+ }
+ }
+
+ return S3StatusOK;
}
static S3Status listBucketPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- ListBucketData *lbData = (ListBucketData *) callbackData;
-
- return (*(lbData->responsePropertiesCallback))
- (responseProperties, lbData->callbackData);
+ ListBucketData *lbData = (ListBucketData *) callbackData;
+
+ return (*(lbData->responsePropertiesCallback))
+ (responseProperties, lbData->callbackData);
}
static S3Status listBucketDataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- ListBucketData *lbData = (ListBucketData *) callbackData;
-
- return simplexml_add(&(lbData->simpleXml), buffer, bufferSize);
+ ListBucketData *lbData = (ListBucketData *) callbackData;
+
+ return simplexml_add(&(lbData->simpleXml), buffer, bufferSize);
}
static void listBucketCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- ListBucketData *lbData = (ListBucketData *) callbackData;
+ ListBucketData *lbData = (ListBucketData *) callbackData;
- // Make the callback if there is anything
- if (lbData->contentsCount || lbData->commonPrefixesCount) {
- make_list_bucket_callback(lbData);
- }
+ // Make the callback if there is anything
+ if (lbData->contentsCount || lbData->commonPrefixesCount) {
+ make_list_bucket_callback(lbData);
+ }
- (*(lbData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, lbData->callbackData);
+ (*(lbData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, lbData->callbackData);
- simplexml_deinitialize(&(lbData->simpleXml));
+ simplexml_deinitialize(&(lbData->simpleXml));
- free(lbData);
+ free(lbData);
}
void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix,
- const char *marker, const char *delimiter, int maxkeys,
- S3RequestContext *requestContext,
- const S3ListBucketHandler *handler, void *callbackData)
+ const char *marker, const char *delimiter, int maxkeys,
+ S3RequestContext *requestContext,
+ const S3ListBucketHandler *handler, void *callbackData)
{
- // Compose the query params
- string_buffer(queryParams, 4096);
- string_buffer_initialize(queryParams);
-
-#define safe_append(name, value) \
- do { \
- int fit; \
- if (amp) { \
- string_buffer_append(queryParams, "&", 1, fit); \
- if (!fit) { \
- (*(handler->responseHandler.completeCallback)) \
- (S3StatusQueryParamsTooLong, 0, callbackData); \
- return; \
- } \
- } \
- string_buffer_append(queryParams, name "=", \
- sizeof(name "=") - 1, fit); \
- if (!fit) { \
- (*(handler->responseHandler.completeCallback)) \
- (S3StatusQueryParamsTooLong, 0, callbackData); \
- return; \
- } \
- amp = 1; \
- char encoded[3 * 1024]; \
- if (!urlEncode(encoded, value, 1024)) { \
- (*(handler->responseHandler.completeCallback)) \
- (S3StatusQueryParamsTooLong, 0, callbackData); \
- return; \
- } \
- string_buffer_append(queryParams, encoded, strlen(encoded), \
- fit); \
- if (!fit) { \
- (*(handler->responseHandler.completeCallback)) \
- (S3StatusQueryParamsTooLong, 0, callbackData); \
- return; \
- } \
- } while (0)
-
-
- int amp = 0;
- if (prefix) {
- safe_append("prefix", prefix);
- }
- if (marker) {
- safe_append("marker", marker);
- }
- if (delimiter) {
- safe_append("delimiter", delimiter);
- }
- if (maxkeys) {
- char maxKeysString[64];
- snprintf(maxKeysString, sizeof(maxKeysString), "%d", maxkeys);
- safe_append("max-keys", maxKeysString);
- }
-
- ListBucketData *lbData =
- (ListBucketData *) malloc(sizeof(ListBucketData));
-
- if (!lbData) {
- (*(handler->responseHandler.completeCallback))
- (S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- simplexml_initialize(&(lbData->simpleXml), &listBucketXmlCallback, lbData);
-
- lbData->responsePropertiesCallback =
- handler->responseHandler.propertiesCallback;
- lbData->listBucketCallback = handler->listBucketCallback;
- lbData->responseCompleteCallback =
- handler->responseHandler.completeCallback;
- lbData->callbackData = callbackData;
-
- string_buffer_initialize(lbData->isTruncated);
- string_buffer_initialize(lbData->nextMarker);
- initialize_list_bucket_data(lbData);
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeGET, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- 0, // key
- queryParams[0] ? queryParams : 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &listBucketPropertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- &listBucketDataCallback, // fromS3Callback
- &listBucketCompleteCallback, // completeCallback
- lbData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Compose the query params
+ string_buffer(queryParams, 4096);
+ string_buffer_initialize(queryParams);
+
+#define safe_append(name, value) \
+ do { \
+ int fit; \
+ if (amp) { \
+ string_buffer_append(queryParams, "&", 1, fit); \
+ if (!fit) { \
+ (*(handler->responseHandler.completeCallback)) \
+ (S3StatusQueryParamsTooLong, 0, callbackData); \
+ return; \
+ } \
+ } \
+ string_buffer_append(queryParams, name "=", \
+ sizeof(name "=") - 1, fit); \
+ if (!fit) { \
+ (*(handler->responseHandler.completeCallback)) \
+ (S3StatusQueryParamsTooLong, 0, callbackData); \
+ return; \
+ } \
+ amp = 1; \
+ char encoded[3 * 1024]; \
+ if (!urlEncode(encoded, value, 1024)) { \
+ (*(handler->responseHandler.completeCallback)) \
+ (S3StatusQueryParamsTooLong, 0, callbackData); \
+ return; \
+ } \
+ string_buffer_append(queryParams, encoded, strlen(encoded), \
+ fit); \
+ if (!fit) { \
+ (*(handler->responseHandler.completeCallback)) \
+ (S3StatusQueryParamsTooLong, 0, callbackData); \
+ return; \
+ } \
+ } while (0)
+
+
+ int amp = 0;
+ if (prefix) {
+ safe_append("prefix", prefix);
+ }
+ if (marker) {
+ safe_append("marker", marker);
+ }
+ if (delimiter) {
+ safe_append("delimiter", delimiter);
+ }
+ if (maxkeys) {
+ char maxKeysString[64];
+ snprintf(maxKeysString, sizeof(maxKeysString), "%d", maxkeys);
+ safe_append("max-keys", maxKeysString);
+ }
+
+ ListBucketData *lbData =
+ (ListBucketData *) malloc(sizeof(ListBucketData));
+
+ if (!lbData) {
+ (*(handler->responseHandler.completeCallback))
+ (S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ simplexml_initialize(&(lbData->simpleXml), &listBucketXmlCallback, lbData);
+
+ lbData->responsePropertiesCallback =
+ handler->responseHandler.propertiesCallback;
+ lbData->listBucketCallback = handler->listBucketCallback;
+ lbData->responseCompleteCallback =
+ handler->responseHandler.completeCallback;
+ lbData->callbackData = callbackData;
+
+ string_buffer_initialize(lbData->isTruncated);
+ string_buffer_initialize(lbData->nextMarker);
+ initialize_list_bucket_data(lbData);
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeGET, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ 0, // key
+ queryParams[0] ? queryParams : 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &listBucketPropertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ &listBucketDataCallback, // fromS3Callback
+ &listBucketCompleteCallback, // completeCallback
+ lbData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
diff --git a/src/error_parser.c b/src/error_parser.c
index 929f4ce..baa206e 100644
--- a/src/error_parser.c
+++ b/src/error_parser.c
@@ -29,211 +29,211 @@
static S3Status errorXmlCallback(const char *elementPath, const char *data,
- int dataLen, void *callbackData)
+ int dataLen, void *callbackData)
{
- // We ignore end of element callbacks because we don't care about them
- if (!data) {
- return S3StatusOK;
- }
-
- ErrorParser *errorParser = (ErrorParser *) callbackData;
-
- int fit;
-
- if (!strcmp(elementPath, "Error")) {
- // Ignore, this is the Error element itself, we only care about subs
- }
- else if (!strcmp(elementPath, "Error/Code")) {
- string_buffer_append(errorParser->code, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "Error/Message")) {
- string_buffer_append(errorParser->message, data, dataLen, fit);
- errorParser->s3ErrorDetails.message = errorParser->message;
- }
- else if (!strcmp(elementPath, "Error/Resource")) {
- string_buffer_append(errorParser->resource, data, dataLen, fit);
- errorParser->s3ErrorDetails.resource = errorParser->resource;
- }
- else if (!strcmp(elementPath, "Error/FurtherDetails")) {
- string_buffer_append(errorParser->furtherDetails, data, dataLen, fit);
- errorParser->s3ErrorDetails.furtherDetails =
- errorParser->furtherDetails;
- }
- else {
- if (strncmp(elementPath, "Error/", sizeof("Error/") - 1)) {
- // If for some weird reason it's not within the Error element,
- // ignore it
- return S3StatusOK;
- }
- // It's an unknown error element. See if it matches the most
- // recent error element.
- const char *elementName = &(elementPath[sizeof("Error/") - 1]);
- if (errorParser->s3ErrorDetails.extraDetailsCount &&
- !strcmp(elementName, errorParser->s3ErrorDetails.extraDetails
- [errorParser->s3ErrorDetails.extraDetailsCount - 1].name)) {
- // Append the value
- string_multibuffer_append(errorParser->extraDetailsNamesValues,
- data, dataLen, fit);
- // If it didn't fit, remove this extra
- if (!fit) {
- errorParser->s3ErrorDetails.extraDetailsCount--;
- }
- return S3StatusOK;
- }
- // OK, must add another unknown error element, if it will fit.
-        if (errorParser->s3ErrorDetails.extraDetailsCount ==
-            (int) (sizeof(errorParser->extraDetails) / sizeof(S3NameValue))) {
- // Won't fit. Ignore this one.
- return S3StatusOK;
- }
- // Copy in the name and value
- char *name = string_multibuffer_current
- (errorParser->extraDetailsNamesValues);
- int nameLen = strlen(elementName);
- string_multibuffer_add(errorParser->extraDetailsNamesValues,
- elementName, nameLen, fit);
- if (!fit) {
- // Name didn't fit; ignore this one.
- return S3StatusOK;
- }
- char *value = string_multibuffer_current
- (errorParser->extraDetailsNamesValues);
- string_multibuffer_add(errorParser->extraDetailsNamesValues,
- data, dataLen, fit);
- if (!fit) {
- // Value didn't fit; ignore this one.
- return S3StatusOK;
- }
- S3NameValue *nv =
- &(errorParser->extraDetails
- [errorParser->s3ErrorDetails.extraDetailsCount++]);
- nv->name = name;
- nv->value = value;
- }
-
- return S3StatusOK;
+ // We ignore end of element callbacks because we don't care about them
+ if (!data) {
+ return S3StatusOK;
+ }
+
+ ErrorParser *errorParser = (ErrorParser *) callbackData;
+
+ int fit;
+
+ if (!strcmp(elementPath, "Error")) {
+ // Ignore, this is the Error element itself, we only care about subs
+ }
+ else if (!strcmp(elementPath, "Error/Code")) {
+ string_buffer_append(errorParser->code, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "Error/Message")) {
+ string_buffer_append(errorParser->message, data, dataLen, fit);
+ errorParser->s3ErrorDetails.message = errorParser->message;
+ }
+ else if (!strcmp(elementPath, "Error/Resource")) {
+ string_buffer_append(errorParser->resource, data, dataLen, fit);
+ errorParser->s3ErrorDetails.resource = errorParser->resource;
+ }
+ else if (!strcmp(elementPath, "Error/FurtherDetails")) {
+ string_buffer_append(errorParser->furtherDetails, data, dataLen, fit);
+ errorParser->s3ErrorDetails.furtherDetails =
+ errorParser->furtherDetails;
+ }
+ else {
+ if (strncmp(elementPath, "Error/", sizeof("Error/") - 1)) {
+ // If for some weird reason it's not within the Error element,
+ // ignore it
+ return S3StatusOK;
+ }
+ // It's an unknown error element. See if it matches the most
+ // recent error element.
+ const char *elementName = &(elementPath[sizeof("Error/") - 1]);
+ if (errorParser->s3ErrorDetails.extraDetailsCount &&
+ !strcmp(elementName, errorParser->s3ErrorDetails.extraDetails
+ [errorParser->s3ErrorDetails.extraDetailsCount - 1].name)) {
+ // Append the value
+ string_multibuffer_append(errorParser->extraDetailsNamesValues,
+ data, dataLen, fit);
+ // If it didn't fit, remove this extra
+ if (!fit) {
+ errorParser->s3ErrorDetails.extraDetailsCount--;
+ }
+ return S3StatusOK;
+ }
+ // OK, must add another unknown error element, if it will fit.
+        if (errorParser->s3ErrorDetails.extraDetailsCount ==
+            (int) (sizeof(errorParser->extraDetails) / sizeof(S3NameValue))) {
+ // Won't fit. Ignore this one.
+ return S3StatusOK;
+ }
+ // Copy in the name and value
+ char *name = string_multibuffer_current
+ (errorParser->extraDetailsNamesValues);
+ int nameLen = strlen(elementName);
+ string_multibuffer_add(errorParser->extraDetailsNamesValues,
+ elementName, nameLen, fit);
+ if (!fit) {
+ // Name didn't fit; ignore this one.
+ return S3StatusOK;
+ }
+ char *value = string_multibuffer_current
+ (errorParser->extraDetailsNamesValues);
+ string_multibuffer_add(errorParser->extraDetailsNamesValues,
+ data, dataLen, fit);
+ if (!fit) {
+ // Value didn't fit; ignore this one.
+ return S3StatusOK;
+ }
+ S3NameValue *nv =
+ &(errorParser->extraDetails
+ [errorParser->s3ErrorDetails.extraDetailsCount++]);
+ nv->name = name;
+ nv->value = value;
+ }
+
+ return S3StatusOK;
}
void error_parser_initialize(ErrorParser *errorParser)
{
- errorParser->s3ErrorDetails.message = 0;
- errorParser->s3ErrorDetails.resource = 0;
- errorParser->s3ErrorDetails.furtherDetails = 0;
- errorParser->s3ErrorDetails.extraDetailsCount = 0;
- errorParser->s3ErrorDetails.extraDetails = errorParser->extraDetails;
- errorParser->errorXmlParserInitialized = 0;
- string_buffer_initialize(errorParser->code);
- string_buffer_initialize(errorParser->message);
- string_buffer_initialize(errorParser->resource);
- string_buffer_initialize(errorParser->furtherDetails);
- string_multibuffer_initialize(errorParser->extraDetailsNamesValues);
+ errorParser->s3ErrorDetails.message = 0;
+ errorParser->s3ErrorDetails.resource = 0;
+ errorParser->s3ErrorDetails.furtherDetails = 0;
+ errorParser->s3ErrorDetails.extraDetailsCount = 0;
+ errorParser->s3ErrorDetails.extraDetails = errorParser->extraDetails;
+ errorParser->errorXmlParserInitialized = 0;
+ string_buffer_initialize(errorParser->code);
+ string_buffer_initialize(errorParser->message);
+ string_buffer_initialize(errorParser->resource);
+ string_buffer_initialize(errorParser->furtherDetails);
+ string_multibuffer_initialize(errorParser->extraDetailsNamesValues);
}
S3Status error_parser_add(ErrorParser *errorParser, char *buffer,
- int bufferSize)
+ int bufferSize)
{
- if (!errorParser->errorXmlParserInitialized) {
- simplexml_initialize(&(errorParser->errorXmlParser), &errorXmlCallback,
- errorParser);
- errorParser->errorXmlParserInitialized = 1;
- }
+ if (!errorParser->errorXmlParserInitialized) {
+ simplexml_initialize(&(errorParser->errorXmlParser), &errorXmlCallback,
+ errorParser);
+ errorParser->errorXmlParserInitialized = 1;
+ }
- return simplexml_add(&(errorParser->errorXmlParser), buffer, bufferSize);
+ return simplexml_add(&(errorParser->errorXmlParser), buffer, bufferSize);
}
void error_parser_convert_status(ErrorParser *errorParser, S3Status *status)
{
- // Convert the error status string into a code
- if (!errorParser->codeLen) {
- return;
- }
-
-#define HANDLE_CODE(name) \
- do { \
- if (!strcmp(errorParser->code, #name)) { \
- *status = S3StatusError##name; \
- goto code_set; \
- } \
- } while (0)
-
- HANDLE_CODE(AccessDenied);
- HANDLE_CODE(AccountProblem);
- HANDLE_CODE(AmbiguousGrantByEmailAddress);
- HANDLE_CODE(BadDigest);
- HANDLE_CODE(BucketAlreadyExists);
- HANDLE_CODE(BucketAlreadyOwnedByYou);
- HANDLE_CODE(BucketNotEmpty);
- HANDLE_CODE(CredentialsNotSupported);
- HANDLE_CODE(CrossLocationLoggingProhibited);
- HANDLE_CODE(EntityTooSmall);
- HANDLE_CODE(EntityTooLarge);
- HANDLE_CODE(ExpiredToken);
- HANDLE_CODE(IncompleteBody);
- HANDLE_CODE(IncorrectNumberOfFilesInPostRequest);
- HANDLE_CODE(InlineDataTooLarge);
- HANDLE_CODE(InternalError);
- HANDLE_CODE(InvalidAccessKeyId);
- HANDLE_CODE(InvalidAddressingHeader);
- HANDLE_CODE(InvalidArgument);
- HANDLE_CODE(InvalidBucketName);
- HANDLE_CODE(InvalidDigest);
- HANDLE_CODE(InvalidLocationConstraint);
- HANDLE_CODE(InvalidPayer);
- HANDLE_CODE(InvalidPolicyDocument);
- HANDLE_CODE(InvalidRange);
- HANDLE_CODE(InvalidSecurity);
- HANDLE_CODE(InvalidSOAPRequest);
- HANDLE_CODE(InvalidStorageClass);
- HANDLE_CODE(InvalidTargetBucketForLogging);
- HANDLE_CODE(InvalidToken);
- HANDLE_CODE(InvalidURI);
- HANDLE_CODE(KeyTooLong);
- HANDLE_CODE(MalformedACLError);
- HANDLE_CODE(MalformedXML);
- HANDLE_CODE(MaxMessageLengthExceeded);
- HANDLE_CODE(MaxPostPreDataLengthExceededError);
- HANDLE_CODE(MetadataTooLarge);
- HANDLE_CODE(MethodNotAllowed);
- HANDLE_CODE(MissingAttachment);
- HANDLE_CODE(MissingContentLength);
- HANDLE_CODE(MissingSecurityElement);
- HANDLE_CODE(MissingSecurityHeader);
- HANDLE_CODE(NoLoggingStatusForKey);
- HANDLE_CODE(NoSuchBucket);
- HANDLE_CODE(NoSuchKey);
- HANDLE_CODE(NotImplemented);
- HANDLE_CODE(NotSignedUp);
- HANDLE_CODE(OperationAborted);
- HANDLE_CODE(PermanentRedirect);
- HANDLE_CODE(PreconditionFailed);
- HANDLE_CODE(Redirect);
- HANDLE_CODE(RequestIsNotMultiPartContent);
- HANDLE_CODE(RequestTimeout);
- HANDLE_CODE(RequestTimeTooSkewed);
- HANDLE_CODE(RequestTorrentOfBucketError);
- HANDLE_CODE(SignatureDoesNotMatch);
- HANDLE_CODE(SlowDown);
- HANDLE_CODE(TemporaryRedirect);
- HANDLE_CODE(TokenRefreshRequired);
- HANDLE_CODE(TooManyBuckets);
- HANDLE_CODE(UnexpectedContent);
- HANDLE_CODE(UnresolvableGrantByEmailAddress);
- HANDLE_CODE(UserKeyMustBeSpecified);
- *status = S3StatusErrorUnknown;
+ // Convert the error status string into a code
+ if (!errorParser->codeLen) {
+ return;
+ }
+
+#define HANDLE_CODE(name) \
+ do { \
+ if (!strcmp(errorParser->code, #name)) { \
+ *status = S3StatusError##name; \
+ goto code_set; \
+ } \
+ } while (0)
+
+ HANDLE_CODE(AccessDenied);
+ HANDLE_CODE(AccountProblem);
+ HANDLE_CODE(AmbiguousGrantByEmailAddress);
+ HANDLE_CODE(BadDigest);
+ HANDLE_CODE(BucketAlreadyExists);
+ HANDLE_CODE(BucketAlreadyOwnedByYou);
+ HANDLE_CODE(BucketNotEmpty);
+ HANDLE_CODE(CredentialsNotSupported);
+ HANDLE_CODE(CrossLocationLoggingProhibited);
+ HANDLE_CODE(EntityTooSmall);
+ HANDLE_CODE(EntityTooLarge);
+ HANDLE_CODE(ExpiredToken);
+ HANDLE_CODE(IncompleteBody);
+ HANDLE_CODE(IncorrectNumberOfFilesInPostRequest);
+ HANDLE_CODE(InlineDataTooLarge);
+ HANDLE_CODE(InternalError);
+ HANDLE_CODE(InvalidAccessKeyId);
+ HANDLE_CODE(InvalidAddressingHeader);
+ HANDLE_CODE(InvalidArgument);
+ HANDLE_CODE(InvalidBucketName);
+ HANDLE_CODE(InvalidDigest);
+ HANDLE_CODE(InvalidLocationConstraint);
+ HANDLE_CODE(InvalidPayer);
+ HANDLE_CODE(InvalidPolicyDocument);
+ HANDLE_CODE(InvalidRange);
+ HANDLE_CODE(InvalidSecurity);
+ HANDLE_CODE(InvalidSOAPRequest);
+ HANDLE_CODE(InvalidStorageClass);
+ HANDLE_CODE(InvalidTargetBucketForLogging);
+ HANDLE_CODE(InvalidToken);
+ HANDLE_CODE(InvalidURI);
+ HANDLE_CODE(KeyTooLong);
+ HANDLE_CODE(MalformedACLError);
+ HANDLE_CODE(MalformedXML);
+ HANDLE_CODE(MaxMessageLengthExceeded);
+ HANDLE_CODE(MaxPostPreDataLengthExceededError);
+ HANDLE_CODE(MetadataTooLarge);
+ HANDLE_CODE(MethodNotAllowed);
+ HANDLE_CODE(MissingAttachment);
+ HANDLE_CODE(MissingContentLength);
+ HANDLE_CODE(MissingSecurityElement);
+ HANDLE_CODE(MissingSecurityHeader);
+ HANDLE_CODE(NoLoggingStatusForKey);
+ HANDLE_CODE(NoSuchBucket);
+ HANDLE_CODE(NoSuchKey);
+ HANDLE_CODE(NotImplemented);
+ HANDLE_CODE(NotSignedUp);
+ HANDLE_CODE(OperationAborted);
+ HANDLE_CODE(PermanentRedirect);
+ HANDLE_CODE(PreconditionFailed);
+ HANDLE_CODE(Redirect);
+ HANDLE_CODE(RequestIsNotMultiPartContent);
+ HANDLE_CODE(RequestTimeout);
+ HANDLE_CODE(RequestTimeTooSkewed);
+ HANDLE_CODE(RequestTorrentOfBucketError);
+ HANDLE_CODE(SignatureDoesNotMatch);
+ HANDLE_CODE(SlowDown);
+ HANDLE_CODE(TemporaryRedirect);
+ HANDLE_CODE(TokenRefreshRequired);
+ HANDLE_CODE(TooManyBuckets);
+ HANDLE_CODE(UnexpectedContent);
+ HANDLE_CODE(UnresolvableGrantByEmailAddress);
+ HANDLE_CODE(UserKeyMustBeSpecified);
+ *status = S3StatusErrorUnknown;
code_set:
- return;
+ return;
}
// Always call this
void error_parser_deinitialize(ErrorParser *errorParser)
{
- if (errorParser->errorXmlParserInitialized) {
- simplexml_deinitialize(&(errorParser->errorXmlParser));
- }
+ if (errorParser->errorXmlParserInitialized) {
+ simplexml_deinitialize(&(errorParser->errorXmlParser));
+ }
}
diff --git a/src/general.c b/src/general.c
index 1e23812..861c289 100644
--- a/src/general.c
+++ b/src/general.c
@@ -34,442 +34,442 @@ static int initializeCountG = 0;
S3Status S3_initialize(const char *userAgentInfo, int flags)
{
- if (initializeCountG++) {
- return S3StatusOK;
- }
+ if (initializeCountG++) {
+ return S3StatusOK;
+ }
- return request_api_initialize(userAgentInfo, flags);
+ return request_api_initialize(userAgentInfo, flags);
}
void S3_deinitialize()
{
- if (--initializeCountG) {
- return;
- }
+ if (--initializeCountG) {
+ return;
+ }
- request_api_deinitialize();
+ request_api_deinitialize();
}
const char *S3_get_status_name(S3Status status)
{
- switch (status) {
-#define handlecase(s) \
- case S3Status##s: \
- return #s
-
- handlecase(OK);
- handlecase(InternalError);
- handlecase(OutOfMemory);
- handlecase(Interrupted);
- handlecase(InvalidBucketNameTooLong);
- handlecase(InvalidBucketNameFirstCharacter);
- handlecase(InvalidBucketNameCharacter);
- handlecase(InvalidBucketNameCharacterSequence);
- handlecase(InvalidBucketNameTooShort);
- handlecase(InvalidBucketNameDotQuadNotation);
- handlecase(QueryParamsTooLong);
- handlecase(FailedToInitializeRequest);
- handlecase(MetaDataHeadersTooLong);
- handlecase(BadMetaData);
- handlecase(BadContentType);
- handlecase(ContentTypeTooLong);
- handlecase(BadMD5);
- handlecase(MD5TooLong);
- handlecase(BadCacheControl);
- handlecase(CacheControlTooLong);
- handlecase(BadContentDispositionFilename);
- handlecase(ContentDispositionFilenameTooLong);
- handlecase(BadContentEncoding);
- handlecase(ContentEncodingTooLong);
- handlecase(BadIfMatchETag);
- handlecase(IfMatchETagTooLong);
- handlecase(BadIfNotMatchETag);
- handlecase(IfNotMatchETagTooLong);
- handlecase(HeadersTooLong);
- handlecase(KeyTooLong);
- handlecase(UriTooLong);
- handlecase(XmlParseFailure);
- handlecase(EmailAddressTooLong);
- handlecase(UserIdTooLong);
- handlecase(UserDisplayNameTooLong);
- handlecase(GroupUriTooLong);
- handlecase(PermissionTooLong);
- handlecase(TargetBucketTooLong);
- handlecase(TargetPrefixTooLong);
- handlecase(TooManyGrants);
- handlecase(BadGrantee);
- handlecase(BadPermission);
- handlecase(XmlDocumentTooLarge);
- handlecase(NameLookupError);
- handlecase(FailedToConnect);
- handlecase(ServerFailedVerification);
- handlecase(ConnectionFailed);
- handlecase(AbortedByCallback);
- handlecase(ErrorAccessDenied);
- handlecase(ErrorAccountProblem);
- handlecase(ErrorAmbiguousGrantByEmailAddress);
- handlecase(ErrorBadDigest);
- handlecase(ErrorBucketAlreadyExists);
- handlecase(ErrorBucketAlreadyOwnedByYou);
- handlecase(ErrorBucketNotEmpty);
- handlecase(ErrorCredentialsNotSupported);
- handlecase(ErrorCrossLocationLoggingProhibited);
- handlecase(ErrorEntityTooSmall);
- handlecase(ErrorEntityTooLarge);
- handlecase(ErrorExpiredToken);
- handlecase(ErrorIncompleteBody);
- handlecase(ErrorIncorrectNumberOfFilesInPostRequest);
- handlecase(ErrorInlineDataTooLarge);
- handlecase(ErrorInternalError);
- handlecase(ErrorInvalidAccessKeyId);
- handlecase(ErrorInvalidAddressingHeader);
- handlecase(ErrorInvalidArgument);
- handlecase(ErrorInvalidBucketName);
- handlecase(ErrorInvalidDigest);
- handlecase(ErrorInvalidLocationConstraint);
- handlecase(ErrorInvalidPayer);
- handlecase(ErrorInvalidPolicyDocument);
- handlecase(ErrorInvalidRange);
- handlecase(ErrorInvalidSecurity);
- handlecase(ErrorInvalidSOAPRequest);
- handlecase(ErrorInvalidStorageClass);
- handlecase(ErrorInvalidTargetBucketForLogging);
- handlecase(ErrorInvalidToken);
- handlecase(ErrorInvalidURI);
- handlecase(ErrorKeyTooLong);
- handlecase(ErrorMalformedACLError);
- handlecase(ErrorMalformedXML);
- handlecase(ErrorMaxMessageLengthExceeded);
- handlecase(ErrorMaxPostPreDataLengthExceededError);
- handlecase(ErrorMetadataTooLarge);
- handlecase(ErrorMethodNotAllowed);
- handlecase(ErrorMissingAttachment);
- handlecase(ErrorMissingContentLength);
- handlecase(ErrorMissingSecurityElement);
- handlecase(ErrorMissingSecurityHeader);
- handlecase(ErrorNoLoggingStatusForKey);
- handlecase(ErrorNoSuchBucket);
- handlecase(ErrorNoSuchKey);
- handlecase(ErrorNotImplemented);
- handlecase(ErrorNotSignedUp);
- handlecase(ErrorOperationAborted);
- handlecase(ErrorPermanentRedirect);
- handlecase(ErrorPreconditionFailed);
- handlecase(ErrorRedirect);
- handlecase(ErrorRequestIsNotMultiPartContent);
- handlecase(ErrorRequestTimeout);
- handlecase(ErrorRequestTimeTooSkewed);
- handlecase(ErrorRequestTorrentOfBucketError);
- handlecase(ErrorSignatureDoesNotMatch);
- handlecase(ErrorSlowDown);
- handlecase(ErrorTemporaryRedirect);
- handlecase(ErrorTokenRefreshRequired);
- handlecase(ErrorTooManyBuckets);
- handlecase(ErrorUnexpectedContent);
- handlecase(ErrorUnresolvableGrantByEmailAddress);
- handlecase(ErrorUserKeyMustBeSpecified);
- handlecase(ErrorUnknown);
- handlecase(HttpErrorMovedTemporarily);
- handlecase(HttpErrorBadRequest);
- handlecase(HttpErrorForbidden);
- handlecase(HttpErrorNotFound);
- handlecase(HttpErrorConflict);
- handlecase(HttpErrorUnknown);
- }
-
- return "Unknown";
+ switch (status) {
+#define handlecase(s) \
+ case S3Status##s: \
+ return #s
+
+ handlecase(OK);
+ handlecase(InternalError);
+ handlecase(OutOfMemory);
+ handlecase(Interrupted);
+ handlecase(InvalidBucketNameTooLong);
+ handlecase(InvalidBucketNameFirstCharacter);
+ handlecase(InvalidBucketNameCharacter);
+ handlecase(InvalidBucketNameCharacterSequence);
+ handlecase(InvalidBucketNameTooShort);
+ handlecase(InvalidBucketNameDotQuadNotation);
+ handlecase(QueryParamsTooLong);
+ handlecase(FailedToInitializeRequest);
+ handlecase(MetaDataHeadersTooLong);
+ handlecase(BadMetaData);
+ handlecase(BadContentType);
+ handlecase(ContentTypeTooLong);
+ handlecase(BadMD5);
+ handlecase(MD5TooLong);
+ handlecase(BadCacheControl);
+ handlecase(CacheControlTooLong);
+ handlecase(BadContentDispositionFilename);
+ handlecase(ContentDispositionFilenameTooLong);
+ handlecase(BadContentEncoding);
+ handlecase(ContentEncodingTooLong);
+ handlecase(BadIfMatchETag);
+ handlecase(IfMatchETagTooLong);
+ handlecase(BadIfNotMatchETag);
+ handlecase(IfNotMatchETagTooLong);
+ handlecase(HeadersTooLong);
+ handlecase(KeyTooLong);
+ handlecase(UriTooLong);
+ handlecase(XmlParseFailure);
+ handlecase(EmailAddressTooLong);
+ handlecase(UserIdTooLong);
+ handlecase(UserDisplayNameTooLong);
+ handlecase(GroupUriTooLong);
+ handlecase(PermissionTooLong);
+ handlecase(TargetBucketTooLong);
+ handlecase(TargetPrefixTooLong);
+ handlecase(TooManyGrants);
+ handlecase(BadGrantee);
+ handlecase(BadPermission);
+ handlecase(XmlDocumentTooLarge);
+ handlecase(NameLookupError);
+ handlecase(FailedToConnect);
+ handlecase(ServerFailedVerification);
+ handlecase(ConnectionFailed);
+ handlecase(AbortedByCallback);
+ handlecase(ErrorAccessDenied);
+ handlecase(ErrorAccountProblem);
+ handlecase(ErrorAmbiguousGrantByEmailAddress);
+ handlecase(ErrorBadDigest);
+ handlecase(ErrorBucketAlreadyExists);
+ handlecase(ErrorBucketAlreadyOwnedByYou);
+ handlecase(ErrorBucketNotEmpty);
+ handlecase(ErrorCredentialsNotSupported);
+ handlecase(ErrorCrossLocationLoggingProhibited);
+ handlecase(ErrorEntityTooSmall);
+ handlecase(ErrorEntityTooLarge);
+ handlecase(ErrorExpiredToken);
+ handlecase(ErrorIncompleteBody);
+ handlecase(ErrorIncorrectNumberOfFilesInPostRequest);
+ handlecase(ErrorInlineDataTooLarge);
+ handlecase(ErrorInternalError);
+ handlecase(ErrorInvalidAccessKeyId);
+ handlecase(ErrorInvalidAddressingHeader);
+ handlecase(ErrorInvalidArgument);
+ handlecase(ErrorInvalidBucketName);
+ handlecase(ErrorInvalidDigest);
+ handlecase(ErrorInvalidLocationConstraint);
+ handlecase(ErrorInvalidPayer);
+ handlecase(ErrorInvalidPolicyDocument);
+ handlecase(ErrorInvalidRange);
+ handlecase(ErrorInvalidSecurity);
+ handlecase(ErrorInvalidSOAPRequest);
+ handlecase(ErrorInvalidStorageClass);
+ handlecase(ErrorInvalidTargetBucketForLogging);
+ handlecase(ErrorInvalidToken);
+ handlecase(ErrorInvalidURI);
+ handlecase(ErrorKeyTooLong);
+ handlecase(ErrorMalformedACLError);
+ handlecase(ErrorMalformedXML);
+ handlecase(ErrorMaxMessageLengthExceeded);
+ handlecase(ErrorMaxPostPreDataLengthExceededError);
+ handlecase(ErrorMetadataTooLarge);
+ handlecase(ErrorMethodNotAllowed);
+ handlecase(ErrorMissingAttachment);
+ handlecase(ErrorMissingContentLength);
+ handlecase(ErrorMissingSecurityElement);
+ handlecase(ErrorMissingSecurityHeader);
+ handlecase(ErrorNoLoggingStatusForKey);
+ handlecase(ErrorNoSuchBucket);
+ handlecase(ErrorNoSuchKey);
+ handlecase(ErrorNotImplemented);
+ handlecase(ErrorNotSignedUp);
+ handlecase(ErrorOperationAborted);
+ handlecase(ErrorPermanentRedirect);
+ handlecase(ErrorPreconditionFailed);
+ handlecase(ErrorRedirect);
+ handlecase(ErrorRequestIsNotMultiPartContent);
+ handlecase(ErrorRequestTimeout);
+ handlecase(ErrorRequestTimeTooSkewed);
+ handlecase(ErrorRequestTorrentOfBucketError);
+ handlecase(ErrorSignatureDoesNotMatch);
+ handlecase(ErrorSlowDown);
+ handlecase(ErrorTemporaryRedirect);
+ handlecase(ErrorTokenRefreshRequired);
+ handlecase(ErrorTooManyBuckets);
+ handlecase(ErrorUnexpectedContent);
+ handlecase(ErrorUnresolvableGrantByEmailAddress);
+ handlecase(ErrorUserKeyMustBeSpecified);
+ handlecase(ErrorUnknown);
+ handlecase(HttpErrorMovedTemporarily);
+ handlecase(HttpErrorBadRequest);
+ handlecase(HttpErrorForbidden);
+ handlecase(HttpErrorNotFound);
+ handlecase(HttpErrorConflict);
+ handlecase(HttpErrorUnknown);
+ }
+
+ return "Unknown";
}
S3Status S3_validate_bucket_name(const char *bucketName, S3UriStyle uriStyle)
{
- int virtualHostStyle = (uriStyle == S3UriStyleVirtualHost);
- int len = 0, maxlen = virtualHostStyle ? 63 : 255;
- const char *b = bucketName;
-
- int hasDot = 0;
- int hasNonDigit = 0;
-
- while (*b) {
- if (len == maxlen) {
- return S3StatusInvalidBucketNameTooLong;
- }
- else if (isalpha(*b)) {
- len++, b++;
- hasNonDigit = 1;
- }
- else if (isdigit(*b)) {
- len++, b++;
- }
- else if (len == 0) {
- return S3StatusInvalidBucketNameFirstCharacter;
- }
- else if (*b == '_') {
- /* Virtual host style bucket names cannot have underscores */
- if (virtualHostStyle) {
- return S3StatusInvalidBucketNameCharacter;
- }
- len++, b++;
- hasNonDigit = 1;
- }
- else if (*b == '-') {
- /* Virtual host style bucket names cannot have .- */
- if (virtualHostStyle && (b > bucketName) && (*(b - 1) == '.')) {
- return S3StatusInvalidBucketNameCharacterSequence;
- }
- len++, b++;
- hasNonDigit = 1;
- }
- else if (*b == '.') {
- /* Virtual host style bucket names cannot have -. */
- if (virtualHostStyle && (b > bucketName) && (*(b - 1) == '-')) {
- return S3StatusInvalidBucketNameCharacterSequence;
- }
- len++, b++;
- hasDot = 1;
- }
- else {
- return S3StatusInvalidBucketNameCharacter;
- }
- }
-
- if (len < 3) {
- return S3StatusInvalidBucketNameTooShort;
- }
-
- /* It's not clear from Amazon's documentation exactly what 'IP address
- style' means. In its strictest sense, it could mean 'could be a valid
- IP address', which would mean that 255.255.255.255 would be invalid,
-     whereas 256.256.256.256 would be valid. Or it could mean 'has 4 sets
- of digits separated by dots'. Who knows. Let's just be really
- conservative here: if it has any dots, and no non-digit characters,
- then we reject it */
- if (hasDot && !hasNonDigit) {
- return S3StatusInvalidBucketNameDotQuadNotation;
- }
-
- return S3StatusOK;
+ int virtualHostStyle = (uriStyle == S3UriStyleVirtualHost);
+ int len = 0, maxlen = virtualHostStyle ? 63 : 255;
+ const char *b = bucketName;
+
+ int hasDot = 0;
+ int hasNonDigit = 0;
+
+ while (*b) {
+ if (len == maxlen) {
+ return S3StatusInvalidBucketNameTooLong;
+ }
+ else if (isalpha(*b)) {
+ len++, b++;
+ hasNonDigit = 1;
+ }
+ else if (isdigit(*b)) {
+ len++, b++;
+ }
+ else if (len == 0) {
+ return S3StatusInvalidBucketNameFirstCharacter;
+ }
+ else if (*b == '_') {
+ /* Virtual host style bucket names cannot have underscores */
+ if (virtualHostStyle) {
+ return S3StatusInvalidBucketNameCharacter;
+ }
+ len++, b++;
+ hasNonDigit = 1;
+ }
+ else if (*b == '-') {
+ /* Virtual host style bucket names cannot have .- */
+ if (virtualHostStyle && (b > bucketName) && (*(b - 1) == '.')) {
+ return S3StatusInvalidBucketNameCharacterSequence;
+ }
+ len++, b++;
+ hasNonDigit = 1;
+ }
+ else if (*b == '.') {
+ /* Virtual host style bucket names cannot have -. */
+ if (virtualHostStyle && (b > bucketName) && (*(b - 1) == '-')) {
+ return S3StatusInvalidBucketNameCharacterSequence;
+ }
+ len++, b++;
+ hasDot = 1;
+ }
+ else {
+ return S3StatusInvalidBucketNameCharacter;
+ }
+ }
+
+ if (len < 3) {
+ return S3StatusInvalidBucketNameTooShort;
+ }
+
+ /* It's not clear from Amazon's documentation exactly what 'IP address
+ style' means. In its strictest sense, it could mean 'could be a valid
+ IP address', which would mean that 255.255.255.255 would be invalid,
+       whereas 256.256.256.256 would be valid. Or it could mean 'has 4 sets
+ of digits separated by dots'. Who knows. Let's just be really
+ conservative here: if it has any dots, and no non-digit characters,
+ then we reject it */
+ if (hasDot && !hasNonDigit) {
+ return S3StatusInvalidBucketNameDotQuadNotation;
+ }
+
+ return S3StatusOK;
}
typedef struct ConvertAclData
{
- char *ownerId;
- int ownerIdLen;
- char *ownerDisplayName;
- int ownerDisplayNameLen;
- int *aclGrantCountReturn;
- S3AclGrant *aclGrants;
-
- string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE);
- string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE);
- string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
- string_buffer(groupUri, 128);
- string_buffer(permission, 32);
+ char *ownerId;
+ int ownerIdLen;
+ char *ownerDisplayName;
+ int ownerDisplayNameLen;
+ int *aclGrantCountReturn;
+ S3AclGrant *aclGrants;
+
+ string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE);
+ string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE);
+ string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
+ string_buffer(groupUri, 128);
+ string_buffer(permission, 32);
} ConvertAclData;
static S3Status convertAclXmlCallback(const char *elementPath,
- const char *data, int dataLen,
- void *callbackData)
+ const char *data, int dataLen,
+ void *callbackData)
{
- ConvertAclData *caData = (ConvertAclData *) callbackData;
-
- int fit;
-
- if (data) {
- if (!strcmp(elementPath, "AccessControlPolicy/Owner/ID")) {
- caData->ownerIdLen +=
- snprintf(&(caData->ownerId[caData->ownerIdLen]),
- S3_MAX_GRANTEE_USER_ID_SIZE - caData->ownerIdLen - 1,
- "%.*s", dataLen, data);
- if (caData->ownerIdLen >= S3_MAX_GRANTEE_USER_ID_SIZE) {
- return S3StatusUserIdTooLong;
- }
- }
- else if (!strcmp(elementPath, "AccessControlPolicy/Owner/"
- "DisplayName")) {
- caData->ownerDisplayNameLen +=
- snprintf(&(caData->ownerDisplayName
- [caData->ownerDisplayNameLen]),
- S3_MAX_GRANTEE_DISPLAY_NAME_SIZE -
- caData->ownerDisplayNameLen - 1,
- "%.*s", dataLen, data);
- if (caData->ownerDisplayNameLen >=
- S3_MAX_GRANTEE_DISPLAY_NAME_SIZE) {
- return S3StatusUserDisplayNameTooLong;
- }
- }
- else if (!strcmp(elementPath,
- "AccessControlPolicy/AccessControlList/Grant/"
- "Grantee/EmailAddress")) {
- // AmazonCustomerByEmail
- string_buffer_append(caData->emailAddress, data, dataLen, fit);
- if (!fit) {
- return S3StatusEmailAddressTooLong;
- }
- }
- else if (!strcmp(elementPath,
- "AccessControlPolicy/AccessControlList/Grant/"
- "Grantee/ID")) {
- // CanonicalUser
- string_buffer_append(caData->userId, data, dataLen, fit);
- if (!fit) {
- return S3StatusUserIdTooLong;
- }
- }
- else if (!strcmp(elementPath,
- "AccessControlPolicy/AccessControlList/Grant/"
- "Grantee/DisplayName")) {
- // CanonicalUser
- string_buffer_append(caData->userDisplayName, data, dataLen, fit);
- if (!fit) {
- return S3StatusUserDisplayNameTooLong;
- }
- }
- else if (!strcmp(elementPath,
- "AccessControlPolicy/AccessControlList/Grant/"
- "Grantee/URI")) {
- // Group
- string_buffer_append(caData->groupUri, data, dataLen, fit);
- if (!fit) {
- return S3StatusGroupUriTooLong;
- }
- }
- else if (!strcmp(elementPath,
- "AccessControlPolicy/AccessControlList/Grant/"
- "Permission")) {
- // Permission
- string_buffer_append(caData->permission, data, dataLen, fit);
- if (!fit) {
- return S3StatusPermissionTooLong;
- }
- }
- }
- else {
- if (!strcmp(elementPath, "AccessControlPolicy/AccessControlList/"
- "Grant")) {
- // A grant has just been completed; so add the next S3AclGrant
- // based on the values read
- if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) {
- return S3StatusTooManyGrants;
- }
-
- S3AclGrant *grant = &(caData->aclGrants
- [*(caData->aclGrantCountReturn)]);
-
- if (caData->emailAddress[0]) {
- grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
- strcpy(grant->grantee.amazonCustomerByEmail.emailAddress,
- caData->emailAddress);
- }
- else if (caData->userId[0] && caData->userDisplayName[0]) {
- grant->granteeType = S3GranteeTypeCanonicalUser;
- strcpy(grant->grantee.canonicalUser.id, caData->userId);
- strcpy(grant->grantee.canonicalUser.displayName,
- caData->userDisplayName);
- }
- else if (caData->groupUri[0]) {
- if (!strcmp(caData->groupUri,
- "http://acs.amazonaws.com/groups/global/"
- "AuthenticatedUsers")) {
- grant->granteeType = S3GranteeTypeAllAwsUsers;
- }
- else if (!strcmp(caData->groupUri,
- "http://acs.amazonaws.com/groups/global/"
- "AllUsers")) {
- grant->granteeType = S3GranteeTypeAllUsers;
- }
- else if (!strcmp(caData->groupUri,
- "http://acs.amazonaws.com/groups/s3/"
- "LogDelivery")) {
- grant->granteeType = S3GranteeTypeLogDelivery;
- }
- else {
- return S3StatusBadGrantee;
- }
- }
- else {
- return S3StatusBadGrantee;
- }
-
- if (!strcmp(caData->permission, "READ")) {
- grant->permission = S3PermissionRead;
- }
- else if (!strcmp(caData->permission, "WRITE")) {
- grant->permission = S3PermissionWrite;
- }
- else if (!strcmp(caData->permission, "READ_ACP")) {
- grant->permission = S3PermissionReadACP;
- }
- else if (!strcmp(caData->permission, "WRITE_ACP")) {
- grant->permission = S3PermissionWriteACP;
- }
- else if (!strcmp(caData->permission, "FULL_CONTROL")) {
- grant->permission = S3PermissionFullControl;
- }
- else {
- return S3StatusBadPermission;
- }
-
- (*(caData->aclGrantCountReturn))++;
-
- string_buffer_initialize(caData->emailAddress);
- string_buffer_initialize(caData->userId);
- string_buffer_initialize(caData->userDisplayName);
- string_buffer_initialize(caData->groupUri);
- string_buffer_initialize(caData->permission);
- }
- }
-
- return S3StatusOK;
+ ConvertAclData *caData = (ConvertAclData *) callbackData;
+
+ int fit;
+
+ if (data) {
+ if (!strcmp(elementPath, "AccessControlPolicy/Owner/ID")) {
+ caData->ownerIdLen +=
+ snprintf(&(caData->ownerId[caData->ownerIdLen]),
+ S3_MAX_GRANTEE_USER_ID_SIZE - caData->ownerIdLen - 1,
+ "%.*s", dataLen, data);
+ if (caData->ownerIdLen >= S3_MAX_GRANTEE_USER_ID_SIZE) {
+ return S3StatusUserIdTooLong;
+ }
+ }
+ else if (!strcmp(elementPath, "AccessControlPolicy/Owner/"
+ "DisplayName")) {
+ caData->ownerDisplayNameLen +=
+ snprintf(&(caData->ownerDisplayName
+ [caData->ownerDisplayNameLen]),
+ S3_MAX_GRANTEE_DISPLAY_NAME_SIZE -
+ caData->ownerDisplayNameLen - 1,
+ "%.*s", dataLen, data);
+ if (caData->ownerDisplayNameLen >=
+ S3_MAX_GRANTEE_DISPLAY_NAME_SIZE) {
+ return S3StatusUserDisplayNameTooLong;
+ }
+ }
+ else if (!strcmp(elementPath,
+ "AccessControlPolicy/AccessControlList/Grant/"
+ "Grantee/EmailAddress")) {
+ // AmazonCustomerByEmail
+ string_buffer_append(caData->emailAddress, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusEmailAddressTooLong;
+ }
+ }
+ else if (!strcmp(elementPath,
+ "AccessControlPolicy/AccessControlList/Grant/"
+ "Grantee/ID")) {
+ // CanonicalUser
+ string_buffer_append(caData->userId, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusUserIdTooLong;
+ }
+ }
+ else if (!strcmp(elementPath,
+ "AccessControlPolicy/AccessControlList/Grant/"
+ "Grantee/DisplayName")) {
+ // CanonicalUser
+ string_buffer_append(caData->userDisplayName, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusUserDisplayNameTooLong;
+ }
+ }
+ else if (!strcmp(elementPath,
+ "AccessControlPolicy/AccessControlList/Grant/"
+ "Grantee/URI")) {
+ // Group
+ string_buffer_append(caData->groupUri, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusGroupUriTooLong;
+ }
+ }
+ else if (!strcmp(elementPath,
+ "AccessControlPolicy/AccessControlList/Grant/"
+ "Permission")) {
+ // Permission
+ string_buffer_append(caData->permission, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusPermissionTooLong;
+ }
+ }
+ }
+ else {
+ if (!strcmp(elementPath, "AccessControlPolicy/AccessControlList/"
+ "Grant")) {
+ // A grant has just been completed; so add the next S3AclGrant
+ // based on the values read
+ if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) {
+ return S3StatusTooManyGrants;
+ }
+
+ S3AclGrant *grant = &(caData->aclGrants
+ [*(caData->aclGrantCountReturn)]);
+
+ if (caData->emailAddress[0]) {
+ grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
+ strcpy(grant->grantee.amazonCustomerByEmail.emailAddress,
+ caData->emailAddress);
+ }
+ else if (caData->userId[0] && caData->userDisplayName[0]) {
+ grant->granteeType = S3GranteeTypeCanonicalUser;
+ strcpy(grant->grantee.canonicalUser.id, caData->userId);
+ strcpy(grant->grantee.canonicalUser.displayName,
+ caData->userDisplayName);
+ }
+ else if (caData->groupUri[0]) {
+ if (!strcmp(caData->groupUri,
+ "http://acs.amazonaws.com/groups/global/"
+ "AuthenticatedUsers")) {
+ grant->granteeType = S3GranteeTypeAllAwsUsers;
+ }
+ else if (!strcmp(caData->groupUri,
+ "http://acs.amazonaws.com/groups/global/"
+ "AllUsers")) {
+ grant->granteeType = S3GranteeTypeAllUsers;
+ }
+ else if (!strcmp(caData->groupUri,
+ "http://acs.amazonaws.com/groups/s3/"
+ "LogDelivery")) {
+ grant->granteeType = S3GranteeTypeLogDelivery;
+ }
+ else {
+ return S3StatusBadGrantee;
+ }
+ }
+ else {
+ return S3StatusBadGrantee;
+ }
+
+ if (!strcmp(caData->permission, "READ")) {
+ grant->permission = S3PermissionRead;
+ }
+ else if (!strcmp(caData->permission, "WRITE")) {
+ grant->permission = S3PermissionWrite;
+ }
+ else if (!strcmp(caData->permission, "READ_ACP")) {
+ grant->permission = S3PermissionReadACP;
+ }
+ else if (!strcmp(caData->permission, "WRITE_ACP")) {
+ grant->permission = S3PermissionWriteACP;
+ }
+ else if (!strcmp(caData->permission, "FULL_CONTROL")) {
+ grant->permission = S3PermissionFullControl;
+ }
+ else {
+ return S3StatusBadPermission;
+ }
+
+ (*(caData->aclGrantCountReturn))++;
+
+ string_buffer_initialize(caData->emailAddress);
+ string_buffer_initialize(caData->userId);
+ string_buffer_initialize(caData->userDisplayName);
+ string_buffer_initialize(caData->groupUri);
+ string_buffer_initialize(caData->permission);
+ }
+ }
+
+ return S3StatusOK;
}
S3Status S3_convert_acl(char *aclXml, char *ownerId, char *ownerDisplayName,
- int *aclGrantCountReturn, S3AclGrant *aclGrants)
+ int *aclGrantCountReturn, S3AclGrant *aclGrants)
{
- ConvertAclData data;
-
- data.ownerId = ownerId;
- data.ownerIdLen = 0;
- data.ownerId[0] = 0;
- data.ownerDisplayName = ownerDisplayName;
- data.ownerDisplayNameLen = 0;
- data.ownerDisplayName[0] = 0;
- data.aclGrantCountReturn = aclGrantCountReturn;
- data.aclGrants = aclGrants;
- *aclGrantCountReturn = 0;
- string_buffer_initialize(data.emailAddress);
- string_buffer_initialize(data.userId);
- string_buffer_initialize(data.userDisplayName);
- string_buffer_initialize(data.groupUri);
- string_buffer_initialize(data.permission);
-
- // Use a simplexml parser
- SimpleXml simpleXml;
- simplexml_initialize(&simpleXml, &convertAclXmlCallback, &data);
-
- S3Status status = simplexml_add(&simpleXml, aclXml, strlen(aclXml));
-
- simplexml_deinitialize(&simpleXml);
-
- return status;
+ ConvertAclData data;
+
+ data.ownerId = ownerId;
+ data.ownerIdLen = 0;
+ data.ownerId[0] = 0;
+ data.ownerDisplayName = ownerDisplayName;
+ data.ownerDisplayNameLen = 0;
+ data.ownerDisplayName[0] = 0;
+ data.aclGrantCountReturn = aclGrantCountReturn;
+ data.aclGrants = aclGrants;
+ *aclGrantCountReturn = 0;
+ string_buffer_initialize(data.emailAddress);
+ string_buffer_initialize(data.userId);
+ string_buffer_initialize(data.userDisplayName);
+ string_buffer_initialize(data.groupUri);
+ string_buffer_initialize(data.permission);
+
+ // Use a simplexml parser
+ SimpleXml simpleXml;
+ simplexml_initialize(&simpleXml, &convertAclXmlCallback, &data);
+
+ S3Status status = simplexml_add(&simpleXml, aclXml, strlen(aclXml));
+
+ simplexml_deinitialize(&simpleXml);
+
+ return status;
}
int S3_status_is_retryable(S3Status status)
{
- switch (status) {
- case S3StatusNameLookupError:
- case S3StatusFailedToConnect:
- case S3StatusConnectionFailed:
- case S3StatusErrorInternalError:
- case S3StatusErrorOperationAborted:
- case S3StatusErrorRequestTimeout:
- return 1;
- default:
- return 0;
- }
+ switch (status) {
+ case S3StatusNameLookupError:
+ case S3StatusFailedToConnect:
+ case S3StatusConnectionFailed:
+ case S3StatusErrorInternalError:
+ case S3StatusErrorOperationAborted:
+ case S3StatusErrorRequestTimeout:
+ return 1;
+ default:
+ return 0;
+ }
}
diff --git a/src/mingw_functions.c b/src/mingw_functions.c
index fb3ec4d..0e2b7b2 100644
--- a/src/mingw_functions.c
+++ b/src/mingw_functions.c
@@ -29,91 +29,91 @@
unsigned long pthread_self()
{
- return (unsigned long) GetCurrentThreadId();
+ return (unsigned long) GetCurrentThreadId();
}
int pthread_mutex_init(pthread_mutex_t *mutex, void *v)
{
- (void) v;
+ (void) v;
- InitializeCriticalSection(&(mutex->criticalSection));
+ InitializeCriticalSection(&(mutex->criticalSection));
- return 0;
+ return 0;
}
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
- EnterCriticalSection(&(mutex->criticalSection));
+ EnterCriticalSection(&(mutex->criticalSection));
- return 0;
+ return 0;
}
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
- LeaveCriticalSection(&(mutex->criticalSection));
+ LeaveCriticalSection(&(mutex->criticalSection));
- return 0;
+ return 0;
}
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
- DeleteCriticalSection(&(mutex->criticalSection));
+ DeleteCriticalSection(&(mutex->criticalSection));
- return 0;
+ return 0;
}
int uname(struct utsname *u)
{
- OSVERSIONINFO info;
- info.dwOSVersionInfoSize = sizeof(info);
-
- if (!GetVersionEx(&info)) {
- return -1;
- }
-
- u->machine = "";
-
- switch (info.dwMajorVersion) {
- case 4:
- switch (info.dwMinorVersion) {
- case 0:
- u->sysname = "Microsoft Windows NT 4.0";
- break;
- case 10:
- u->sysname = "Microsoft Windows 98";
- break;
- case 90:
- u->sysname = "Microsoft Windows Me";
- break;
- default:
- return -1;
- }
- break;
-
- case 5:
- switch (info.dwMinorVersion) {
- case 0:
- u->sysname = "Microsoft Windows 2000";
- break;
- case 1:
- u->sysname = "Microsoft Windows XP";
- break;
- case 2:
- u->sysname = "Microsoft Server 2003";
- break;
- default:
- return -1;
- }
- break;
-
- default:
- return -1;
- }
-
- return 0;
+ OSVERSIONINFO info;
+ info.dwOSVersionInfoSize = sizeof(info);
+
+ if (!GetVersionEx(&info)) {
+ return -1;
+ }
+
+ u->machine = "";
+
+ switch (info.dwMajorVersion) {
+ case 4:
+ switch (info.dwMinorVersion) {
+ case 0:
+ u->sysname = "Microsoft Windows NT 4.0";
+ break;
+ case 10:
+ u->sysname = "Microsoft Windows 98";
+ break;
+ case 90:
+ u->sysname = "Microsoft Windows Me";
+ break;
+ default:
+ return -1;
+ }
+ break;
+
+ case 5:
+ switch (info.dwMinorVersion) {
+ case 0:
+ u->sysname = "Microsoft Windows 2000";
+ break;
+ case 1:
+ u->sysname = "Microsoft Windows XP";
+ break;
+ case 2:
+ u->sysname = "Microsoft Server 2003";
+ break;
+ default:
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ return 0;
}
diff --git a/src/mingw_s3_functions.c b/src/mingw_s3_functions.c
index b3f5c49..142569d 100644
--- a/src/mingw_s3_functions.c
+++ b/src/mingw_s3_functions.c
@@ -26,12 +26,12 @@
int setenv(const char *a, const char *b, int c)
{
- (void) c;
+ (void) c;
- return SetEnvironmentVariable(a, b);
+ return SetEnvironmentVariable(a, b);
}
int unsetenv(const char *a)
{
- return SetEnvironmentVariable(a, 0);
+ return SetEnvironmentVariable(a, 0);
}
diff --git a/src/object.c b/src/object.c
index 21946b1..4c8fd1c 100644
--- a/src/object.c
+++ b/src/object.c
@@ -33,39 +33,39 @@
// put object ----------------------------------------------------------------
void S3_put_object(const S3BucketContext *bucketContext, const char *key,
- uint64_t contentLength,
- const S3PutProperties *putProperties,
- S3RequestContext *requestContext,
- const S3PutObjectHandler *handler, void *callbackData)
+ uint64_t contentLength,
+ const S3PutProperties *putProperties,
+ S3RequestContext *requestContext,
+ const S3PutObjectHandler *handler, void *callbackData)
{
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypePUT, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- key, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- putProperties, // putProperties
- handler->responseHandler.propertiesCallback, // propertiesCallback
- handler->putObjectDataCallback, // toS3Callback
- contentLength, // toS3CallbackTotalSize
- 0, // fromS3Callback
- handler->responseHandler.completeCallback, // completeCallback
- callbackData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypePUT, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ key, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ putProperties, // putProperties
+ handler->responseHandler.propertiesCallback, // propertiesCallback
+ handler->putObjectDataCallback, // toS3Callback
+ contentLength, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ handler->responseHandler.completeCallback, // completeCallback
+ callbackData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
@@ -74,264 +74,264 @@ void S3_put_object(const S3BucketContext *bucketContext, const char *key,
typedef struct CopyObjectData
{
- SimpleXml simpleXml;
-
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
-
- int64_t *lastModifiedReturn;
- int eTagReturnSize;
- char *eTagReturn;
- int eTagReturnLen;
-
- string_buffer(lastModified, 256);
+ SimpleXml simpleXml;
+
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
+
+ int64_t *lastModifiedReturn;
+ int eTagReturnSize;
+ char *eTagReturn;
+ int eTagReturnLen;
+
+ string_buffer(lastModified, 256);
} CopyObjectData;
static S3Status copyObjectXmlCallback(const char *elementPath,
- const char *data, int dataLen,
- void *callbackData)
+ const char *data, int dataLen,
+ void *callbackData)
{
- CopyObjectData *coData = (CopyObjectData *) callbackData;
-
- int fit;
-
- if (data) {
- if (!strcmp(elementPath, "CopyObjectResult/LastModified")) {
- string_buffer_append(coData->lastModified, data, dataLen, fit);
- }
- else if (!strcmp(elementPath, "CopyObjectResult/ETag")) {
- if (coData->eTagReturnSize && coData->eTagReturn) {
- coData->eTagReturnLen +=
- snprintf(&(coData->eTagReturn[coData->eTagReturnLen]),
- coData->eTagReturnSize -
- coData->eTagReturnLen - 1,
- "%.*s", dataLen, data);
- if (coData->eTagReturnLen >= coData->eTagReturnSize) {
- return S3StatusXmlParseFailure;
- }
- }
- }
- }
-
- return S3StatusOK;
+ CopyObjectData *coData = (CopyObjectData *) callbackData;
+
+ int fit;
+
+ if (data) {
+ if (!strcmp(elementPath, "CopyObjectResult/LastModified")) {
+ string_buffer_append(coData->lastModified, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath, "CopyObjectResult/ETag")) {
+ if (coData->eTagReturnSize && coData->eTagReturn) {
+ coData->eTagReturnLen +=
+ snprintf(&(coData->eTagReturn[coData->eTagReturnLen]),
+ coData->eTagReturnSize -
+ coData->eTagReturnLen - 1,
+ "%.*s", dataLen, data);
+ if (coData->eTagReturnLen >= coData->eTagReturnSize) {
+ return S3StatusXmlParseFailure;
+ }
+ }
+ }
+ }
+
+ return S3StatusOK;
}
static S3Status copyObjectPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- CopyObjectData *coData = (CopyObjectData *) callbackData;
-
- return (*(coData->responsePropertiesCallback))
- (responseProperties, coData->callbackData);
+ CopyObjectData *coData = (CopyObjectData *) callbackData;
+
+ return (*(coData->responsePropertiesCallback))
+ (responseProperties, coData->callbackData);
}
static S3Status copyObjectDataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- CopyObjectData *coData = (CopyObjectData *) callbackData;
+ CopyObjectData *coData = (CopyObjectData *) callbackData;
- return simplexml_add(&(coData->simpleXml), buffer, bufferSize);
+ return simplexml_add(&(coData->simpleXml), buffer, bufferSize);
}
static void copyObjectCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- CopyObjectData *coData = (CopyObjectData *) callbackData;
+ CopyObjectData *coData = (CopyObjectData *) callbackData;
- if (coData->lastModifiedReturn) {
- time_t lastModified = -1;
- if (coData->lastModifiedLen) {
- lastModified = parseIso8601Time(coData->lastModified);
- }
+ if (coData->lastModifiedReturn) {
+ time_t lastModified = -1;
+ if (coData->lastModifiedLen) {
+ lastModified = parseIso8601Time(coData->lastModified);
+ }
- *(coData->lastModifiedReturn) = lastModified;
- }
+ *(coData->lastModifiedReturn) = lastModified;
+ }
- (*(coData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, coData->callbackData);
+ (*(coData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, coData->callbackData);
- simplexml_deinitialize(&(coData->simpleXml));
+ simplexml_deinitialize(&(coData->simpleXml));
- free(coData);
+ free(coData);
}
void S3_copy_object(const S3BucketContext *bucketContext, const char *key,
- const char *destinationBucket, const char *destinationKey,
- const S3PutProperties *putProperties,
- int64_t *lastModifiedReturn, int eTagReturnSize,
- char *eTagReturn, S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ const char *destinationBucket, const char *destinationKey,
+ const S3PutProperties *putProperties,
+ int64_t *lastModifiedReturn, int eTagReturnSize,
+ char *eTagReturn, S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Create the callback data
- CopyObjectData *data =
- (CopyObjectData *) malloc(sizeof(CopyObjectData));
- if (!data) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- simplexml_initialize(&(data->simpleXml), &copyObjectXmlCallback, data);
-
- data->responsePropertiesCallback = handler->propertiesCallback;
- data->responseCompleteCallback = handler->completeCallback;
- data->callbackData = callbackData;
-
- data->lastModifiedReturn = lastModifiedReturn;
- data->eTagReturnSize = eTagReturnSize;
- data->eTagReturn = eTagReturn;
- if (data->eTagReturnSize && data->eTagReturn) {
- data->eTagReturn[0] = 0;
- }
- data->eTagReturnLen = 0;
- string_buffer_initialize(data->lastModified);
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeCOPY, // httpRequestType
- { destinationBucket ? destinationBucket :
- bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- destinationKey ? destinationKey : key, // key
- 0, // queryParams
- 0, // subResource
- bucketContext->bucketName, // copySourceBucketName
- key, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- putProperties, // putProperties
- &copyObjectPropertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- &copyObjectDataCallback, // fromS3Callback
- &copyObjectCompleteCallback, // completeCallback
- data // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create the callback data
+ CopyObjectData *data =
+ (CopyObjectData *) malloc(sizeof(CopyObjectData));
+ if (!data) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ simplexml_initialize(&(data->simpleXml), &copyObjectXmlCallback, data);
+
+ data->responsePropertiesCallback = handler->propertiesCallback;
+ data->responseCompleteCallback = handler->completeCallback;
+ data->callbackData = callbackData;
+
+ data->lastModifiedReturn = lastModifiedReturn;
+ data->eTagReturnSize = eTagReturnSize;
+ data->eTagReturn = eTagReturn;
+ if (data->eTagReturnSize && data->eTagReturn) {
+ data->eTagReturn[0] = 0;
+ }
+ data->eTagReturnLen = 0;
+ string_buffer_initialize(data->lastModified);
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeCOPY, // httpRequestType
+ { destinationBucket ? destinationBucket :
+ bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ destinationKey ? destinationKey : key, // key
+ 0, // queryParams
+ 0, // subResource
+ bucketContext->bucketName, // copySourceBucketName
+ key, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ putProperties, // putProperties
+ &copyObjectPropertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ &copyObjectDataCallback, // fromS3Callback
+ &copyObjectCompleteCallback, // completeCallback
+ data // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
// get object ----------------------------------------------------------------
void S3_get_object(const S3BucketContext *bucketContext, const char *key,
- const S3GetConditions *getConditions,
- uint64_t startByte, uint64_t byteCount,
- S3RequestContext *requestContext,
- const S3GetObjectHandler *handler, void *callbackData)
+ const S3GetConditions *getConditions,
+ uint64_t startByte, uint64_t byteCount,
+ S3RequestContext *requestContext,
+ const S3GetObjectHandler *handler, void *callbackData)
{
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeGET, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- key, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- getConditions, // getConditions
- startByte, // startByte
- byteCount, // byteCount
- 0, // putProperties
- handler->responseHandler.propertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- handler->getObjectDataCallback, // fromS3Callback
- handler->responseHandler.completeCallback, // completeCallback
- callbackData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeGET, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ key, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ getConditions, // getConditions
+ startByte, // startByte
+ byteCount, // byteCount
+ 0, // putProperties
+ handler->responseHandler.propertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ handler->getObjectDataCallback, // fromS3Callback
+ handler->responseHandler.completeCallback, // completeCallback
+ callbackData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
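
For orientation, a caller drives this API through an S3GetObjectHandler whose members land in the propertiesCallback / fromS3Callback / completeCallback slots of the RequestParams above. A minimal usage sketch follows; it assumes the typedef names, field order and callback signatures from inc/libs3.h, and the bucket, key and credentials are placeholders:

#include <stdio.h>
#include "libs3.h"

static S3Status getPropertiesCb(const S3ResponseProperties *properties,
                                void *callbackData)
{
    (void) properties;
    (void) callbackData;
    return S3StatusOK;
}

static S3Status getDataCb(int bufferSize, const char *buffer,
                          void *callbackData)
{
    // Stream the object body into the FILE * passed as callbackData
    return (fwrite(buffer, 1, bufferSize, (FILE *) callbackData) ==
            (size_t) bufferSize) ? S3StatusOK : S3StatusAbortedByCallback;
}

static void getCompleteCb(S3Status status, const S3ErrorDetails *error,
                          void *callbackData)
{
    (void) error;
    (void) callbackData;
    fprintf(stderr, "GET finished with status %d\n", status);
}

// Hypothetical caller; assumes the library has already been set up with
// S3_initialize() (signature per libs3.h)
static void example_get(FILE *outfile)
{
    S3BucketContext bucketContext =
        { "example-bucket", S3ProtocolHTTPS, S3UriStyleVirtualHost,
          "ACCESS_KEY_ID", "SECRET_ACCESS_KEY" };
    S3GetObjectHandler handler =
        { { &getPropertiesCb, &getCompleteCb }, &getDataCb };
    // No get conditions, whole object, no request context
    S3_get_object(&bucketContext, "example-key", 0, 0, 0, 0,
                  &handler, outfile);
}
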
// head object ---------------------------------------------------------------
void S3_head_object(const S3BucketContext *bucketContext, const char *key,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeHEAD, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- key, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- handler->propertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- 0, // fromS3Callback
- handler->completeCallback, // completeCallback
- callbackData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeHEAD, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ key, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ handler->propertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ handler->completeCallback, // completeCallback
+ callbackData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
-
+
// delete object --------------------------------------------------------------
void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler, void *callbackData)
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler, void *callbackData)
{
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeDELETE, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- key, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- handler->propertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- 0, // fromS3Callback
- handler->completeCallback, // completeCallback
- callbackData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeDELETE, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ key, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ handler->propertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ handler->completeCallback, // completeCallback
+ callbackData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
diff --git a/src/request.c b/src/request.c
index 94a36bd..1b9e386 100644
--- a/src/request.c
+++ b/src/request.c
@@ -49,60 +49,60 @@ static int requestStackCountG;
typedef struct RequestComputedValues
{
- // All x-amz- headers, in normalized form (i.e. NAME: VALUE, no other ws)
- char *amzHeaders[S3_MAX_METADATA_COUNT + 2]; // + 2 for acl and date
+ // All x-amz- headers, in normalized form (i.e. NAME: VALUE, no other ws)
+ char *amzHeaders[S3_MAX_METADATA_COUNT + 2]; // + 2 for acl and date
- // The number of x-amz- headers
- int amzHeadersCount;
+ // The number of x-amz- headers
+ int amzHeadersCount;
- // Storage for amzHeaders (the +256 is for x-amz-acl and x-amz-date)
- char amzHeadersRaw[COMPACTED_METADATA_BUFFER_SIZE + 256 + 1];
+ // Storage for amzHeaders (the +256 is for x-amz-acl and x-amz-date)
+ char amzHeadersRaw[COMPACTED_METADATA_BUFFER_SIZE + 256 + 1];
- // Canonicalized x-amz- headers
- string_multibuffer(canonicalizedAmzHeaders,
- COMPACTED_METADATA_BUFFER_SIZE + 256 + 1);
+ // Canonicalized x-amz- headers
+ string_multibuffer(canonicalizedAmzHeaders,
+ COMPACTED_METADATA_BUFFER_SIZE + 256 + 1);
- // URL-Encoded key
- char urlEncodedKey[MAX_URLENCODED_KEY_SIZE + 1];
+ // URL-Encoded key
+ char urlEncodedKey[MAX_URLENCODED_KEY_SIZE + 1];
- // Canonicalized resource
- char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
+ // Canonicalized resource
+ char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
- // Cache-Control header (or empty)
- char cacheControlHeader[128];
+ // Cache-Control header (or empty)
+ char cacheControlHeader[128];
- // Content-Type header (or empty)
- char contentTypeHeader[128];
+ // Content-Type header (or empty)
+ char contentTypeHeader[128];
- // Content-MD5 header (or empty)
- char md5Header[128];
+ // Content-MD5 header (or empty)
+ char md5Header[128];
- // Content-Disposition header (or empty)
- char contentDispositionHeader[128];
+ // Content-Disposition header (or empty)
+ char contentDispositionHeader[128];
- // Content-Encoding header (or empty)
- char contentEncodingHeader[128];
+ // Content-Encoding header (or empty)
+ char contentEncodingHeader[128];
- // Expires header (or empty)
- char expiresHeader[128];
+ // Expires header (or empty)
+ char expiresHeader[128];
- // If-Modified-Since header
- char ifModifiedSinceHeader[128];
+ // If-Modified-Since header
+ char ifModifiedSinceHeader[128];
- // If-Unmodified-Since header
- char ifUnmodifiedSinceHeader[128];
+ // If-Unmodified-Since header
+ char ifUnmodifiedSinceHeader[128];
- // If-Match header
- char ifMatchHeader[128];
+ // If-Match header
+ char ifMatchHeader[128];
- // If-None-Match header
- char ifNoneMatchHeader[128];
+ // If-None-Match header
+ char ifNoneMatchHeader[128];
- // Range header
- char rangeHeader[128];
+ // Range header
+ char rangeHeader[128];
- // Authorization header
- char authorizationHeader[128];
+ // Authorization header
+ char authorizationHeader[128];
} RequestComputedValues;
@@ -112,126 +112,126 @@ typedef struct RequestComputedValues
// zero on failure.
static void request_headers_done(Request *request)
{
- if (request->propertiesCallbackMade) {
- return;
- }
-
- request->propertiesCallbackMade = 1;
-
- // Get the http response code
- long httpResponseCode;
- request->httpResponseCode = 0;
- if (curl_easy_getinfo(request->curl, CURLINFO_RESPONSE_CODE,
- &httpResponseCode) != CURLE_OK) {
- // Not able to get the HTTP response code - error
- request->status = S3StatusInternalError;
- return;
- }
- else {
- request->httpResponseCode = httpResponseCode;
- }
-
- response_headers_handler_done(&(request->responseHeadersHandler),
- request->curl);
-
- // Only make the callback if it was a successful request; otherwise we're
- // returning information about the error response itself
- if (request->propertiesCallback &&
- (request->httpResponseCode >= 200) &&
- (request->httpResponseCode <= 299)) {
- request->status = (*(request->propertiesCallback))
- (&(request->responseHeadersHandler.responseProperties),
- request->callbackData);
- }
+ if (request->propertiesCallbackMade) {
+ return;
+ }
+
+ request->propertiesCallbackMade = 1;
+
+ // Get the http response code
+ long httpResponseCode;
+ request->httpResponseCode = 0;
+ if (curl_easy_getinfo(request->curl, CURLINFO_RESPONSE_CODE,
+ &httpResponseCode) != CURLE_OK) {
+ // Not able to get the HTTP response code - error
+ request->status = S3StatusInternalError;
+ return;
+ }
+ else {
+ request->httpResponseCode = httpResponseCode;
+ }
+
+ response_headers_handler_done(&(request->responseHeadersHandler),
+ request->curl);
+
+ // Only make the callback if it was a successful request; otherwise we're
+ // returning information about the error response itself
+ if (request->propertiesCallback &&
+ (request->httpResponseCode >= 200) &&
+ (request->httpResponseCode <= 299)) {
+ request->status = (*(request->propertiesCallback))
+ (&(request->responseHeadersHandler.responseProperties),
+ request->callbackData);
+ }
}
static size_t curl_header_func(void *ptr, size_t size, size_t nmemb,
- void *data)
+ void *data)
{
- Request *request = (Request *) data;
+ Request *request = (Request *) data;
- int len = size * nmemb;
+ int len = size * nmemb;
- response_headers_handler_add
- (&(request->responseHeadersHandler), (char *) ptr, len);
+ response_headers_handler_add
+ (&(request->responseHeadersHandler), (char *) ptr, len);
- return len;
+ return len;
}
static size_t curl_read_func(void *ptr, size_t size, size_t nmemb, void *data)
{
- Request *request = (Request *) data;
-
- int len = size * nmemb;
-
- request_headers_done(request);
-
- if (request->status != S3StatusOK) {
- return CURL_READFUNC_ABORT;
- }
-
- // If there is no data callback, or the data callback has already returned
- // contentLength bytes, return 0;
- if (!request->toS3Callback || !request->toS3CallbackBytesRemaining) {
- return 0;
- }
-
- // Don't tell the callback that we are willing to accept more data than we
- // really are
- if (len > request->toS3CallbackBytesRemaining) {
- len = request->toS3CallbackBytesRemaining;
- }
-
- // Otherwise, make the data callback
- int ret = (*(request->toS3Callback))
- (len, (char *) ptr, request->callbackData);
- if (ret < 0) {
- request->status = S3StatusAbortedByCallback;
- return CURL_READFUNC_ABORT;
- }
- else {
- if (ret > request->toS3CallbackBytesRemaining) {
- ret = request->toS3CallbackBytesRemaining;
- }
- request->toS3CallbackBytesRemaining -= ret;
- return ret;
- }
+ Request *request = (Request *) data;
+
+ int len = size * nmemb;
+
+ request_headers_done(request);
+
+ if (request->status != S3StatusOK) {
+ return CURL_READFUNC_ABORT;
+ }
+
+ // If there is no data callback, or the data callback has already returned
+ // contentLength bytes, return 0;
+ if (!request->toS3Callback || !request->toS3CallbackBytesRemaining) {
+ return 0;
+ }
+
+ // Don't tell the callback that we are willing to accept more data than we
+ // really are
+ if (len > request->toS3CallbackBytesRemaining) {
+ len = request->toS3CallbackBytesRemaining;
+ }
+
+ // Otherwise, make the data callback
+ int ret = (*(request->toS3Callback))
+ (len, (char *) ptr, request->callbackData);
+ if (ret < 0) {
+ request->status = S3StatusAbortedByCallback;
+ return CURL_READFUNC_ABORT;
+ }
+ else {
+ if (ret > request->toS3CallbackBytesRemaining) {
+ ret = request->toS3CallbackBytesRemaining;
+ }
+ request->toS3CallbackBytesRemaining -= ret;
+ return ret;
+ }
}
static size_t curl_write_func(void *ptr, size_t size, size_t nmemb,
- void *data)
+ void *data)
{
- Request *request = (Request *) data;
-
- int len = size * nmemb;
-
- request_headers_done(request);
-
- if (request->status != S3StatusOK) {
- return 0;
- }
-
- // On HTTP error, we expect to parse an HTTP error response
- if ((request->httpResponseCode < 200) ||
- (request->httpResponseCode > 299)) {
- request->status = error_parser_add
- (&(request->errorParser), (char *) ptr, len);
- }
- // If there was a callback registered, make it
- else if (request->fromS3Callback) {
- request->status = (*(request->fromS3Callback))
- (len, (char *) ptr, request->callbackData);
- }
- // Else, consider this an error - S3 has sent back data when it was not
- // expected
- else {
- request->status = S3StatusInternalError;
- }
-
- return ((request->status == S3StatusOK) ? len : 0);
+ Request *request = (Request *) data;
+
+ int len = size * nmemb;
+
+ request_headers_done(request);
+
+ if (request->status != S3StatusOK) {
+ return 0;
+ }
+
+ // On HTTP error, we expect to parse an HTTP error response
+ if ((request->httpResponseCode < 200) ||
+ (request->httpResponseCode > 299)) {
+ request->status = error_parser_add
+ (&(request->errorParser), (char *) ptr, len);
+ }
+ // If there was a callback registered, make it
+ else if (request->fromS3Callback) {
+ request->status = (*(request->fromS3Callback))
+ (len, (char *) ptr, request->callbackData);
+ }
+ // Else, consider this an error - S3 has sent back data when it was not
+ // expected
+ else {
+ request->status = S3StatusInternalError;
+ }
+
+ return ((request->status == S3StatusOK) ? len : 0);
}
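
These three callbacks follow libcurl's usual contract for CURLOPT_HEADERFUNCTION / CURLOPT_READFUNCTION / CURLOPT_WRITEFUNCTION: return the number of bytes handled, and abort the transfer by returning a short count (or CURL_READFUNC_ABORT from a read callback). A stripped-down sketch of the same write-callback pattern with plain libcurl and no libs3 types (the URL is a placeholder):

#include <stdio.h>
#include <curl/curl.h>

// Count response bytes; returning less than size * nmemb aborts the transfer
static size_t count_bytes(void *ptr, size_t size, size_t nmemb, void *data)
{
    (void) ptr;
    *((size_t *) data) += size * nmemb;
    return size * nmemb;
}

int main(void)
{
    size_t total = 0;
    curl_global_init(CURL_GLOBAL_ALL);
    CURL *curl = curl_easy_init();
    if (!curl) {
        return 1;
    }
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, &count_bytes);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &total);
    CURLcode rc = curl_easy_perform(curl);
    printf("curl status %d, %zu bytes received\n", (int) rc, total);
    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return (rc == CURLE_OK) ? 0 : 1;
}
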
@@ -240,279 +240,279 @@ static size_t curl_write_func(void *ptr, size_t size, size_t nmemb,
// them such that they all look exactly like this:
// x-amz-meta-${NAME}: ${VALUE}
// It also adds the x-amz-acl, x-amz-copy-source, and x-amz-metadata-directive
-// headers if necessary, and always adds the x-amz-date header. It copies the
+// headers if necessary, and always adds the x-amz-date header. It copies the
// raw string values into params->amzHeadersRaw, and creates an array of
// string pointers representing these headers in params->amzHeaders (and also
// sets params->amzHeadersCount to be the count of the total number of x-amz-
// headers thus created).
static S3Status compose_amz_headers(const RequestParams *params,
- RequestComputedValues *values)
+ RequestComputedValues *values)
{
- const S3PutProperties *properties = params->putProperties;
-
- values->amzHeadersCount = 0;
- values->amzHeadersRaw[0] = 0;
- int len = 0;
-
- // Append a header to amzHeaders, trimming whitespace from the end.
- // Does NOT trim whitespace from the beginning.
-#define headers_append(isNewHeader, format, ...) \
- do { \
- if (isNewHeader) { \
- values->amzHeaders[values->amzHeadersCount++] = \
- &(values->amzHeadersRaw[len]); \
- } \
- len += snprintf(&(values->amzHeadersRaw[len]), \
- sizeof(values->amzHeadersRaw) - len, \
- format, __VA_ARGS__); \
- if (len >= (int) sizeof(values->amzHeadersRaw)) { \
- return S3StatusMetaDataHeadersTooLong; \
- } \
- while ((len > 0) && (values->amzHeadersRaw[len - 1] == ' ')) { \
- len--; \
- } \
- values->amzHeadersRaw[len++] = 0; \
- } while (0)
-
-#define header_name_tolower_copy(str, l) \
- do { \
- values->amzHeaders[values->amzHeadersCount++] = \
- &(values->amzHeadersRaw[len]); \
- if ((len + l) >= (int) sizeof(values->amzHeadersRaw)) { \
- return S3StatusMetaDataHeadersTooLong; \
- } \
- int todo = l; \
- while (todo--) { \
- if ((*(str) >= 'A') && (*(str) <= 'Z')) { \
- values->amzHeadersRaw[len++] = 'a' + (*(str) - 'A'); \
- } \
- else { \
- values->amzHeadersRaw[len++] = *(str); \
- } \
- (str)++; \
- } \
- } while (0)
-
- // Check and copy in the x-amz-meta headers
- if (properties) {
- int i;
- for (i = 0; i < properties->metaDataCount; i++) {
- const S3NameValue *property = &(properties->metaData[i]);
- char headerName[S3_MAX_METADATA_SIZE - sizeof(": v")];
- int l = snprintf(headerName, sizeof(headerName),
- S3_METADATA_HEADER_NAME_PREFIX "%s",
- property->name);
- char *hn = headerName;
- header_name_tolower_copy(hn, l);
- // Copy in the value
- headers_append(0, ": %s", property->value);
- }
-
- // Add the x-amz-acl header, if necessary
- const char *cannedAclString;
- switch (params->putProperties->cannedAcl) {
- case S3CannedAclPrivate:
- cannedAclString = 0;
- break;
- case S3CannedAclPublicRead:
- cannedAclString = "public-read";
- break;
- case S3CannedAclPublicReadWrite:
- cannedAclString = "public-read-write";
- break;
- default: // S3CannedAclAuthenticatedRead
- cannedAclString = "authenticated-read";
- break;
- }
- if (cannedAclString) {
- headers_append(1, "x-amz-acl: %s", cannedAclString);
- }
- }
-
- // Add the x-amz-date header
- time_t now = time(NULL);
- char date[64];
- strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&now));
- headers_append(1, "x-amz-date: %s", date);
-
- if (params->httpRequestType == HttpRequestTypeCOPY) {
- // Add the x-amz-copy-source header
- if (params->copySourceBucketName && params->copySourceBucketName[0] &&
- params->copySourceKey && params->copySourceKey[0]) {
- headers_append(1, "x-amz-copy-source: /%s/%s",
- params->copySourceBucketName,
- params->copySourceKey);
- }
- // And the x-amz-metadata-directive header
- if (params->putProperties) {
- headers_append(1, "%s", "x-amz-metadata-directive: REPLACE");
- }
- }
-
- return S3StatusOK;
+ const S3PutProperties *properties = params->putProperties;
+
+ values->amzHeadersCount = 0;
+ values->amzHeadersRaw[0] = 0;
+ int len = 0;
+
+ // Append a header to amzHeaders, trimming whitespace from the end.
+ // Does NOT trim whitespace from the beginning.
+#define headers_append(isNewHeader, format, ...) \
+ do { \
+ if (isNewHeader) { \
+ values->amzHeaders[values->amzHeadersCount++] = \
+ &(values->amzHeadersRaw[len]); \
+ } \
+ len += snprintf(&(values->amzHeadersRaw[len]), \
+ sizeof(values->amzHeadersRaw) - len, \
+ format, __VA_ARGS__); \
+ if (len >= (int) sizeof(values->amzHeadersRaw)) { \
+ return S3StatusMetaDataHeadersTooLong; \
+ } \
+ while ((len > 0) && (values->amzHeadersRaw[len - 1] == ' ')) { \
+ len--; \
+ } \
+ values->amzHeadersRaw[len++] = 0; \
+ } while (0)
+
+#define header_name_tolower_copy(str, l) \
+ do { \
+ values->amzHeaders[values->amzHeadersCount++] = \
+ &(values->amzHeadersRaw[len]); \
+ if ((len + l) >= (int) sizeof(values->amzHeadersRaw)) { \
+ return S3StatusMetaDataHeadersTooLong; \
+ } \
+ int todo = l; \
+ while (todo--) { \
+ if ((*(str) >= 'A') && (*(str) <= 'Z')) { \
+ values->amzHeadersRaw[len++] = 'a' + (*(str) - 'A'); \
+ } \
+ else { \
+ values->amzHeadersRaw[len++] = *(str); \
+ } \
+ (str)++; \
+ } \
+ } while (0)
+
+ // Check and copy in the x-amz-meta headers
+ if (properties) {
+ int i;
+ for (i = 0; i < properties->metaDataCount; i++) {
+ const S3NameValue *property = &(properties->metaData[i]);
+ char headerName[S3_MAX_METADATA_SIZE - sizeof(": v")];
+ int l = snprintf(headerName, sizeof(headerName),
+ S3_METADATA_HEADER_NAME_PREFIX "%s",
+ property->name);
+ char *hn = headerName;
+ header_name_tolower_copy(hn, l);
+ // Copy in the value
+ headers_append(0, ": %s", property->value);
+ }
+
+ // Add the x-amz-acl header, if necessary
+ const char *cannedAclString;
+ switch (params->putProperties->cannedAcl) {
+ case S3CannedAclPrivate:
+ cannedAclString = 0;
+ break;
+ case S3CannedAclPublicRead:
+ cannedAclString = "public-read";
+ break;
+ case S3CannedAclPublicReadWrite:
+ cannedAclString = "public-read-write";
+ break;
+ default: // S3CannedAclAuthenticatedRead
+ cannedAclString = "authenticated-read";
+ break;
+ }
+ if (cannedAclString) {
+ headers_append(1, "x-amz-acl: %s", cannedAclString);
+ }
+ }
+
+ // Add the x-amz-date header
+ time_t now = time(NULL);
+ char date[64];
+ strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&now));
+ headers_append(1, "x-amz-date: %s", date);
+
+ if (params->httpRequestType == HttpRequestTypeCOPY) {
+ // Add the x-amz-copy-source header
+ if (params->copySourceBucketName && params->copySourceBucketName[0] &&
+ params->copySourceKey && params->copySourceKey[0]) {
+ headers_append(1, "x-amz-copy-source: /%s/%s",
+ params->copySourceBucketName,
+ params->copySourceKey);
+ }
+ // And the x-amz-metadata-directive header
+ if (params->putProperties) {
+ headers_append(1, "%s", "x-amz-metadata-directive: REPLACE");
+ }
+ }
+
+ return S3StatusOK;
}
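
As a concrete example (values chosen for illustration): with putProperties carrying a single metadata pair named "Color" whose value is "blue " (note the trailing space) and cannedAcl set to S3CannedAclPublicRead, the function leaves three normalized entries in values->amzHeaders:

    x-amz-meta-color: blue
    x-amz-acl: public-read
    x-amz-date: <current time, e.g. Tue, 30 Dec 2008 12:00:00 GMT>

header_name_tolower_copy writes the lowercased name without a terminating NUL, and the immediately following headers_append(0, ": %s", ...) finishes that same raw entry (trimming the trailing space), which is why each name and value end up in one string.
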
// Composes the other headers
static S3Status compose_standard_headers(const RequestParams *params,
- RequestComputedValues *values)
+ RequestComputedValues *values)
{
-#define do_put_header(fmt, sourceField, destField, badError, tooLongError) \
- do { \
- if (params->putProperties && \
- params->putProperties-> sourceField && \
- params->putProperties-> sourceField[0]) { \
- /* Skip whitespace at beginning of val */ \
- const char *val = params->putProperties-> sourceField; \
- while (*val && isblank(*val)) { \
- val++; \
- } \
- if (!*val) { \
- return badError; \
- } \
- /* Compose header, make sure it all fit */ \
- int len = snprintf(values-> destField, \
- sizeof(values-> destField), fmt, val); \
- if (len >= (int) sizeof(values-> destField)) { \
- return tooLongError; \
- } \
- /* Now remove the whitespace at the end */ \
- while (isblank(values-> destField[len])) { \
- len--; \
- } \
- values-> destField[len] = 0; \
- } \
- else { \
- values-> destField[0] = 0; \
- } \
- } while (0)
-
-#define do_get_header(fmt, sourceField, destField, badError, tooLongError) \
- do { \
- if (params->getConditions && \
- params->getConditions-> sourceField && \
- params->getConditions-> sourceField[0]) { \
- /* Skip whitespace at beginning of val */ \
- const char *val = params->getConditions-> sourceField; \
- while (*val && isblank(*val)) { \
- val++; \
- } \
- if (!*val) { \
- return badError; \
- } \
- /* Compose header, make sure it all fit */ \
- int len = snprintf(values-> destField, \
- sizeof(values-> destField), fmt, val); \
- if (len >= (int) sizeof(values-> destField)) { \
- return tooLongError; \
- } \
- /* Now remove the whitespace at the end */ \
- while (isblank(values-> destField[len])) { \
- len--; \
- } \
- values-> destField[len] = 0; \
- } \
- else { \
- values-> destField[0] = 0; \
- } \
- } while (0)
-
- // Cache-Control
- do_put_header("Cache-Control: %s", cacheControl, cacheControlHeader,
- S3StatusBadCacheControl, S3StatusCacheControlTooLong);
-
- // ContentType
- do_put_header("Content-Type: %s", contentType, contentTypeHeader,
- S3StatusBadContentType, S3StatusContentTypeTooLong);
-
- // MD5
- do_put_header("Content-MD5: %s", md5, md5Header, S3StatusBadMD5,
- S3StatusMD5TooLong);
-
- // Content-Disposition
- do_put_header("Content-Disposition: attachment; filename=\"%s\"",
- contentDispositionFilename, contentDispositionHeader,
- S3StatusBadContentDispositionFilename,
- S3StatusContentDispositionFilenameTooLong);
-
- // ContentEncoding
- do_put_header("Content-Encoding: %s", contentEncoding,
- contentEncodingHeader, S3StatusBadContentEncoding,
- S3StatusContentEncodingTooLong);
-
- // Expires
- if (params->putProperties && (params->putProperties->expires >= 0)) {
- time_t t = (time_t) params->putProperties->expires;
- strftime(values->expiresHeader, sizeof(values->expiresHeader),
- "Expires: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t));
- }
- else {
- values->expiresHeader[0] = 0;
- }
-
- // If-Modified-Since
- if (params->getConditions &&
- (params->getConditions->ifModifiedSince >= 0)) {
- time_t t = (time_t) params->getConditions->ifModifiedSince;
- strftime(values->ifModifiedSinceHeader,
- sizeof(values->ifModifiedSinceHeader),
- "If-Modified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t));
- }
- else {
- values->ifModifiedSinceHeader[0] = 0;
- }
-
- // If-Unmodified-Since header
- if (params->getConditions &&
- (params->getConditions->ifNotModifiedSince >= 0)) {
- time_t t = (time_t) params->getConditions->ifNotModifiedSince;
- strftime(values->ifUnmodifiedSinceHeader,
- sizeof(values->ifUnmodifiedSinceHeader),
- "If-Unmodified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t));
- }
- else {
- values->ifUnmodifiedSinceHeader[0] = 0;
- }
-
- // If-Match header
- do_get_header("If-Match: %s", ifMatchETag, ifMatchHeader,
- S3StatusBadIfMatchETag, S3StatusIfMatchETagTooLong);
-
- // If-None-Match header
- do_get_header("If-None-Match: %s", ifNotMatchETag, ifNoneMatchHeader,
- S3StatusBadIfNotMatchETag,
- S3StatusIfNotMatchETagTooLong);
-
- // Range header
- if (params->startByte || params->byteCount) {
- if (params->byteCount) {
- snprintf(values->rangeHeader, sizeof(values->rangeHeader),
- "Range: bytes=%llu-%llu",
- (unsigned long long) params->startByte,
- (unsigned long long) (params->startByte +
- params->byteCount - 1));
- }
- else {
- snprintf(values->rangeHeader, sizeof(values->rangeHeader),
- "Range: bytes=%llu-",
- (unsigned long long) params->startByte);
- }
- }
- else {
- values->rangeHeader[0] = 0;
- }
-
- return S3StatusOK;
+#define do_put_header(fmt, sourceField, destField, badError, tooLongError) \
+ do { \
+ if (params->putProperties && \
+ params->putProperties-> sourceField && \
+ params->putProperties-> sourceField[0]) { \
+ /* Skip whitespace at beginning of val */ \
+ const char *val = params->putProperties-> sourceField; \
+ while (*val && isblank(*val)) { \
+ val++; \
+ } \
+ if (!*val) { \
+ return badError; \
+ } \
+ /* Compose header, make sure it all fit */ \
+ int len = snprintf(values-> destField, \
+ sizeof(values-> destField), fmt, val); \
+ if (len >= (int) sizeof(values-> destField)) { \
+ return tooLongError; \
+ } \
+ /* Now remove the whitespace at the end */ \
+ while (isblank(values-> destField[len])) { \
+ len--; \
+ } \
+ values-> destField[len] = 0; \
+ } \
+ else { \
+ values-> destField[0] = 0; \
+ } \
+ } while (0)
+
+#define do_get_header(fmt, sourceField, destField, badError, tooLongError) \
+ do { \
+ if (params->getConditions && \
+ params->getConditions-> sourceField && \
+ params->getConditions-> sourceField[0]) { \
+ /* Skip whitespace at beginning of val */ \
+ const char *val = params->getConditions-> sourceField; \
+ while (*val && isblank(*val)) { \
+ val++; \
+ } \
+ if (!*val) { \
+ return badError; \
+ } \
+ /* Compose header, make sure it all fit */ \
+ int len = snprintf(values-> destField, \
+ sizeof(values-> destField), fmt, val); \
+ if (len >= (int) sizeof(values-> destField)) { \
+ return tooLongError; \
+ } \
+ /* Now remove the whitespace at the end */ \
+ while (isblank(values-> destField[len])) { \
+ len--; \
+ } \
+ values-> destField[len] = 0; \
+ } \
+ else { \
+ values-> destField[0] = 0; \
+ } \
+ } while (0)
+
+ // Cache-Control
+ do_put_header("Cache-Control: %s", cacheControl, cacheControlHeader,
+ S3StatusBadCacheControl, S3StatusCacheControlTooLong);
+
+ // ContentType
+ do_put_header("Content-Type: %s", contentType, contentTypeHeader,
+ S3StatusBadContentType, S3StatusContentTypeTooLong);
+
+ // MD5
+ do_put_header("Content-MD5: %s", md5, md5Header, S3StatusBadMD5,
+ S3StatusMD5TooLong);
+
+ // Content-Disposition
+ do_put_header("Content-Disposition: attachment; filename=\"%s\"",
+ contentDispositionFilename, contentDispositionHeader,
+ S3StatusBadContentDispositionFilename,
+ S3StatusContentDispositionFilenameTooLong);
+
+ // ContentEncoding
+ do_put_header("Content-Encoding: %s", contentEncoding,
+ contentEncodingHeader, S3StatusBadContentEncoding,
+ S3StatusContentEncodingTooLong);
+
+ // Expires
+ if (params->putProperties && (params->putProperties->expires >= 0)) {
+ time_t t = (time_t) params->putProperties->expires;
+ strftime(values->expiresHeader, sizeof(values->expiresHeader),
+ "Expires: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t));
+ }
+ else {
+ values->expiresHeader[0] = 0;
+ }
+
+ // If-Modified-Since
+ if (params->getConditions &&
+ (params->getConditions->ifModifiedSince >= 0)) {
+ time_t t = (time_t) params->getConditions->ifModifiedSince;
+ strftime(values->ifModifiedSinceHeader,
+ sizeof(values->ifModifiedSinceHeader),
+ "If-Modified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t));
+ }
+ else {
+ values->ifModifiedSinceHeader[0] = 0;
+ }
+
+ // If-Unmodified-Since header
+ if (params->getConditions &&
+ (params->getConditions->ifNotModifiedSince >= 0)) {
+ time_t t = (time_t) params->getConditions->ifNotModifiedSince;
+ strftime(values->ifUnmodifiedSinceHeader,
+ sizeof(values->ifUnmodifiedSinceHeader),
+ "If-Unmodified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t));
+ }
+ else {
+ values->ifUnmodifiedSinceHeader[0] = 0;
+ }
+
+ // If-Match header
+ do_get_header("If-Match: %s", ifMatchETag, ifMatchHeader,
+ S3StatusBadIfMatchETag, S3StatusIfMatchETagTooLong);
+
+ // If-None-Match header
+ do_get_header("If-None-Match: %s", ifNotMatchETag, ifNoneMatchHeader,
+ S3StatusBadIfNotMatchETag,
+ S3StatusIfNotMatchETagTooLong);
+
+ // Range header
+ if (params->startByte || params->byteCount) {
+ if (params->byteCount) {
+ snprintf(values->rangeHeader, sizeof(values->rangeHeader),
+ "Range: bytes=%llu-%llu",
+ (unsigned long long) params->startByte,
+ (unsigned long long) (params->startByte +
+ params->byteCount - 1));
+ }
+ else {
+ snprintf(values->rangeHeader, sizeof(values->rangeHeader),
+ "Range: bytes=%llu-",
+ (unsigned long long) params->startByte);
+ }
+ }
+ else {
+ values->rangeHeader[0] = 0;
+ }
+
+ return S3StatusOK;
}
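
Two concrete cases of the Range logic above: startByte = 100 with byteCount = 50 produces "Range: bytes=100-149" (the end offset is inclusive, hence startByte + byteCount - 1); startByte = 100 with byteCount = 0 produces the open-ended "Range: bytes=100-"; and when both are zero no Range header is emitted, so the whole object is requested.
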
// URL encodes the params->key value into params->urlEncodedKey
static S3Status encode_key(const RequestParams *params,
- RequestComputedValues *values)
+ RequestComputedValues *values)
{
- return (urlEncode(values->urlEncodedKey, params->key, S3_MAX_KEY_SIZE) ?
- S3StatusOK : S3StatusUriTooLong);
+ return (urlEncode(values->urlEncodedKey, params->key, S3_MAX_KEY_SIZE) ?
+ S3StatusOK : S3StatusUriTooLong);
}
@@ -521,857 +521,857 @@ static S3Status encode_key(const RequestParams *params,
// before header2 alphabetically, false if not
static int headerle(const char *header1, const char *header2)
{
- while (1) {
- if (*header1 == ':') {
- return (*header2 == ':');
- }
- else if (*header2 == ':') {
- return 0;
- }
- else if (*header2 < *header1) {
- return 0;
- }
- else if (*header2 > *header1) {
- return 1;
- }
- header1++, header2++;
- }
+ while (1) {
+ if (*header1 == ':') {
+ return (*header2 == ':');
+ }
+ else if (*header2 == ':') {
+ return 0;
+ }
+ else if (*header2 < *header1) {
+ return 0;
+ }
+ else if (*header2 > *header1) {
+ return 1;
+ }
+ header1++, header2++;
+ }
}
-// Replace this with merge sort eventually, it's the best stable sort. But
+// Replace this with merge sort eventually, it's the best stable sort. But
// since typically the number of elements being sorted is small, it doesn't
// matter that much which sort is used, and gnome sort is the world's simplest
-// stable sort. Added a slight twist to the standard gnome_sort - don't go
-// forward +1, go forward to the last highest index considered. This saves
+// stable sort. Added a slight twist to the standard gnome_sort - don't go
+// forward +1, go forward to the last highest index considered. This saves
// all the string comparisons that would be done "going forward", and thus
// only does the necessary string comparisons to move values back into their
// sorted position.
static void header_gnome_sort(const char **headers, int size)
{
- int i = 0, last_highest = 0;
-
- while (i < size) {
- if ((i == 0) || headerle(headers[i - 1], headers[i])) {
- i = ++last_highest;
- }
- else {
- const char *tmp = headers[i];
- headers[i] = headers[i - 1];
- headers[--i] = tmp;
- }
- }
+ int i = 0, last_highest = 0;
+
+ while (i < size) {
+ if ((i == 0) || headerle(headers[i - 1], headers[i])) {
+ i = ++last_highest;
+ }
+ else {
+ const char *tmp = headers[i];
+ headers[i] = headers[i - 1];
+ headers[--i] = tmp;
+ }
+ }
}
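
The same "last_highest" twist, shown standalone on ordinary C strings so the control flow is easier to follow (a sketch, not the library's code; strcmp stands in for the colon-aware headerle above):

#include <stdio.h>
#include <string.h>

// Gnome sort with the "jump back to last_highest" shortcut: after a run of
// swaps moves an element back into place, resume from the furthest index
// already examined instead of walking forward one step at a time.
static void gnome_sort_strings(const char **a, int n)
{
    int i = 0, last_highest = 0;
    while (i < n) {
        if ((i == 0) || (strcmp(a[i - 1], a[i]) <= 0)) {
            i = ++last_highest;
        }
        else {
            const char *tmp = a[i];
            a[i] = a[i - 1];
            a[--i] = tmp;
        }
    }
}

int main(void)
{
    const char *h[] = { "x-amz-meta-b", "x-amz-acl", "x-amz-date",
                        "x-amz-meta-a" };
    gnome_sort_strings(h, 4);
    for (int i = 0; i < 4; i++) {
        printf("%s\n", h[i]);
    }
    return 0;
}
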
// Canonicalizes the x-amz- headers into the canonicalizedAmzHeaders buffer
static void canonicalize_amz_headers(RequestComputedValues *values)
{
- // Make a copy of the headers that will be sorted
- const char *sortedHeaders[S3_MAX_METADATA_COUNT];
-
- memcpy(sortedHeaders, values->amzHeaders,
- (values->amzHeadersCount * sizeof(sortedHeaders[0])));
-
- // Now sort these
- header_gnome_sort(sortedHeaders, values->amzHeadersCount);
-
- // Now copy this sorted list into the buffer, all the while:
- // - folding repeated headers into single lines, and
- // - folding multiple lines
- // - removing the space after the colon
- int lastHeaderLen = 0, i;
- char *buffer = values->canonicalizedAmzHeaders;
- for (i = 0; i < values->amzHeadersCount; i++) {
- const char *header = sortedHeaders[i];
- const char *c = header;
- // If the header names are the same, append the next value
- if ((i > 0) &&
- !strncmp(header, sortedHeaders[i - 1], lastHeaderLen)) {
- // Replacing the previous newline with a comma
- *(buffer - 1) = ',';
- // Skip the header name and space
- c += (lastHeaderLen + 1);
- }
- // Else this is a new header
- else {
- // Copy in everything up to the space in the ": "
- while (*c != ' ') {
- *buffer++ = *c++;
- }
- // Save the header len since it's a new header
- lastHeaderLen = c - header;
- // Skip the space
- c++;
- }
- // Now copy in the value, folding the lines
- while (*c) {
- // If c points to a \r\n[whitespace] sequence, then fold
- // this newline out
- if ((*c == '\r') && (*(c + 1) == '\n') && isblank(*(c + 2))) {
- c += 3;
- while (isblank(*c)) {
- c++;
- }
-                // Also, what has most recently been copied into buffer may
- // have been whitespace, and since we're folding whitespace
- // out around this newline sequence, back buffer up over
- // any whitespace it contains
- while (isblank(*(buffer - 1))) {
- buffer--;
- }
- continue;
- }
- *buffer++ = *c++;
- }
- // Finally, add the newline
- *buffer++ = '\n';
- }
-
- // Terminate the buffer
- *buffer = 0;
+ // Make a copy of the headers that will be sorted
+ const char *sortedHeaders[S3_MAX_METADATA_COUNT];
+
+ memcpy(sortedHeaders, values->amzHeaders,
+ (values->amzHeadersCount * sizeof(sortedHeaders[0])));
+
+ // Now sort these
+ header_gnome_sort(sortedHeaders, values->amzHeadersCount);
+
+ // Now copy this sorted list into the buffer, all the while:
+ // - folding repeated headers into single lines, and
+ // - folding multiple lines
+ // - removing the space after the colon
+ int lastHeaderLen = 0, i;
+ char *buffer = values->canonicalizedAmzHeaders;
+ for (i = 0; i < values->amzHeadersCount; i++) {
+ const char *header = sortedHeaders[i];
+ const char *c = header;
+ // If the header names are the same, append the next value
+ if ((i > 0) &&
+ !strncmp(header, sortedHeaders[i - 1], lastHeaderLen)) {
+ // Replacing the previous newline with a comma
+ *(buffer - 1) = ',';
+ // Skip the header name and space
+ c += (lastHeaderLen + 1);
+ }
+ // Else this is a new header
+ else {
+ // Copy in everything up to the space in the ": "
+ while (*c != ' ') {
+ *buffer++ = *c++;
+ }
+ // Save the header len since it's a new header
+ lastHeaderLen = c - header;
+ // Skip the space
+ c++;
+ }
+ // Now copy in the value, folding the lines
+ while (*c) {
+ // If c points to a \r\n[whitespace] sequence, then fold
+ // this newline out
+ if ((*c == '\r') && (*(c + 1) == '\n') && isblank(*(c + 2))) {
+ c += 3;
+ while (isblank(*c)) {
+ c++;
+ }
+                // Also, what has most recently been copied into buffer may
+ // have been whitespace, and since we're folding whitespace
+ // out around this newline sequence, back buffer up over
+ // any whitespace it contains
+ while (isblank(*(buffer - 1))) {
+ buffer--;
+ }
+ continue;
+ }
+ *buffer++ = *c++;
+ }
+ // Finally, add the newline
+ *buffer++ = '\n';
+ }
+
+ // Terminate the buffer
+ *buffer = 0;
}
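
For a request whose normalized x-amz- headers happen to be, say,

    x-amz-meta-color: blue
    x-amz-date: Tue, 30 Dec 2008 12:00:00 GMT
    x-amz-meta-color: green

the sorted-and-folded buffer comes out as one header per line, duplicates joined with commas and the space after the colon removed:

    x-amz-date:Tue, 30 Dec 2008 12:00:00 GMT
    x-amz-meta-color:blue,green
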
// Canonicalizes the resource into params->canonicalizedResource
static void canonicalize_resource(const char *bucketName,
- const char *subResource,
- const char *urlEncodedKey,
- char *buffer)
+ const char *subResource,
+ const char *urlEncodedKey,
+ char *buffer)
{
- int len = 0;
+ int len = 0;
- *buffer = 0;
+ *buffer = 0;
#define append(str) len += sprintf(&(buffer[len]), "%s", str)
- if (bucketName && bucketName[0]) {
- buffer[len++] = '/';
- append(bucketName);
- }
+ if (bucketName && bucketName[0]) {
+ buffer[len++] = '/';
+ append(bucketName);
+ }
- append("/");
+ append("/");
- if (urlEncodedKey && urlEncodedKey[0]) {
- append(urlEncodedKey);
- }
+ if (urlEncodedKey && urlEncodedKey[0]) {
+ append(urlEncodedKey);
+ }
- if (subResource && subResource[0]) {
- append("?");
- append(subResource);
- }
+ if (subResource && subResource[0]) {
+ append("?");
+ append(subResource);
+ }
}
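
For example, with bucketName "example-bucket", urlEncodedKey "puppy.jpg" and subResource "acl", the buffer ends up as

    /example-bucket/puppy.jpg?acl

while a service-level request with no bucket, key or sub-resource reduces to just "/".
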
// Convert an HttpRequestType to an HTTP Verb string
static const char *http_request_type_to_verb(HttpRequestType requestType)
{
- switch (requestType) {
- case HttpRequestTypeGET:
- return "GET";
- case HttpRequestTypeHEAD:
- return "HEAD";
- case HttpRequestTypePUT:
- case HttpRequestTypeCOPY:
- return "PUT";
- default: // HttpRequestTypeDELETE
- return "DELETE";
- }
+ switch (requestType) {
+ case HttpRequestTypeGET:
+ return "GET";
+ case HttpRequestTypeHEAD:
+ return "HEAD";
+ case HttpRequestTypePUT:
+ case HttpRequestTypeCOPY:
+ return "PUT";
+ default: // HttpRequestTypeDELETE
+ return "DELETE";
+ }
}
// Composes the Authorization header for the request
static S3Status compose_auth_header(const RequestParams *params,
- RequestComputedValues *values)
+ RequestComputedValues *values)
{
- // We allow for:
- // 17 bytes for HTTP-Verb + \n
- // 129 bytes for Content-MD5 + \n
- // 129 bytes for Content-Type + \n
- // 1 byte for empty Date + \n
- // CanonicalizedAmzHeaders & CanonicalizedResource
- char signbuf[17 + 129 + 129 + 1 +
- (sizeof(values->canonicalizedAmzHeaders) - 1) +
- (sizeof(values->canonicalizedResource) - 1) + 1];
- int len = 0;
-
-#define signbuf_append(format, ...) \
- len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \
- format, __VA_ARGS__)
-
- signbuf_append
- ("%s\n", http_request_type_to_verb(params->httpRequestType));
-
- // For MD5 and Content-Type, use the value in the actual header, because
- // it's already been trimmed
- signbuf_append("%s\n", values->md5Header[0] ?
- &(values->md5Header[sizeof("Content-MD5: ") - 1]) : "");
-
- signbuf_append
- ("%s\n", values->contentTypeHeader[0] ?
- &(values->contentTypeHeader[sizeof("Content-Type: ") - 1]) : "");
-
- signbuf_append("%s", "\n"); // Date - we always use x-amz-date
-
- signbuf_append("%s", values->canonicalizedAmzHeaders);
-
- signbuf_append("%s", values->canonicalizedResource);
-
- // Generate an HMAC-SHA-1 of the signbuf
- unsigned char hmac[20];
-
- HMAC_SHA1(hmac, (unsigned char *) params->bucketContext.secretAccessKey,
- strlen(params->bucketContext.secretAccessKey),
- (unsigned char *) signbuf, len);
-
- // Now base-64 encode the results
- char b64[((20 + 1) * 4) / 3];
- int b64Len = base64Encode(hmac, 20, b64);
-
- snprintf(values->authorizationHeader, sizeof(values->authorizationHeader),
- "Authorization: AWS %s:%.*s", params->bucketContext.accessKeyId,
- b64Len, b64);
-
- return S3StatusOK;
+ // We allow for:
+ // 17 bytes for HTTP-Verb + \n
+ // 129 bytes for Content-MD5 + \n
+ // 129 bytes for Content-Type + \n
+ // 1 byte for empty Date + \n
+ // CanonicalizedAmzHeaders & CanonicalizedResource
+ char signbuf[17 + 129 + 129 + 1 +
+ (sizeof(values->canonicalizedAmzHeaders) - 1) +
+ (sizeof(values->canonicalizedResource) - 1) + 1];
+ int len = 0;
+
+#define signbuf_append(format, ...) \
+ len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \
+ format, __VA_ARGS__)
+
+ signbuf_append
+ ("%s\n", http_request_type_to_verb(params->httpRequestType));
+
+ // For MD5 and Content-Type, use the value in the actual header, because
+ // it's already been trimmed
+ signbuf_append("%s\n", values->md5Header[0] ?
+ &(values->md5Header[sizeof("Content-MD5: ") - 1]) : "");
+
+ signbuf_append
+ ("%s\n", values->contentTypeHeader[0] ?
+ &(values->contentTypeHeader[sizeof("Content-Type: ") - 1]) : "");
+
+ signbuf_append("%s", "\n"); // Date - we always use x-amz-date
+
+ signbuf_append("%s", values->canonicalizedAmzHeaders);
+
+ signbuf_append("%s", values->canonicalizedResource);
+
+ // Generate an HMAC-SHA-1 of the signbuf
+ unsigned char hmac[20];
+
+ HMAC_SHA1(hmac, (unsigned char *) params->bucketContext.secretAccessKey,
+ strlen(params->bucketContext.secretAccessKey),
+ (unsigned char *) signbuf, len);
+
+ // Now base-64 encode the results
+ char b64[((20 + 1) * 4) / 3];
+ int b64Len = base64Encode(hmac, 20, b64);
+
+ snprintf(values->authorizationHeader, sizeof(values->authorizationHeader),
+ "Authorization: AWS %s:%.*s", params->bucketContext.accessKeyId,
+ b64Len, b64);
+
+ return S3StatusOK;
}
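
Putting the pieces together: for an authenticated GET of /example-bucket/puppy.jpg with no Content-MD5 and no Content-Type, the signbuf handed to HMAC_SHA1 is

    GET\n
    \n
    \n
    \n
    x-amz-date:Tue, 30 Dec 2008 12:00:00 GMT\n
    /example-bucket/puppy.jpg

(\n shown explicitly; the Date slot is left empty because the library always supplies x-amz-date instead), and the header written out is

    Authorization: AWS <accessKeyId>:<base64-encoded HMAC-SHA-1>

i.e. the AWS signature-version-2 REST authentication scheme.
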
// Compose the URI to use for the request given the request parameters
static S3Status compose_uri(char *buffer, int bufferSize,
- const S3BucketContext *bucketContext,
- const char *urlEncodedKey,
- const char *subResource, const char *queryParams)
+ const S3BucketContext *bucketContext,
+ const char *urlEncodedKey,
+ const char *subResource, const char *queryParams)
{
- int len = 0;
-
-#define uri_append(fmt, ...) \
- do { \
- len += snprintf(&(buffer[len]), bufferSize - len, fmt, __VA_ARGS__); \
- if (len >= bufferSize) { \
- return S3StatusUriTooLong; \
- } \
- } while (0)
-
- uri_append("http%s://",
- (bucketContext->protocol == S3ProtocolHTTP) ? "" : "s");
-
- if (bucketContext->bucketName &&
- bucketContext->bucketName[0]) {
- if (bucketContext->uriStyle == S3UriStyleVirtualHost) {
- uri_append("%s.s3.amazonaws.com", bucketContext->bucketName);
- }
- else {
- uri_append("s3.amazonaws.com/%s", bucketContext->bucketName);
- }
- }
- else {
- uri_append("%s", "s3.amazonaws.com");
- }
-
- uri_append("%s", "/");
-
- uri_append("%s", urlEncodedKey);
-
- if (subResource && subResource[0]) {
- uri_append("?%s", subResource);
- }
-
- if (queryParams) {
- uri_append("%s%s", (subResource && subResource[0]) ? "&" : "?",
- queryParams);
- }
-
- return S3StatusOK;
+ int len = 0;
+
+#define uri_append(fmt, ...) \
+ do { \
+ len += snprintf(&(buffer[len]), bufferSize - len, fmt, __VA_ARGS__); \
+ if (len >= bufferSize) { \
+ return S3StatusUriTooLong; \
+ } \
+ } while (0)
+
+ uri_append("http%s://",
+ (bucketContext->protocol == S3ProtocolHTTP) ? "" : "s");
+
+ if (bucketContext->bucketName &&
+ bucketContext->bucketName[0]) {
+ if (bucketContext->uriStyle == S3UriStyleVirtualHost) {
+ uri_append("%s.s3.amazonaws.com", bucketContext->bucketName);
+ }
+ else {
+ uri_append("s3.amazonaws.com/%s", bucketContext->bucketName);
+ }
+ }
+ else {
+ uri_append("%s", "s3.amazonaws.com");
+ }
+
+ uri_append("%s", "/");
+
+ uri_append("%s", urlEncodedKey);
+
+ if (subResource && subResource[0]) {
+ uri_append("?%s", subResource);
+ }
+
+ if (queryParams) {
+ uri_append("%s%s", (subResource && subResource[0]) ? "&" : "?",
+ queryParams);
+ }
+
+ return S3StatusOK;
}
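
Concretely, bucket "example-bucket" with key "puppy.jpg" over HTTP becomes http://example-bucket.s3.amazonaws.com/puppy.jpg in virtual-host style and http://s3.amazonaws.com/example-bucket/puppy.jpg in path style; HTTPS only swaps the scheme, and any sub-resource or query parameters are appended after a '?' (or after '&' when both are present).
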
// Sets up the curl handle given the completely computed RequestParams
static S3Status setup_curl(Request *request,
- const RequestParams *params,
- const RequestComputedValues *values)
+ const RequestParams *params,
+ const RequestComputedValues *values)
{
- CURLcode status;
-
-#define curl_easy_setopt_safe(opt, val) \
- if ((status = curl_easy_setopt \
- (request->curl, opt, val)) != CURLE_OK) { \
- return S3StatusFailedToInitializeRequest; \
- }
-
- // Debugging only
- // curl_easy_setopt_safe(CURLOPT_VERBOSE, 1);
-
- // Set private data to request for the benefit of S3RequestContext
- curl_easy_setopt_safe(CURLOPT_PRIVATE, request);
-
- // Set header callback and data
- curl_easy_setopt_safe(CURLOPT_HEADERDATA, request);
- curl_easy_setopt_safe(CURLOPT_HEADERFUNCTION, &curl_header_func);
-
- // Set read callback, data, and readSize
- curl_easy_setopt_safe(CURLOPT_READFUNCTION, &curl_read_func);
- curl_easy_setopt_safe(CURLOPT_READDATA, request);
-
- // Set write callback and data
- curl_easy_setopt_safe(CURLOPT_WRITEFUNCTION, &curl_write_func);
- curl_easy_setopt_safe(CURLOPT_WRITEDATA, request);
-
- // Ask curl to parse the Last-Modified header. This is easier than
- // parsing it ourselves.
- curl_easy_setopt_safe(CURLOPT_FILETIME, 1);
-
- // Curl docs suggest that this is necessary for multithreaded code.
-    // However, they also point out that DNS timeouts will not be honored
- // during DNS lookup, which can be worked around by using the c-ares
- // library, which we do not do yet.
- curl_easy_setopt_safe(CURLOPT_NOSIGNAL, 1);
-
- // Turn off Curl's built-in progress meter
- curl_easy_setopt_safe(CURLOPT_NOPROGRESS, 1);
-
- // xxx todo - support setting the proxy for Curl to use (can't use https
- // for proxies though)
-
- // xxx todo - support setting the network interface for Curl to use
-
- // I think this is useful - we don't need interactive performance, we need
- // to complete large operations quickly
- curl_easy_setopt_safe(CURLOPT_TCP_NODELAY, 1);
-
- // Don't use Curl's 'netrc' feature
- curl_easy_setopt_safe(CURLOPT_NETRC, CURL_NETRC_IGNORED);
-
- // Don't verify S3's certificate, there are known to be issues with
- // them sometimes
- // xxx todo - support an option for verifying the S3 CA (default false)
- curl_easy_setopt_safe(CURLOPT_SSL_VERIFYPEER, 0);
-
- // Follow any redirection directives that S3 sends
- curl_easy_setopt_safe(CURLOPT_FOLLOWLOCATION, 1);
-
- // A safety valve in case S3 goes bananas with redirects
- curl_easy_setopt_safe(CURLOPT_MAXREDIRS, 10);
-
- // Set the User-Agent; maybe Amazon will track these?
- curl_easy_setopt_safe(CURLOPT_USERAGENT, userAgentG);
-
- // Set the low speed limit and time; we abort transfers that stay at
- // less than 1K per second for more than 15 seconds.
- // xxx todo - make these configurable
- // xxx todo - allow configurable max send and receive speed
- curl_easy_setopt_safe(CURLOPT_LOW_SPEED_LIMIT, 1024);
- curl_easy_setopt_safe(CURLOPT_LOW_SPEED_TIME, 15);
-
- // Append standard headers
-#define append_standard_header(fieldName) \
- if (values-> fieldName [0]) { \
- request->headers = curl_slist_append(request->headers, \
- values-> fieldName); \
- }
-
- // Would use CURLOPT_INFILESIZE_LARGE, but it is buggy in libcurl
- if (params->httpRequestType == HttpRequestTypePUT) {
- char header[256];
- snprintf(header, sizeof(header), "Content-Length: %llu",
- (unsigned long long) params->toS3CallbackTotalSize);
- request->headers = curl_slist_append(request->headers, header);
- request->headers = curl_slist_append(request->headers,
- "Transfer-Encoding:");
- }
- else if (params->httpRequestType == HttpRequestTypeCOPY) {
- request->headers = curl_slist_append(request->headers,
- "Transfer-Encoding:");
- }
-
- append_standard_header(cacheControlHeader);
- append_standard_header(contentTypeHeader);
- append_standard_header(md5Header);
- append_standard_header(contentDispositionHeader);
- append_standard_header(contentEncodingHeader);
- append_standard_header(expiresHeader);
- append_standard_header(ifModifiedSinceHeader);
- append_standard_header(ifUnmodifiedSinceHeader);
- append_standard_header(ifMatchHeader);
- append_standard_header(ifNoneMatchHeader);
- append_standard_header(rangeHeader);
- append_standard_header(authorizationHeader);
-
- // Append x-amz- headers
- int i;
- for (i = 0; i < values->amzHeadersCount; i++) {
- request->headers =
- curl_slist_append(request->headers, values->amzHeaders[i]);
- }
-
- // Set the HTTP headers
- curl_easy_setopt_safe(CURLOPT_HTTPHEADER, request->headers);
-
- // Set URI
- curl_easy_setopt_safe(CURLOPT_URL, request->uri);
-
- // Set request type.
- switch (params->httpRequestType) {
- case HttpRequestTypeHEAD:
- curl_easy_setopt_safe(CURLOPT_NOBODY, 1);
- break;
- case HttpRequestTypePUT:
- case HttpRequestTypeCOPY:
- curl_easy_setopt_safe(CURLOPT_UPLOAD, 1);
- break;
- case HttpRequestTypeDELETE:
- curl_easy_setopt_safe(CURLOPT_CUSTOMREQUEST, "DELETE");
- break;
- default: // HttpRequestTypeGET
- break;
- }
-
- return S3StatusOK;
+ CURLcode status;
+
+#define curl_easy_setopt_safe(opt, val) \
+ if ((status = curl_easy_setopt \
+ (request->curl, opt, val)) != CURLE_OK) { \
+ return S3StatusFailedToInitializeRequest; \
+ }
+
+ // Debugging only
+ // curl_easy_setopt_safe(CURLOPT_VERBOSE, 1);
+
+ // Set private data to request for the benefit of S3RequestContext
+ curl_easy_setopt_safe(CURLOPT_PRIVATE, request);
+
+ // Set header callback and data
+ curl_easy_setopt_safe(CURLOPT_HEADERDATA, request);
+ curl_easy_setopt_safe(CURLOPT_HEADERFUNCTION, &curl_header_func);
+
+ // Set read callback, data, and readSize
+ curl_easy_setopt_safe(CURLOPT_READFUNCTION, &curl_read_func);
+ curl_easy_setopt_safe(CURLOPT_READDATA, request);
+
+ // Set write callback and data
+ curl_easy_setopt_safe(CURLOPT_WRITEFUNCTION, &curl_write_func);
+ curl_easy_setopt_safe(CURLOPT_WRITEDATA, request);
+
+ // Ask curl to parse the Last-Modified header. This is easier than
+ // parsing it ourselves.
+ curl_easy_setopt_safe(CURLOPT_FILETIME, 1);
+
+ // Curl docs suggest that this is necessary for multithreaded code.
+    // However, they also point out that DNS timeouts will not be honored
+ // during DNS lookup, which can be worked around by using the c-ares
+ // library, which we do not do yet.
+ curl_easy_setopt_safe(CURLOPT_NOSIGNAL, 1);
+
+ // Turn off Curl's built-in progress meter
+ curl_easy_setopt_safe(CURLOPT_NOPROGRESS, 1);
+
+ // xxx todo - support setting the proxy for Curl to use (can't use https
+ // for proxies though)
+
+ // xxx todo - support setting the network interface for Curl to use
+
+ // I think this is useful - we don't need interactive performance, we need
+ // to complete large operations quickly
+ curl_easy_setopt_safe(CURLOPT_TCP_NODELAY, 1);
+
+ // Don't use Curl's 'netrc' feature
+ curl_easy_setopt_safe(CURLOPT_NETRC, CURL_NETRC_IGNORED);
+
+ // Don't verify S3's certificate, there are known to be issues with
+ // them sometimes
+ // xxx todo - support an option for verifying the S3 CA (default false)
+ curl_easy_setopt_safe(CURLOPT_SSL_VERIFYPEER, 0);
+
+ // Follow any redirection directives that S3 sends
+ curl_easy_setopt_safe(CURLOPT_FOLLOWLOCATION, 1);
+
+ // A safety valve in case S3 goes bananas with redirects
+ curl_easy_setopt_safe(CURLOPT_MAXREDIRS, 10);
+
+ // Set the User-Agent; maybe Amazon will track these?
+ curl_easy_setopt_safe(CURLOPT_USERAGENT, userAgentG);
+
+ // Set the low speed limit and time; we abort transfers that stay at
+ // less than 1K per second for more than 15 seconds.
+ // xxx todo - make these configurable
+ // xxx todo - allow configurable max send and receive speed
+ curl_easy_setopt_safe(CURLOPT_LOW_SPEED_LIMIT, 1024);
+ curl_easy_setopt_safe(CURLOPT_LOW_SPEED_TIME, 15);
+
+ // Append standard headers
+#define append_standard_header(fieldName) \
+ if (values-> fieldName [0]) { \
+ request->headers = curl_slist_append(request->headers, \
+ values-> fieldName); \
+ }
+
+ // Would use CURLOPT_INFILESIZE_LARGE, but it is buggy in libcurl
+ if (params->httpRequestType == HttpRequestTypePUT) {
+ char header[256];
+ snprintf(header, sizeof(header), "Content-Length: %llu",
+ (unsigned long long) params->toS3CallbackTotalSize);
+ request->headers = curl_slist_append(request->headers, header);
+ request->headers = curl_slist_append(request->headers,
+ "Transfer-Encoding:");
+ }
+ else if (params->httpRequestType == HttpRequestTypeCOPY) {
+ request->headers = curl_slist_append(request->headers,
+ "Transfer-Encoding:");
+ }
+
+ append_standard_header(cacheControlHeader);
+ append_standard_header(contentTypeHeader);
+ append_standard_header(md5Header);
+ append_standard_header(contentDispositionHeader);
+ append_standard_header(contentEncodingHeader);
+ append_standard_header(expiresHeader);
+ append_standard_header(ifModifiedSinceHeader);
+ append_standard_header(ifUnmodifiedSinceHeader);
+ append_standard_header(ifMatchHeader);
+ append_standard_header(ifNoneMatchHeader);
+ append_standard_header(rangeHeader);
+ append_standard_header(authorizationHeader);
+
+ // Append x-amz- headers
+ int i;
+ for (i = 0; i < values->amzHeadersCount; i++) {
+ request->headers =
+ curl_slist_append(request->headers, values->amzHeaders[i]);
+ }
+
+ // Set the HTTP headers
+ curl_easy_setopt_safe(CURLOPT_HTTPHEADER, request->headers);
+
+ // Set URI
+ curl_easy_setopt_safe(CURLOPT_URL, request->uri);
+
+ // Set request type.
+ switch (params->httpRequestType) {
+ case HttpRequestTypeHEAD:
+ curl_easy_setopt_safe(CURLOPT_NOBODY, 1);
+ break;
+ case HttpRequestTypePUT:
+ case HttpRequestTypeCOPY:
+ curl_easy_setopt_safe(CURLOPT_UPLOAD, 1);
+ break;
+ case HttpRequestTypeDELETE:
+ curl_easy_setopt_safe(CURLOPT_CUSTOMREQUEST, "DELETE");
+ break;
+ default: // HttpRequestTypeGET
+ break;
+ }
+
+ return S3StatusOK;
}
static void request_deinitialize(Request *request)
{
- if (request->headers) {
- curl_slist_free_all(request->headers);
- }
-
- error_parser_deinitialize(&(request->errorParser));
-
- // curl_easy_reset prevents connections from being re-used for some
- // reason. This makes HTTP Keep-Alive meaningless and is very bad for
- // performance. But it is necessary to allow curl to work properly.
- // xxx todo figure out why
- curl_easy_reset(request->curl);
+ if (request->headers) {
+ curl_slist_free_all(request->headers);
+ }
+
+ error_parser_deinitialize(&(request->errorParser));
+
+ // curl_easy_reset prevents connections from being re-used for some
+ // reason. This makes HTTP Keep-Alive meaningless and is very bad for
+ // performance. But it is necessary to allow curl to work properly.
+ // xxx todo figure out why
+ curl_easy_reset(request->curl);
}
static S3Status request_get(const RequestParams *params,
- const RequestComputedValues *values,
- Request **reqReturn)
+ const RequestComputedValues *values,
+ Request **reqReturn)
{
- Request *request = 0;
-
- // Try to get one from the request stack. We hold the lock for the
- // shortest time possible here.
- pthread_mutex_lock(&requestStackMutexG);
-
- if (requestStackCountG) {
- request = requestStackG[--requestStackCountG];
- }
-
- pthread_mutex_unlock(&requestStackMutexG);
-
- // If we got one, deinitialize it for re-use
- if (request) {
- request_deinitialize(request);
- }
- // Else there wasn't one available in the request stack, so create one
- else {
- if (!(request = (Request *) malloc(sizeof(Request)))) {
- return S3StatusOutOfMemory;
- }
- if (!(request->curl = curl_easy_init())) {
- free(request);
- return S3StatusFailedToInitializeRequest;
- }
- }
-
- // Initialize the request
- request->prev = 0;
- request->next = 0;
-
- // Request status is initialized to no error, will be updated whenever
- // an error occurs
- request->status = S3StatusOK;
-
- S3Status status;
-
- // Start out with no headers
- request->headers = 0;
-
- // Compute the URL
- if ((status = compose_uri
- (request->uri, sizeof(request->uri),
- &(params->bucketContext), values->urlEncodedKey,
- params->subResource, params->queryParams)) != S3StatusOK) {
- curl_easy_cleanup(request->curl);
- free(request);
- return status;
- }
-
- // Set all of the curl handle options
- if ((status = setup_curl(request, params, values)) != S3StatusOK) {
- curl_easy_cleanup(request->curl);
- free(request);
- return status;
- }
-
- request->propertiesCallback = params->propertiesCallback;
-
- request->toS3Callback = params->toS3Callback;
-
- request->toS3CallbackBytesRemaining = params->toS3CallbackTotalSize;
-
- request->fromS3Callback = params->fromS3Callback;
-
- request->completeCallback = params->completeCallback;
-
- request->callbackData = params->callbackData;
-
- response_headers_handler_initialize(&(request->responseHeadersHandler));
-
- request->propertiesCallbackMade = 0;
-
- error_parser_initialize(&(request->errorParser));
-
- *reqReturn = request;
-
- return S3StatusOK;
+ Request *request = 0;
+
+ // Try to get one from the request stack. We hold the lock for the
+ // shortest time possible here.
+ pthread_mutex_lock(&requestStackMutexG);
+
+ if (requestStackCountG) {
+ request = requestStackG[--requestStackCountG];
+ }
+
+ pthread_mutex_unlock(&requestStackMutexG);
+
+ // If we got one, deinitialize it for re-use
+ if (request) {
+ request_deinitialize(request);
+ }
+ // Else there wasn't one available in the request stack, so create one
+ else {
+ if (!(request = (Request *) malloc(sizeof(Request)))) {
+ return S3StatusOutOfMemory;
+ }
+ if (!(request->curl = curl_easy_init())) {
+ free(request);
+ return S3StatusFailedToInitializeRequest;
+ }
+ }
+
+ // Initialize the request
+ request->prev = 0;
+ request->next = 0;
+
+ // Request status is initialized to no error, will be updated whenever
+ // an error occurs
+ request->status = S3StatusOK;
+
+ S3Status status;
+
+ // Start out with no headers
+ request->headers = 0;
+
+ // Compute the URL
+ if ((status = compose_uri
+ (request->uri, sizeof(request->uri),
+ &(params->bucketContext), values->urlEncodedKey,
+ params->subResource, params->queryParams)) != S3StatusOK) {
+ curl_easy_cleanup(request->curl);
+ free(request);
+ return status;
+ }
+
+ // Set all of the curl handle options
+ if ((status = setup_curl(request, params, values)) != S3StatusOK) {
+ curl_easy_cleanup(request->curl);
+ free(request);
+ return status;
+ }
+
+ request->propertiesCallback = params->propertiesCallback;
+
+ request->toS3Callback = params->toS3Callback;
+
+ request->toS3CallbackBytesRemaining = params->toS3CallbackTotalSize;
+
+ request->fromS3Callback = params->fromS3Callback;
+
+ request->completeCallback = params->completeCallback;
+
+ request->callbackData = params->callbackData;
+
+ response_headers_handler_initialize(&(request->responseHeadersHandler));
+
+ request->propertiesCallbackMade = 0;
+
+ error_parser_initialize(&(request->errorParser));
+
+ *reqReturn = request;
+
+ return S3StatusOK;
}
static void request_destroy(Request *request)
{
- request_deinitialize(request);
- curl_easy_cleanup(request->curl);
- free(request);
+ request_deinitialize(request);
+ curl_easy_cleanup(request->curl);
+ free(request);
}
static void request_release(Request *request)
{
- pthread_mutex_lock(&requestStackMutexG);
-
- // If the request stack is full, destroy this one
- if (requestStackCountG == REQUEST_STACK_SIZE) {
- pthread_mutex_unlock(&requestStackMutexG);
- request_destroy(request);
- }
- // Else put this one at the front of the request stack; we do this because
- // we want the most-recently-used curl handle to be re-used on the next
- // request, to maximize our chances of re-using a TCP connection before it
- // times out
- else {
- requestStackG[requestStackCountG++] = request;
- pthread_mutex_unlock(&requestStackMutexG);
- }
+ pthread_mutex_lock(&requestStackMutexG);
+
+ // If the request stack is full, destroy this one
+ if (requestStackCountG == REQUEST_STACK_SIZE) {
+ pthread_mutex_unlock(&requestStackMutexG);
+ request_destroy(request);
+ }
+ // Else put this one at the front of the request stack; we do this because
+ // we want the most-recently-used curl handle to be re-used on the next
+ // request, to maximize our chances of re-using a TCP connection before it
+ // times out
+ else {
+ requestStackG[requestStackCountG++] = request;
+ pthread_mutex_unlock(&requestStackMutexG);
+ }
}
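The two functions above implement a small mutex-protected LIFO cache of Request objects so that curl easy handles (and any TCP connections they still hold open) get reused across requests. Below is a minimal standalone sketch of the same pattern, not libs3 code: the Handle type, the function names, and STACK_SIZE are illustrative stand-ins.

#include <pthread.h>
#include <stdlib.h>

typedef struct Handle { int reserved; } Handle;   /* stand-in for Request */

#define STACK_SIZE 32
static Handle *stackG[STACK_SIZE];
static int stackCountG = 0;
static pthread_mutex_t stackMutexG = PTHREAD_MUTEX_INITIALIZER;

static Handle *handle_acquire(void)
{
    Handle *h = 0;
    /* Hold the lock only long enough to pop the top of the stack */
    pthread_mutex_lock(&stackMutexG);
    if (stackCountG) {
        h = stackG[--stackCountG];
    }
    pthread_mutex_unlock(&stackMutexG);
    /* Cache miss: allocate a fresh handle */
    return h ? h : (Handle *) calloc(1, sizeof(Handle));
}

static void handle_release(Handle *h)
{
    pthread_mutex_lock(&stackMutexG);
    if (stackCountG == STACK_SIZE) {
        /* Cache is full: drop the lock and destroy this handle */
        pthread_mutex_unlock(&stackMutexG);
        free(h);
    }
    else {
        /* Push on top so the most-recently-used handle is handed out next */
        stackG[stackCountG++] = h;
        pthread_mutex_unlock(&stackMutexG);
    }
}

int main(void)
{
    Handle *h = handle_acquire();
    handle_release(h);
    Handle *again = handle_acquire();   /* same handle back: LIFO reuse */
    free(again);
    return 0;
}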
S3Status request_api_initialize(const char *userAgentInfo, int flags)
{
- if (curl_global_init(CURL_GLOBAL_ALL &
- ~((flags & S3_INIT_WINSOCK) ? 0 : CURL_GLOBAL_WIN32))
- != CURLE_OK) {
- return S3StatusInternalError;
- }
-
- pthread_mutex_init(&requestStackMutexG, 0);
-
- requestStackCountG = 0;
-
- if (!userAgentInfo || !*userAgentInfo) {
- userAgentInfo = "Unknown";
- }
-
- char platform[96];
- struct utsname utsn;
- if (uname(&utsn)) {
- strncpy(platform, "Unknown", sizeof(platform));
- // Because strncpy doesn't always zero terminate
- platform[sizeof(platform) - 1] = 0;
- }
- else {
- snprintf(platform, sizeof(platform), "%s%s%s", utsn.sysname,
- utsn.machine[0] ? " " : "", utsn.machine);
- }
-
- snprintf(userAgentG, sizeof(userAgentG),
- "Mozilla/4.0 (Compatible; %s; libs3 %s.%s; %s)",
- userAgentInfo, LIBS3_VER_MAJOR, LIBS3_VER_MINOR, platform);
-
- return S3StatusOK;
+ if (curl_global_init(CURL_GLOBAL_ALL &
+ ~((flags & S3_INIT_WINSOCK) ? 0 : CURL_GLOBAL_WIN32))
+ != CURLE_OK) {
+ return S3StatusInternalError;
+ }
+
+ pthread_mutex_init(&requestStackMutexG, 0);
+
+ requestStackCountG = 0;
+
+ if (!userAgentInfo || !*userAgentInfo) {
+ userAgentInfo = "Unknown";
+ }
+
+ char platform[96];
+ struct utsname utsn;
+ if (uname(&utsn)) {
+ strncpy(platform, "Unknown", sizeof(platform));
+ // Because strncpy doesn't always zero terminate
+ platform[sizeof(platform) - 1] = 0;
+ }
+ else {
+ snprintf(platform, sizeof(platform), "%s%s%s", utsn.sysname,
+ utsn.machine[0] ? " " : "", utsn.machine);
+ }
+
+ snprintf(userAgentG, sizeof(userAgentG),
+ "Mozilla/4.0 (Compatible; %s; libs3 %s.%s; %s)",
+ userAgentInfo, LIBS3_VER_MAJOR, LIBS3_VER_MINOR, platform);
+
+ return S3StatusOK;
}
void request_api_deinitialize()
{
- pthread_mutex_destroy(&requestStackMutexG);
+ pthread_mutex_destroy(&requestStackMutexG);
- while (requestStackCountG--) {
- request_destroy(requestStackG[requestStackCountG]);
- }
+ while (requestStackCountG--) {
+ request_destroy(requestStackG[requestStackCountG]);
+ }
}
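request_api_initialize() and request_api_deinitialize() are internal; applications reach them through the public entry points declared in libs3.h. A minimal setup/teardown sketch, assuming the S3_initialize()/S3_deinitialize() pair and the S3_INIT_ALL flag used elsewhere in this tree ("myapp" is just an arbitrary user-agent token):

#include <stdio.h>
#include "libs3.h"

int main(void)
{
    S3Status status = S3_initialize("myapp", S3_INIT_ALL);
    if (status != S3StatusOK) {
        fprintf(stderr, "S3_initialize failed: %s\n",
                S3_get_status_name(status));
        return 1;
    }

    /* ... issue requests here ... */

    S3_deinitialize();
    return 0;
}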
void request_perform(const RequestParams *params, S3RequestContext *context)
{
- Request *request;
- S3Status status;
-
-#define return_status(status) \
- (*(params->completeCallback))(status, 0, params->callbackData); \
- return
-
- // These will hold the computed values
- RequestComputedValues computed;
-
- // Validate the bucket name
- if (params->bucketContext.bucketName &&
- ((status = S3_validate_bucket_name
- (params->bucketContext.bucketName,
- params->bucketContext.uriStyle)) != S3StatusOK)) {
- return_status(status);
- }
-
- // Compose the amz headers
- if ((status = compose_amz_headers(params, &computed)) != S3StatusOK) {
- return_status(status);
- }
-
- // Compose standard headers
- if ((status = compose_standard_headers
- (params, &computed)) != S3StatusOK) {
- return_status(status);
- }
-
- // URL encode the key
- if ((status = encode_key(params, &computed)) != S3StatusOK) {
- return_status(status);
- }
-
- // Compute the canonicalized amz headers
- canonicalize_amz_headers(&computed);
-
- // Compute the canonicalized resource
- canonicalize_resource(params->bucketContext.bucketName,
- params->subResource, computed.urlEncodedKey,
- computed.canonicalizedResource);
-
- // Compose Authorization header
- if ((status = compose_auth_header(params, &computed)) != S3StatusOK) {
- return_status(status);
- }
-
- // Get an initialized Request structure now
- if ((status = request_get(params, &computed, &request)) != S3StatusOK) {
- return_status(status);
- }
-
- // If a RequestContext was provided, add the request to the curl multi
- if (context) {
- CURLMcode code = curl_multi_add_handle(context->curlm, request->curl);
- if (code == CURLM_OK) {
- if (context->requests) {
- request->prev = context->requests->prev;
- request->next = context->requests;
- context->requests->prev->next = request;
- context->requests->prev = request;
- }
- else {
- context->requests = request->next = request->prev = request;
- }
- }
- else {
- if (request->status == S3StatusOK) {
- request->status = (code == CURLM_OUT_OF_MEMORY) ?
- S3StatusOutOfMemory : S3StatusInternalError;
- }
- request_finish(request);
- }
- }
- // Else, perform the request immediately
- else {
- CURLcode code = curl_easy_perform(request->curl);
- if ((code != CURLE_OK) && (request->status == S3StatusOK)) {
- request->status = request_curl_code_to_status(code);
- }
- // Finish the request, ensuring that all callbacks have been made, and
- // also releases the request
- request_finish(request);
- }
+ Request *request;
+ S3Status status;
+
+#define return_status(status) \
+ (*(params->completeCallback))(status, 0, params->callbackData); \
+ return
+
+ // These will hold the computed values
+ RequestComputedValues computed;
+
+ // Validate the bucket name
+ if (params->bucketContext.bucketName &&
+ ((status = S3_validate_bucket_name
+ (params->bucketContext.bucketName,
+ params->bucketContext.uriStyle)) != S3StatusOK)) {
+ return_status(status);
+ }
+
+ // Compose the amz headers
+ if ((status = compose_amz_headers(params, &computed)) != S3StatusOK) {
+ return_status(status);
+ }
+
+ // Compose standard headers
+ if ((status = compose_standard_headers
+ (params, &computed)) != S3StatusOK) {
+ return_status(status);
+ }
+
+ // URL encode the key
+ if ((status = encode_key(params, &computed)) != S3StatusOK) {
+ return_status(status);
+ }
+
+ // Compute the canonicalized amz headers
+ canonicalize_amz_headers(&computed);
+
+ // Compute the canonicalized resource
+ canonicalize_resource(params->bucketContext.bucketName,
+ params->subResource, computed.urlEncodedKey,
+ computed.canonicalizedResource);
+
+ // Compose Authorization header
+ if ((status = compose_auth_header(params, &computed)) != S3StatusOK) {
+ return_status(status);
+ }
+
+ // Get an initialized Request structure now
+ if ((status = request_get(params, &computed, &request)) != S3StatusOK) {
+ return_status(status);
+ }
+
+ // If a RequestContext was provided, add the request to the curl multi
+ if (context) {
+ CURLMcode code = curl_multi_add_handle(context->curlm, request->curl);
+ if (code == CURLM_OK) {
+ if (context->requests) {
+ request->prev = context->requests->prev;
+ request->next = context->requests;
+ context->requests->prev->next = request;
+ context->requests->prev = request;
+ }
+ else {
+ context->requests = request->next = request->prev = request;
+ }
+ }
+ else {
+ if (request->status == S3StatusOK) {
+ request->status = (code == CURLM_OUT_OF_MEMORY) ?
+ S3StatusOutOfMemory : S3StatusInternalError;
+ }
+ request_finish(request);
+ }
+ }
+ // Else, perform the request immediately
+ else {
+ CURLcode code = curl_easy_perform(request->curl);
+ if ((code != CURLE_OK) && (request->status == S3StatusOK)) {
+ request->status = request_curl_code_to_status(code);
+ }
+ // Finish the request, ensuring that all callbacks have been made, and
+ // also releases the request
+ request_finish(request);
+ }
}
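When a context is supplied, the code above links the new request into a circular doubly-linked list rooted at context->requests; S3_runonce_request_context() later unlinks finished requests. A standalone sketch of that list discipline, with an illustrative Node type standing in for Request (the remove here is written generically rather than copied from the library):

#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
    int id;
    struct Node *prev, *next;
} Node;

static void list_insert(Node **head, Node *n)
{
    if (*head) {
        /* Link n in just before the head, i.e. at the tail */
        n->prev = (*head)->prev;
        n->next = *head;
        (*head)->prev->next = n;
        (*head)->prev = n;
    }
    else {
        *head = n->next = n->prev = n;
    }
}

static void list_remove(Node **head, Node *n)
{
    if (n->next == n) {
        *head = 0;                    /* n was the only element */
    }
    else {
        if (*head == n) {
            *head = n->next;          /* keep the head valid */
        }
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }
}

int main(void)
{
    Node a = { 1, 0, 0 }, b = { 2, 0, 0 };
    Node *head = 0;
    list_insert(&head, &a);
    list_insert(&head, &b);
    list_remove(&head, &a);
    printf("remaining: %d\n", head->id);   /* prints 2 */
    return 0;
}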
void request_finish(Request *request)
{
- // If we haven't detected this already, we now know that the headers are
- // definitely done being read in
- request_headers_done(request);
-
- // If there was no error processing the request, then possibly there was
- // an S3 error parsed, which should be converted into the request status
- if (request->status == S3StatusOK) {
- error_parser_convert_status(&(request->errorParser),
- &(request->status));
- // If there still was no error recorded, then it is possible that
- // there was in fact an error but that there was no error XML
- // detailing the error
- if ((request->status == S3StatusOK) &&
- ((request->httpResponseCode < 200) ||
- (request->httpResponseCode > 299))) {
- switch (request->httpResponseCode) {
- case 0:
- // This happens if the request never got any HTTP response
- // headers at all; we call this a ConnectionFailed error
- request->status = S3StatusConnectionFailed;
- break;
- case 100: // Some versions of libcurl erroneously set HTTP
- // status to this
- break;
- case 301:
- request->status = S3StatusErrorPermanentRedirect;
- break;
- case 307:
- request->status = S3StatusHttpErrorMovedTemporarily;
- break;
- case 400:
- request->status = S3StatusHttpErrorBadRequest;
- break;
- case 403:
- request->status = S3StatusHttpErrorForbidden;
- break;
- case 404:
- request->status = S3StatusHttpErrorNotFound;
- break;
- case 405:
- request->status = S3StatusErrorMethodNotAllowed;
- break;
- case 409:
- request->status = S3StatusHttpErrorConflict;
- break;
- case 411:
- request->status = S3StatusErrorMissingContentLength;
- break;
- case 412:
- request->status = S3StatusErrorPreconditionFailed;
- break;
- case 416:
- request->status = S3StatusErrorInvalidRange;
- break;
- case 500:
- request->status = S3StatusErrorInternalError;
- break;
- case 501:
- request->status = S3StatusErrorNotImplemented;
- break;
- case 503:
- request->status = S3StatusErrorSlowDown;
- break;
- default:
- request->status = S3StatusHttpErrorUnknown;
- break;
- }
- }
- }
-
- (*(request->completeCallback))
- (request->status, &(request->errorParser.s3ErrorDetails),
- request->callbackData);
-
- request_release(request);
+ // If we haven't detected this already, we now know that the headers are
+ // definitely done being read in
+ request_headers_done(request);
+
+ // If there was no error processing the request, then possibly there was
+ // an S3 error parsed, which should be converted into the request status
+ if (request->status == S3StatusOK) {
+ error_parser_convert_status(&(request->errorParser),
+ &(request->status));
+ // If there still was no error recorded, then it is possible that
+ // there was in fact an error but that there was no error XML
+ // detailing the error
+ if ((request->status == S3StatusOK) &&
+ ((request->httpResponseCode < 200) ||
+ (request->httpResponseCode > 299))) {
+ switch (request->httpResponseCode) {
+ case 0:
+ // This happens if the request never got any HTTP response
+ // headers at all; we call this a ConnectionFailed error
+ request->status = S3StatusConnectionFailed;
+ break;
+ case 100: // Some versions of libcurl erroneously set HTTP
+ // status to this
+ break;
+ case 301:
+ request->status = S3StatusErrorPermanentRedirect;
+ break;
+ case 307:
+ request->status = S3StatusHttpErrorMovedTemporarily;
+ break;
+ case 400:
+ request->status = S3StatusHttpErrorBadRequest;
+ break;
+ case 403:
+ request->status = S3StatusHttpErrorForbidden;
+ break;
+ case 404:
+ request->status = S3StatusHttpErrorNotFound;
+ break;
+ case 405:
+ request->status = S3StatusErrorMethodNotAllowed;
+ break;
+ case 409:
+ request->status = S3StatusHttpErrorConflict;
+ break;
+ case 411:
+ request->status = S3StatusErrorMissingContentLength;
+ break;
+ case 412:
+ request->status = S3StatusErrorPreconditionFailed;
+ break;
+ case 416:
+ request->status = S3StatusErrorInvalidRange;
+ break;
+ case 500:
+ request->status = S3StatusErrorInternalError;
+ break;
+ case 501:
+ request->status = S3StatusErrorNotImplemented;
+ break;
+ case 503:
+ request->status = S3StatusErrorSlowDown;
+ break;
+ default:
+ request->status = S3StatusHttpErrorUnknown;
+ break;
+ }
+ }
+ }
+
+ (*(request->completeCallback))
+ (request->status, &(request->errorParser.s3ErrorDetails),
+ request->callbackData);
+
+ request_release(request);
}
S3Status request_curl_code_to_status(CURLcode code)
{
- switch (code) {
- case CURLE_OUT_OF_MEMORY:
- return S3StatusOutOfMemory;
- case CURLE_COULDNT_RESOLVE_PROXY:
- case CURLE_COULDNT_RESOLVE_HOST:
- return S3StatusNameLookupError;
- case CURLE_COULDNT_CONNECT:
- return S3StatusFailedToConnect;
- case CURLE_WRITE_ERROR:
- case CURLE_OPERATION_TIMEDOUT:
- return S3StatusConnectionFailed;
- case CURLE_PARTIAL_FILE:
- return S3StatusOK;
- case CURLE_SSL_CACERT:
- return S3StatusServerFailedVerification;
- default:
- return S3StatusInternalError;
- }
+ switch (code) {
+ case CURLE_OUT_OF_MEMORY:
+ return S3StatusOutOfMemory;
+ case CURLE_COULDNT_RESOLVE_PROXY:
+ case CURLE_COULDNT_RESOLVE_HOST:
+ return S3StatusNameLookupError;
+ case CURLE_COULDNT_CONNECT:
+ return S3StatusFailedToConnect;
+ case CURLE_WRITE_ERROR:
+ case CURLE_OPERATION_TIMEDOUT:
+ return S3StatusConnectionFailed;
+ case CURLE_PARTIAL_FILE:
+ return S3StatusOK;
+ case CURLE_SSL_CACERT:
+ return S3StatusServerFailedVerification;
+ default:
+ return S3StatusInternalError;
+ }
}
S3Status S3_generate_authenticated_query_string
- (char *buffer, const S3BucketContext *bucketContext,
- const char *key, int64_t expires, const char *resource)
+ (char *buffer, const S3BucketContext *bucketContext,
+ const char *key, int64_t expires, const char *resource)
{
#define MAX_EXPIRES (((int64_t) 1 << 31) - 1)
- // S3 seems to only accept expiration dates up to the number of seconds
- // representable by a signed 32-bit integer
- if (expires < 0) {
- expires = MAX_EXPIRES;
- }
- else if (expires > MAX_EXPIRES) {
- expires = MAX_EXPIRES;
- }
-
- // xxx todo: rework this so that it can be incorporated into shared code
- // with request_perform(). It's really unfortunate that this code is not
- // shared with request_perform().
-
- // URL encode the key
- char urlEncodedKey[S3_MAX_KEY_SIZE * 3];
- if (key) {
- urlEncode(urlEncodedKey, key, strlen(key));
- }
- else {
- urlEncodedKey[0] = 0;
- }
-
- // Compute canonicalized resource
- char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE];
- canonicalize_resource(bucketContext->bucketName, resource, urlEncodedKey,
- canonicalizedResource);
-
- // We allow for:
- // 17 bytes for HTTP-Verb + \n
- // 1 byte for empty Content-MD5 + \n
- // 1 byte for empty Content-Type + \n
- // 20 bytes for Expires + \n
- // 0 bytes for CanonicalizedAmzHeaders
- // CanonicalizedResource
- char signbuf[17 + 1 + 1 + 1 + 20 + sizeof(canonicalizedResource) + 1];
- int len = 0;
-
-#define signbuf_append(format, ...) \
- len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \
- format, __VA_ARGS__)
-
- signbuf_append("%s\n", "GET"); // HTTP-Verb
- signbuf_append("%s\n", ""); // Content-MD5
- signbuf_append("%s\n", ""); // Content-Type
- signbuf_append("%llu\n", (unsigned long long) expires);
- signbuf_append("%s", canonicalizedResource);
-
- // Generate an HMAC-SHA-1 of the signbuf
- unsigned char hmac[20];
-
- HMAC_SHA1(hmac, (unsigned char *) bucketContext->secretAccessKey,
- strlen(bucketContext->secretAccessKey),
- (unsigned char *) signbuf, len);
-
- // Now base-64 encode the results
- char b64[((20 + 1) * 4) / 3];
- int b64Len = base64Encode(hmac, 20, b64);
-
- // Now urlEncode that
- char signature[sizeof(b64) * 3];
- urlEncode(signature, b64, b64Len);
-
- // Finally, compose the uri, with params:
- // ?AWSAccessKeyId=xxx[&Expires=]&Signature=xxx
- char queryParams[sizeof("AWSAccessKeyId=") + 20 +
- sizeof("&Expires=") + 20 +
- sizeof("&Signature=") + sizeof(signature) + 1];
-
- sprintf(queryParams, "AWSAccessKeyId=%s&Expires=%ld&Signature=%s",
- bucketContext->accessKeyId, (long) expires, signature);
-
- return compose_uri(buffer, S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE,
- bucketContext, urlEncodedKey, resource, queryParams);
+ // S3 seems to only accept expiration dates up to the number of seconds
+ // representable by a signed 32-bit integer
+ if (expires < 0) {
+ expires = MAX_EXPIRES;
+ }
+ else if (expires > MAX_EXPIRES) {
+ expires = MAX_EXPIRES;
+ }
+
+ // xxx todo: rework this so that it can be incorporated into shared code
+ // with request_perform(). It's really unfortunate that this code is not
+ // shared with request_perform().
+
+ // URL encode the key
+ char urlEncodedKey[S3_MAX_KEY_SIZE * 3];
+ if (key) {
+ urlEncode(urlEncodedKey, key, strlen(key));
+ }
+ else {
+ urlEncodedKey[0] = 0;
+ }
+
+ // Compute canonicalized resource
+ char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE];
+ canonicalize_resource(bucketContext->bucketName, resource, urlEncodedKey,
+ canonicalizedResource);
+
+ // We allow for:
+ // 17 bytes for HTTP-Verb + \n
+ // 1 byte for empty Content-MD5 + \n
+ // 1 byte for empty Content-Type + \n
+ // 20 bytes for Expires + \n
+ // 0 bytes for CanonicalizedAmzHeaders
+ // CanonicalizedResource
+ char signbuf[17 + 1 + 1 + 1 + 20 + sizeof(canonicalizedResource) + 1];
+ int len = 0;
+
+#define signbuf_append(format, ...) \
+ len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \
+ format, __VA_ARGS__)
+
+ signbuf_append("%s\n", "GET"); // HTTP-Verb
+ signbuf_append("%s\n", ""); // Content-MD5
+ signbuf_append("%s\n", ""); // Content-Type
+ signbuf_append("%llu\n", (unsigned long long) expires);
+ signbuf_append("%s", canonicalizedResource);
+
+ // Generate an HMAC-SHA-1 of the signbuf
+ unsigned char hmac[20];
+
+ HMAC_SHA1(hmac, (unsigned char *) bucketContext->secretAccessKey,
+ strlen(bucketContext->secretAccessKey),
+ (unsigned char *) signbuf, len);
+
+ // Now base-64 encode the results
+ char b64[((20 + 1) * 4) / 3];
+ int b64Len = base64Encode(hmac, 20, b64);
+
+ // Now urlEncode that
+ char signature[sizeof(b64) * 3];
+ urlEncode(signature, b64, b64Len);
+
+ // Finally, compose the uri, with params:
+ // ?AWSAccessKeyId=xxx[&Expires=]&Signature=xxx
+ char queryParams[sizeof("AWSAccessKeyId=") + 20 +
+ sizeof("&Expires=") + 20 +
+ sizeof("&Signature=") + sizeof(signature) + 1];
+
+ sprintf(queryParams, "AWSAccessKeyId=%s&Expires=%ld&Signature=%s",
+ bucketContext->accessKeyId, (long) expires, signature);
+
+ return compose_uri(buffer, S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE,
+ bucketContext, urlEncodedKey, resource, queryParams);
}
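As a worked example of the buffer being signed above: for a hypothetical GET of key photo.jpg in bucket mybucket with Expires=1230000000, the string to sign is the verb, two empty header fields, the expiration, and the canonicalized resource, each newline-terminated except the last. The sketch below (all values made up) just rebuilds and prints that buffer; the HMAC-SHA1, base64, and URL-encoding steps are the ones the code above then applies to it.

#include <stdio.h>

int main(void)
{
    const char *canonicalizedResource = "/mybucket/photo.jpg";
    unsigned long long expires = 1230000000ULL;   /* seconds since epoch */

    char signbuf[256];
    int len = 0;
    len += snprintf(&signbuf[len], sizeof(signbuf) - len, "%s\n", "GET");
    len += snprintf(&signbuf[len], sizeof(signbuf) - len, "%s\n", "");
    len += snprintf(&signbuf[len], sizeof(signbuf) - len, "%s\n", "");
    len += snprintf(&signbuf[len], sizeof(signbuf) - len, "%llu\n", expires);
    len += snprintf(&signbuf[len], sizeof(signbuf) - len, "%s",
                    canonicalizedResource);

    /* signbuf now holds "GET\n\n\n1230000000\n/mybucket/photo.jpg"; the
       HMAC-SHA1 of this, base64- and then URL-encoded, becomes the
       Signature query parameter */
    printf("string to sign (%d bytes):\n%s\n", len, signbuf);
    return 0;
}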
diff --git a/src/request_context.c b/src/request_context.c
index ed4c185..bccaee8 100644
--- a/src/request_context.c
+++ b/src/request_context.c
@@ -33,158 +33,158 @@
S3Status S3_create_request_context(S3RequestContext **requestContextReturn)
{
- *requestContextReturn =
- (S3RequestContext *) malloc(sizeof(S3RequestContext));
-
- if (!*requestContextReturn) {
- return S3StatusOutOfMemory;
- }
-
- if (!((*requestContextReturn)->curlm = curl_multi_init())) {
- free(*requestContextReturn);
- return S3StatusOutOfMemory;
- }
-
- (*requestContextReturn)->requests = 0;
-
- return S3StatusOK;
+ *requestContextReturn =
+ (S3RequestContext *) malloc(sizeof(S3RequestContext));
+
+ if (!*requestContextReturn) {
+ return S3StatusOutOfMemory;
+ }
+
+ if (!((*requestContextReturn)->curlm = curl_multi_init())) {
+ free(*requestContextReturn);
+ return S3StatusOutOfMemory;
+ }
+
+ (*requestContextReturn)->requests = 0;
+
+ return S3StatusOK;
}
void S3_destroy_request_context(S3RequestContext *requestContext)
{
- curl_multi_cleanup(requestContext->curlm);
-
- // For each request in the context, call back its done method with
- // 'interrupted' status
- Request *r = requestContext->requests, *rFirst = r;
-
- if (r) do {
- r->status = S3StatusInterrupted;
- Request *rNext = r->next;
- request_finish(r);
- r = rNext;
- } while (r != rFirst);
-
- free(requestContext);
+ curl_multi_cleanup(requestContext->curlm);
+
+ // For each request in the context, call back its done method with
+ // 'interrupted' status
+ Request *r = requestContext->requests, *rFirst = r;
+
+ if (r) do {
+ r->status = S3StatusInterrupted;
+ Request *rNext = r->next;
+ request_finish(r);
+ r = rNext;
+ } while (r != rFirst);
+
+ free(requestContext);
}
S3Status S3_runall_request_context(S3RequestContext *requestContext)
{
- int requestsRemaining;
- do {
- fd_set readfds, writefds, exceptfds;
- FD_ZERO(&readfds);
- FD_ZERO(&writefds);
- FD_ZERO(&exceptfds);
- int maxfd;
- S3Status status = S3_get_request_context_fdsets
- (requestContext, &readfds, &writefds, &exceptfds, &maxfd);
- if (status != S3StatusOK) {
- return status;
- }
- // curl will return -1 if it hasn't even created any fds yet because
- // none of the connections have started yet. In this case, don't
- // do the select at all, because it will wait forever; instead, just
- // skip it and go straight to running the underlying CURL handles
- if (maxfd != -1) {
- int64_t timeout = S3_get_request_context_timeout(requestContext);
- struct timeval tv = { timeout / 1000, (timeout % 1000) * 1000 };
- select(maxfd + 1, &readfds, &writefds, &exceptfds,
- (timeout == -1) ? 0 : &tv);
- }
- status = S3_runonce_request_context(requestContext,
- &requestsRemaining);
- if (status != S3StatusOK) {
- return status;
- }
- } while (requestsRemaining);
-
- return S3StatusOK;
+ int requestsRemaining;
+ do {
+ fd_set readfds, writefds, exceptfds;
+ FD_ZERO(&readfds);
+ FD_ZERO(&writefds);
+ FD_ZERO(&exceptfds);
+ int maxfd;
+ S3Status status = S3_get_request_context_fdsets
+ (requestContext, &readfds, &writefds, &exceptfds, &maxfd);
+ if (status != S3StatusOK) {
+ return status;
+ }
+ // curl will return -1 if it hasn't even created any fds yet because
+ // none of the connections have started yet. In this case, don't
+ // do the select at all, because it will wait forever; instead, just
+ // skip it and go straight to running the underlying CURL handles
+ if (maxfd != -1) {
+ int64_t timeout = S3_get_request_context_timeout(requestContext);
+ struct timeval tv = { timeout / 1000, (timeout % 1000) * 1000 };
+ select(maxfd + 1, &readfds, &writefds, &exceptfds,
+ (timeout == -1) ? 0 : &tv);
+ }
+ status = S3_runonce_request_context(requestContext,
+ &requestsRemaining);
+ if (status != S3StatusOK) {
+ return status;
+ }
+ } while (requestsRemaining);
+
+ return S3StatusOK;
}
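A minimal usage sketch of the context API in this file: create a context, queue work against it with whichever libs3 operations accept an S3RequestContext (left as a placeholder comment here), let S3_runall_request_context() drive everything to completion, then destroy the context.

#include <stdio.h>
#include "libs3.h"

void run_batch(void)
{
    S3RequestContext *ctx;
    S3Status status = S3_create_request_context(&ctx);
    if (status != S3StatusOK) {
        fprintf(stderr, "create context: %s\n", S3_get_status_name(status));
        return;
    }

    /* ... queue one or more operations against ctx here ... */

    status = S3_runall_request_context(ctx);   /* select() + curl_multi loop */
    if (status != S3StatusOK) {
        fprintf(stderr, "runall: %s\n", S3_get_status_name(status));
    }

    S3_destroy_request_context(ctx);
}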
S3Status S3_runonce_request_context(S3RequestContext *requestContext,
- int *requestsRemainingReturn)
+ int *requestsRemainingReturn)
{
- CURLMcode status;
-
- do {
- status = curl_multi_perform(requestContext->curlm,
- requestsRemainingReturn);
-
- switch (status) {
- case CURLM_OK:
- case CURLM_CALL_MULTI_PERFORM:
- break;
- case CURLM_OUT_OF_MEMORY:
- return S3StatusOutOfMemory;
- default:
- return S3StatusInternalError;
- }
-
- CURLMsg *msg;
- int junk;
- while ((msg = curl_multi_info_read(requestContext->curlm, &junk))) {
- if (msg->msg != CURLMSG_DONE) {
- return S3StatusInternalError;
- }
- Request *request;
- if (curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE,
- (char **) &request) != CURLE_OK) {
- return S3StatusInternalError;
- }
- // Remove the request from the list of requests
- if (request->prev == request->next) {
- // It was the only one on the list
- requestContext->requests = 0;
- }
- else {
- // The order doesn't matter, so just in case request was at the
- // head of the list, put the one after request at the head of
- // the list
- requestContext->requests = request->next;
- request->prev->next = request->next;
- request->next->prev = request->prev;
- }
- if ((msg->data.result != CURLE_OK) &&
- (request->status == S3StatusOK)) {
- request->status = request_curl_code_to_status
- (msg->data.result);
- }
- if (curl_multi_remove_handle(requestContext->curlm,
- msg->easy_handle) != CURLM_OK) {
- return S3StatusInternalError;
- }
- // Finish the request, ensuring that all callbacks have been made,
- // and also releases the request
- request_finish(request);
- // Now, since a callback was made, there may be new requests
- // queued up to be performed immediately, so do so
- status = CURLM_CALL_MULTI_PERFORM;
- }
- } while (status == CURLM_CALL_MULTI_PERFORM);
-
- return S3StatusOK;
+ CURLMcode status;
+
+ do {
+ status = curl_multi_perform(requestContext->curlm,
+ requestsRemainingReturn);
+
+ switch (status) {
+ case CURLM_OK:
+ case CURLM_CALL_MULTI_PERFORM:
+ break;
+ case CURLM_OUT_OF_MEMORY:
+ return S3StatusOutOfMemory;
+ default:
+ return S3StatusInternalError;
+ }
+
+ CURLMsg *msg;
+ int junk;
+ while ((msg = curl_multi_info_read(requestContext->curlm, &junk))) {
+ if (msg->msg != CURLMSG_DONE) {
+ return S3StatusInternalError;
+ }
+ Request *request;
+ if (curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE,
+ (char **) &request) != CURLE_OK) {
+ return S3StatusInternalError;
+ }
+ // Remove the request from the list of requests
+ if (request->prev == request->next) {
+ // It was the only one on the list
+ requestContext->requests = 0;
+ }
+ else {
+ // The order doesn't matter, so just in case request was at the
+ // head of the list, put the one after request at the head of
+ // the list
+ requestContext->requests = request->next;
+ request->prev->next = request->next;
+ request->next->prev = request->prev;
+ }
+ if ((msg->data.result != CURLE_OK) &&
+ (request->status == S3StatusOK)) {
+ request->status = request_curl_code_to_status
+ (msg->data.result);
+ }
+ if (curl_multi_remove_handle(requestContext->curlm,
+ msg->easy_handle) != CURLM_OK) {
+ return S3StatusInternalError;
+ }
+ // Finish the request, ensuring that all callbacks have been made,
+ // and also releases the request
+ request_finish(request);
+ // Now, since a callback was made, there may be new requests
+ // queued up to be performed immediately, so do so
+ status = CURLM_CALL_MULTI_PERFORM;
+ }
+ } while (status == CURLM_CALL_MULTI_PERFORM);
+
+ return S3StatusOK;
}
S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext,
- fd_set *readFdSet, fd_set *writeFdSet,
- fd_set *exceptFdSet, int *maxFd)
+ fd_set *readFdSet, fd_set *writeFdSet,
+ fd_set *exceptFdSet, int *maxFd)
{
- return ((curl_multi_fdset(requestContext->curlm, readFdSet, writeFdSet,
- exceptFdSet, maxFd) == CURLM_OK) ?
- S3StatusOK : S3StatusInternalError);
+ return ((curl_multi_fdset(requestContext->curlm, readFdSet, writeFdSet,
+ exceptFdSet, maxFd) == CURLM_OK) ?
+ S3StatusOK : S3StatusInternalError);
}
int64_t S3_get_request_context_timeout(S3RequestContext *requestContext)
{
- long timeout;
+ long timeout;
- if (curl_multi_timeout(requestContext->curlm, &timeout) != CURLM_OK) {
- timeout = 0;
- }
-
- return timeout;
+ if (curl_multi_timeout(requestContext->curlm, &timeout) != CURLM_OK) {
+ timeout = 0;
+ }
+
+ return timeout;
}
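The two helpers above exist so an application can fold libs3 requests into its own event loop instead of calling S3_runall_request_context(). The sketch below is essentially the body of S3_runall_request_context() restated from the caller's side; the timeout is in milliseconds, and -1 means curl has no timeout to suggest, so select() simply blocks.

#include <stdint.h>
#include <sys/select.h>
#include "libs3.h"

void drive(S3RequestContext *ctx)
{
    int remaining = 1;
    while (remaining) {
        fd_set readfds, writefds, exceptfds;
        int maxfd;
        FD_ZERO(&readfds);
        FD_ZERO(&writefds);
        FD_ZERO(&exceptfds);
        if (S3_get_request_context_fdsets(ctx, &readfds, &writefds,
                                          &exceptfds, &maxfd) != S3StatusOK) {
            break;
        }
        /* maxfd == -1 means curl has not created any fds yet; skip select */
        if (maxfd != -1) {
            int64_t ms = S3_get_request_context_timeout(ctx);
            struct timeval tv = { ms / 1000, (ms % 1000) * 1000 };
            select(maxfd + 1, &readfds, &writefds, &exceptfds,
                   (ms == -1) ? 0 : &tv);
        }
        if (S3_runonce_request_context(ctx, &remaining) != S3StatusOK) {
            break;
        }
    }
}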
diff --git a/src/response_headers_handler.c b/src/response_headers_handler.c
index c190f0d..14e14a0 100644
--- a/src/response_headers_handler.c
+++ b/src/response_headers_handler.c
@@ -31,175 +31,175 @@
void response_headers_handler_initialize(ResponseHeadersHandler *handler)
{
- handler->responseProperties.requestId = 0;
- handler->responseProperties.requestId2 = 0;
- handler->responseProperties.contentType = 0;
- handler->responseProperties.contentLength = 0;
- handler->responseProperties.server = 0;
- handler->responseProperties.eTag = 0;
- handler->responseProperties.lastModified = -1;
- handler->responseProperties.metaDataCount = 0;
- handler->responseProperties.metaData = 0;
- handler->done = 0;
- string_multibuffer_initialize(handler->responsePropertyStrings);
- string_multibuffer_initialize(handler->responseMetaDataStrings);
+ handler->responseProperties.requestId = 0;
+ handler->responseProperties.requestId2 = 0;
+ handler->responseProperties.contentType = 0;
+ handler->responseProperties.contentLength = 0;
+ handler->responseProperties.server = 0;
+ handler->responseProperties.eTag = 0;
+ handler->responseProperties.lastModified = -1;
+ handler->responseProperties.metaDataCount = 0;
+ handler->responseProperties.metaData = 0;
+ handler->done = 0;
+ string_multibuffer_initialize(handler->responsePropertyStrings);
+ string_multibuffer_initialize(handler->responseMetaDataStrings);
}
void response_headers_handler_add(ResponseHeadersHandler *handler,
- char *header, int len)
+ char *header, int len)
{
- S3ResponseProperties *responseProperties = &(handler->responseProperties);
- char *end = &(header[len]);
-
- // Curl might call back the header function after the body has been
- // received, for 'chunked encoded' contents. We don't handle this as of
- // yet, and it's not clear that it would ever be useful.
- if (handler->done) {
- return;
- }
-
- // If we've already filled up the response headers, ignore this data.
- // This sucks, but it shouldn't happen - S3 should not be sending back
- // really long headers.
- if (handler->responsePropertyStringsSize ==
- (sizeof(handler->responsePropertyStrings) - 1)) {
- return;
- }
-
- // It should not be possible to have a header line less than 3 characters long
- if (len < 3) {
- return;
- }
-
- // Skip whitespace at beginning of header; there never should be any,
- // but just to be safe
- while (isblank(*header)) {
- header++;
- }
-
- // The header must end in \r\n, so skip back over it, and also over any
- // trailing whitespace
- end -= 3;
- while ((end > header) && isblank(*end)) {
- end--;
- }
- if (!isblank(*end)) {
- end++;
- }
-
- if (end == header) {
- // totally bogus
- return;
- }
-
- *end = 0;
-
- // Find the colon to split the header up
- char *c = header;
- while (*c && (*c != ':')) {
- c++;
- }
-
- int namelen = c - header;
-
- // Now walk c past the colon
- c++;
- // Now skip whitespace to the beginning of the value
- while (isblank(*c)) {
- c++;
- }
-
- int valuelen = (end - c) + 1, fit;
-
- if (!strncmp(header, "x-amz-request-id", namelen)) {
- responseProperties->requestId =
- string_multibuffer_current(handler->responsePropertyStrings);
- string_multibuffer_add(handler->responsePropertyStrings, c,
- valuelen, fit);
- }
- else if (!strncmp(header, "x-amz-id-2", namelen)) {
- responseProperties->requestId2 =
- string_multibuffer_current(handler->responsePropertyStrings);
- string_multibuffer_add(handler->responsePropertyStrings, c,
- valuelen, fit);
- }
- else if (!strncmp(header, "Content-Type", namelen)) {
- responseProperties->contentType =
- string_multibuffer_current(handler->responsePropertyStrings);
- string_multibuffer_add(handler->responsePropertyStrings, c,
- valuelen, fit);
- }
- else if (!strncmp(header, "Content-Length", namelen)) {
- handler->responseProperties.contentLength = 0;
- while (*c) {
- handler->responseProperties.contentLength *= 10;
- handler->responseProperties.contentLength += (*c++ - '0');
- }
- }
- else if (!strncmp(header, "Server", namelen)) {
- responseProperties->server =
- string_multibuffer_current(handler->responsePropertyStrings);
- string_multibuffer_add(handler->responsePropertyStrings, c,
- valuelen, fit);
- }
- else if (!strncmp(header, "ETag", namelen)) {
- responseProperties->eTag =
- string_multibuffer_current(handler->responsePropertyStrings);
- string_multibuffer_add(handler->responsePropertyStrings, c,
- valuelen, fit);
- }
- else if (!strncmp(header, S3_METADATA_HEADER_NAME_PREFIX,
- sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1)) {
- // Make sure there is room for another x-amz-meta header
- if (handler->responseProperties.metaDataCount ==
- sizeof(handler->responseMetaData)) {
- return;
- }
- // Copy the name in
- char *metaName = &(header[sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1]);
- int metaNameLen =
- (namelen - (sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1));
- char *copiedName =
- string_multibuffer_current(handler->responseMetaDataStrings);
- string_multibuffer_add(handler->responseMetaDataStrings, metaName,
- metaNameLen, fit);
- if (!fit) {
- return;
- }
-
- // Copy the value in
- char *copiedValue =
- string_multibuffer_current(handler->responseMetaDataStrings);
- string_multibuffer_add(handler->responseMetaDataStrings,
- c, valuelen, fit);
- if (!fit) {
- return;
- }
-
- if (!handler->responseProperties.metaDataCount) {
- handler->responseProperties.metaData =
- handler->responseMetaData;
- }
-
- S3NameValue *metaHeader =
- &(handler->responseMetaData
- [handler->responseProperties.metaDataCount++]);
- metaHeader->name = copiedName;
- metaHeader->value = copiedValue;
- }
+ S3ResponseProperties *responseProperties = &(handler->responseProperties);
+ char *end = &(header[len]);
+
+ // Curl might call back the header function after the body has been
+ // received, for 'chunked encoded' contents. We don't handle this as of
+ // yet, and it's not clear that it would ever be useful.
+ if (handler->done) {
+ return;
+ }
+
+ // If we've already filled up the response headers, ignore this data.
+ // This sucks, but it shouldn't happen - S3 should not be sending back
+ // really long headers.
+ if (handler->responsePropertyStringsSize ==
+ (sizeof(handler->responsePropertyStrings) - 1)) {
+ return;
+ }
+
+ // It should not be possible to have a header line less than 3 characters long
+ if (len < 3) {
+ return;
+ }
+
+ // Skip whitespace at beginning of header; there never should be any,
+ // but just to be safe
+ while (isblank(*header)) {
+ header++;
+ }
+
+ // The header must end in \r\n, so skip back over it, and also over any
+ // trailing whitespace
+ end -= 3;
+ while ((end > header) && isblank(*end)) {
+ end--;
+ }
+ if (!isblank(*end)) {
+ end++;
+ }
+
+ if (end == header) {
+ // totally bogus
+ return;
+ }
+
+ *end = 0;
+
+ // Find the colon to split the header up
+ char *c = header;
+ while (*c && (*c != ':')) {
+ c++;
+ }
+
+ int namelen = c - header;
+
+ // Now walk c past the colon
+ c++;
+ // Now skip whitespace to the beginning of the value
+ while (isblank(*c)) {
+ c++;
+ }
+
+ int valuelen = (end - c) + 1, fit;
+
+ if (!strncmp(header, "x-amz-request-id", namelen)) {
+ responseProperties->requestId =
+ string_multibuffer_current(handler->responsePropertyStrings);
+ string_multibuffer_add(handler->responsePropertyStrings, c,
+ valuelen, fit);
+ }
+ else if (!strncmp(header, "x-amz-id-2", namelen)) {
+ responseProperties->requestId2 =
+ string_multibuffer_current(handler->responsePropertyStrings);
+ string_multibuffer_add(handler->responsePropertyStrings, c,
+ valuelen, fit);
+ }
+ else if (!strncmp(header, "Content-Type", namelen)) {
+ responseProperties->contentType =
+ string_multibuffer_current(handler->responsePropertyStrings);
+ string_multibuffer_add(handler->responsePropertyStrings, c,
+ valuelen, fit);
+ }
+ else if (!strncmp(header, "Content-Length", namelen)) {
+ handler->responseProperties.contentLength = 0;
+ while (*c) {
+ handler->responseProperties.contentLength *= 10;
+ handler->responseProperties.contentLength += (*c++ - '0');
+ }
+ }
+ else if (!strncmp(header, "Server", namelen)) {
+ responseProperties->server =
+ string_multibuffer_current(handler->responsePropertyStrings);
+ string_multibuffer_add(handler->responsePropertyStrings, c,
+ valuelen, fit);
+ }
+ else if (!strncmp(header, "ETag", namelen)) {
+ responseProperties->eTag =
+ string_multibuffer_current(handler->responsePropertyStrings);
+ string_multibuffer_add(handler->responsePropertyStrings, c,
+ valuelen, fit);
+ }
+ else if (!strncmp(header, S3_METADATA_HEADER_NAME_PREFIX,
+ sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1)) {
+ // Make sure there is room for another x-amz-meta header
+ if (handler->responseProperties.metaDataCount ==
+ sizeof(handler->responseMetaData)) {
+ return;
+ }
+ // Copy the name in
+ char *metaName = &(header[sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1]);
+ int metaNameLen =
+ (namelen - (sizeof(S3_METADATA_HEADER_NAME_PREFIX) - 1));
+ char *copiedName =
+ string_multibuffer_current(handler->responseMetaDataStrings);
+ string_multibuffer_add(handler->responseMetaDataStrings, metaName,
+ metaNameLen, fit);
+ if (!fit) {
+ return;
+ }
+
+ // Copy the value in
+ char *copiedValue =
+ string_multibuffer_current(handler->responseMetaDataStrings);
+ string_multibuffer_add(handler->responseMetaDataStrings,
+ c, valuelen, fit);
+ if (!fit) {
+ return;
+ }
+
+ if (!handler->responseProperties.metaDataCount) {
+ handler->responseProperties.metaData =
+ handler->responseMetaData;
+ }
+
+ S3NameValue *metaHeader =
+ &(handler->responseMetaData
+ [handler->responseProperties.metaDataCount++]);
+ metaHeader->name = copiedName;
+ metaHeader->value = copiedValue;
+ }
}
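For reference, a raw header line such as "x-amz-meta-color: blue\r\n" comes out of the code above as the pair ("color", "blue") stored in an S3NameValue, with the x-amz-meta- prefix stripped from the name. The fragment below is a deliberately simplified, standalone version of just the split-and-trim step; it does none of the length checking or multibuffer bookkeeping the real handler does.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[] = "x-amz-meta-color: blue\r\n";

    line[strlen(line) - 2] = 0;          /* drop the trailing \r\n */

    char *colon = strchr(line, ':');
    if (!colon) {
        return 1;
    }
    *colon = 0;                          /* line is now just the name */
    char *value = colon + 1;
    while (isblank((unsigned char) *value)) {
        value++;                         /* skip the space after the colon */
    }

    printf("name='%s' value='%s'\n", line, value);
    /* prints: name='x-amz-meta-color' value='blue' */
    return 0;
}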
void response_headers_handler_done(ResponseHeadersHandler *handler, CURL *curl)
{
- // Now get the last modification time from curl, since it's easiest to let
- // curl parse it
- time_t lastModified;
- if (curl_easy_getinfo
- (curl, CURLINFO_FILETIME, &lastModified) == CURLE_OK) {
- handler->responseProperties.lastModified = lastModified;
- }
-
- handler->done = 1;
+ // Now get the last modification time from curl, since it's easiest to let
+ // curl parse it
+ time_t lastModified;
+ if (curl_easy_getinfo
+ (curl, CURLINFO_FILETIME, &lastModified) == CURLE_OK) {
+ handler->responseProperties.lastModified = lastModified;
+ }
+
+ handler->done = 1;
}
diff --git a/src/s3.c b/src/s3.c
index 8a28300..885a8bc 100644
--- a/src/s3.c
+++ b/src/s3.c
@@ -102,7 +102,7 @@ static char errorDetailsG[4096] = { 0 };
#define MD5_PREFIX_LEN (sizeof(MD5_PREFIX) - 1)
#define CONTENT_DISPOSITION_FILENAME_PREFIX "contentDispositionFilename="
#define CONTENT_DISPOSITION_FILENAME_PREFIX_LEN \
- (sizeof(CONTENT_DISPOSITION_FILENAME_PREFIX) - 1)
+ (sizeof(CONTENT_DISPOSITION_FILENAME_PREFIX) - 1)
#define CONTENT_ENCODING_PREFIX "contentEncoding="
#define CONTENT_ENCODING_PREFIX_LEN (sizeof(CONTENT_ENCODING_PREFIX) - 1)
#define EXPIRES_PREFIX "expires="
@@ -113,7 +113,7 @@ static char errorDetailsG[4096] = { 0 };
#define IF_MODIFIED_SINCE_PREFIX_LEN (sizeof(IF_MODIFIED_SINCE_PREFIX) - 1)
#define IF_NOT_MODIFIED_SINCE_PREFIX "ifNotmodifiedSince="
#define IF_NOT_MODIFIED_SINCE_PREFIX_LEN \
- (sizeof(IF_NOT_MODIFIED_SINCE_PREFIX) - 1)
+ (sizeof(IF_NOT_MODIFIED_SINCE_PREFIX) - 1)
#define IF_MATCH_PREFIX "ifMatch="
#define IF_MATCH_PREFIX_LEN (sizeof(IF_MATCH_PREFIX) - 1)
#define IF_NOT_MATCH_PREFIX "ifNotMatch="
@@ -138,176 +138,176 @@ static char errorDetailsG[4096] = { 0 };
static void S3_init()
{
- S3Status status;
- if ((status = S3_initialize("s3", S3_INIT_ALL))
- != S3StatusOK) {
- fprintf(stderr, "Failed to initialize libs3: %s\n",
- S3_get_status_name(status));
- exit(-1);
- }
+ S3Status status;
+ if ((status = S3_initialize("s3", S3_INIT_ALL))
+ != S3StatusOK) {
+ fprintf(stderr, "Failed to initialize libs3: %s\n",
+ S3_get_status_name(status));
+ exit(-1);
+ }
}
static void printError()
{
- if (statusG < S3StatusErrorAccessDenied) {
- fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
- }
- else {
- fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
- fprintf(stderr, "%s\n", errorDetailsG);
- }
+ if (statusG < S3StatusErrorAccessDenied) {
+ fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
+ }
+ else {
+ fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
+ fprintf(stderr, "%s\n", errorDetailsG);
+ }
}
static void usageExit(FILE *out)
{
- fprintf(out,
+ fprintf(out,
"\n Options:\n"
"\n"
-" Command Line:\n"
+" Command Line:\n"
"\n"
-" -f/--force : force operation despite warnings\n"
-" -h/--vhost-style : use virtual-host-style URIs (default is "
- "path-style)\n"
-" -u/--unencrypted : unencrypted (use HTTP instead of HTTPS)\n"
-" -s/--show-properties : show response properties on stdout\n"
-" -r/--retries : retry retryable failures this number of times\n"
-" (default is 5)\n"
+" -f/--force : force operation despite warnings\n"
+" -h/--vhost-style : use virtual-host-style URIs (default is "
+ "path-style)\n"
+" -u/--unencrypted : unencrypted (use HTTP instead of HTTPS)\n"
+" -s/--show-properties : show response properties on stdout\n"
+" -r/--retries : retry retryable failures this number of times\n"
+" (default is 5)\n"
"\n"
-" Environment:\n"
+" Environment:\n"
"\n"
-" S3_ACCESS_KEY_ID : S3 access key ID (required)\n"
-" S3_SECRET_ACCESS_KEY : S3 secret access key (required)\n"
+" S3_ACCESS_KEY_ID : S3 access key ID (required)\n"
+" S3_SECRET_ACCESS_KEY : S3 secret access key (required)\n"
"\n"
" Commands (with <required parameters> and [optional parameters]) :\n"
"\n"
-" (NOTE: all command parameters take a value and are specified using the\n"
-" pattern parameter=value)\n"
+" (NOTE: all command parameters take a value and are specified using the\n"
+" pattern parameter=value)\n"
"\n"
-" help : Prints this help text\n"
+" help : Prints this help text\n"
"\n"
-" list : Lists owned buckets\n"
-" [allDetails] : Show full details\n"
+" list : Lists owned buckets\n"
+" [allDetails] : Show full details\n"
"\n"
-" test : Tests a bucket for existence and accessibility\n"
-" <bucket> : Bucket to test\n"
+" test : Tests a bucket for existence and accessibility\n"
+" <bucket> : Bucket to test\n"
"\n"
-" create : Create a new bucket\n"
-" <bucket> : Bucket to create\n"
-" [cannedAcl] : Canned ACL for the bucket (see Canned ACLs)\n"
-" [location] : Location for bucket (for example, EU)\n"
+" create : Create a new bucket\n"
+" <bucket> : Bucket to create\n"
+" [cannedAcl] : Canned ACL for the bucket (see Canned ACLs)\n"
+" [location] : Location for bucket (for example, EU)\n"
"\n"
-" delete : Delete a bucket or key\n"
-" <bucket>[/<key>] : Bucket or bucket/key to delete\n"
+" delete : Delete a bucket or key\n"
+" <bucket>[/<key>] : Bucket or bucket/key to delete\n"
"\n"
-" list : List bucket contents\n"
-" <bucket> : Bucket to list\n"
-" [prefix] : Prefix for results set\n"
-" [marker] : Where in results set to start listing\n"
-" [delimiter] : Delimiter for rolling up results set\n"
-" [maxkeys] : Maximum number of keys to return in results set\n"
-" [allDetails] : Show full details for each key\n"
+" list : List bucket contents\n"
+" <bucket> : Bucket to list\n"
+" [prefix] : Prefix for results set\n"
+" [marker] : Where in results set to start listing\n"
+" [delimiter] : Delimiter for rolling up results set\n"
+" [maxkeys] : Maximum number of keys to return in results set\n"
+" [allDetails] : Show full details for each key\n"
"\n"
-" getacl : Get the ACL of a bucket or key\n"
-" <bucket>[/<key>] : Bucket or bucket/key to get the ACL of\n"
-" [filename] : Output filename for ACL (default is stdout)\n"
+" getacl : Get the ACL of a bucket or key\n"
+" <bucket>[/<key>] : Bucket or bucket/key to get the ACL of\n"
+" [filename] : Output filename for ACL (default is stdout)\n"
"\n"
-" setacl : Set the ACL of a bucket or key\n"
-" <bucket>[/<key>] : Bucket or bucket/key to set the ACL of\n"
-" [filename] : Input filename for ACL (default is stdin)\n"
+" setacl : Set the ACL of a bucket or key\n"
+" <bucket>[/<key>] : Bucket or bucket/key to set the ACL of\n"
+" [filename] : Input filename for ACL (default is stdin)\n"
"\n"
-" getlogging : Get the logging status of a bucket\n"
-" <bucket> : Bucket to get the logging status of\n"
-" [filename] : Output filename for ACL (default is stdout)\n"
+" getlogging : Get the logging status of a bucket\n"
+" <bucket> : Bucket to get the logging status of\n"
+" [filename] : Output filename for ACL (default is stdout)\n"
"\n"
-" setlogging : Set the logging status of a bucket\n"
-" <bucket> : Bucket to set the logging status of\n"
-" [targetBucket] : Target bucket to log to; if not present, disables\n"
-" logging\n"
-" [targetPrefix] : Key prefix to use for logs\n"
-" [filename] : Input filename for ACL (default is stdin)\n"
+" setlogging : Set the logging status of a bucket\n"
+" <bucket> : Bucket to set the logging status of\n"
+" [targetBucket] : Target bucket to log to; if not present, disables\n"
+" logging\n"
+" [targetPrefix] : Key prefix to use for logs\n"
+" [filename] : Input filename for ACL (default is stdin)\n"
"\n"
-" put : Puts an object\n"
-" <bucket>/<key> : Bucket/key to put object to\n"
-" [filename] : Filename to read source data from "
- "(default is stdin)\n"
-" [contentLength] : How many bytes of source data to put (required if\n"
-" source file is stdin)\n"
-" [cacheControl] : Cache-Control HTTP header string to associate with\n"
-" object\n"
-" [contentType] : Content-Type HTTP header string to associate with\n"
-" object\n"
-" [md5] : MD5 for validating source data\n"
-" [contentDispositionFilename] : Content-Disposition filename string to\n"
-" associate with object\n"
-" [contentEncoding] : Content-Encoding HTTP header string to associate\n"
-" with object\n"
-" [expires] : Expiration date to associate with object\n"
-" [cannedAcl] : Canned ACL for the object (see Canned ACLs)\n"
-" [x-amz-meta-...]] : Metadata headers to associate with the object\n"
+" put : Puts an object\n"
+" <bucket>/<key> : Bucket/key to put object to\n"
+" [filename] : Filename to read source data from "
+ "(default is stdin)\n"
+" [contentLength] : How many bytes of source data to put (required if\n"
+" source file is stdin)\n"
+" [cacheControl] : Cache-Control HTTP header string to associate with\n"
+" object\n"
+" [contentType] : Content-Type HTTP header string to associate with\n"
+" object\n"
+" [md5] : MD5 for validating source data\n"
+" [contentDispositionFilename] : Content-Disposition filename string to\n"
+" associate with object\n"
+" [contentEncoding] : Content-Encoding HTTP header string to associate\n"
+" with object\n"
+" [expires] : Expiration date to associate with object\n"
+" [cannedAcl] : Canned ACL for the object (see Canned ACLs)\n"
+" [x-amz-meta-...]] : Metadata headers to associate with the object\n"
"\n"
-" copy : Copies an object; if any options are set, the "
- "entire\n"
-" metadata of the object is replaced\n"
-" <sourcebucket>/<sourcekey> : Source bucket/key\n"
-" <destbucket>/<destkey> : Destination bucket/key\n"
-" [cacheControl] : Cache-Control HTTP header string to associate with\n"
-" object\n"
-" [contentType] : Content-Type HTTP header string to associate with\n"
-" object\n"
-" [contentDispositionFilename] : Content-Disposition filename string to\n"
-" associate with object\n"
-" [contentEncoding] : Content-Encoding HTTP header string to associate\n"
-" with object\n"
-" [expires] : Expiration date to associate with object\n"
-" [cannedAcl] : Canned ACL for the object (see Canned ACLs)\n"
-" [x-amz-meta-...]] : Metadata headers to associate with the object\n"
+" copy : Copies an object; if any options are set, the "
+ "entire\n"
+" metadata of the object is replaced\n"
+" <sourcebucket>/<sourcekey> : Source bucket/key\n"
+" <destbucket>/<destkey> : Destination bucket/key\n"
+" [cacheControl] : Cache-Control HTTP header string to associate with\n"
+" object\n"
+" [contentType] : Content-Type HTTP header string to associate with\n"
+" object\n"
+" [contentDispositionFilename] : Content-Disposition filename string to\n"
+" associate with object\n"
+" [contentEncoding] : Content-Encoding HTTP header string to associate\n"
+" with object\n"
+" [expires] : Expiration date to associate with object\n"
+" [cannedAcl] : Canned ACL for the object (see Canned ACLs)\n"
+" [x-amz-meta-...]] : Metadata headers to associate with the object\n"
"\n"
-" get : Gets an object\n"
-" <buckey>/<key> : Bucket/key of object to get\n"
-" [filename] : Filename to write object data to (required if -s\n"
-" command line parameter was used)\n"
-" [ifModifiedSince] : Only return the object if it has been modified "
- "since\n"
-" this date\n"
-" [ifNotmodifiedSince] : Only return the object if it has not been "
- "modified\n"
-" since this date\n"
-" [ifMatch] : Only return the object if its ETag header matches\n"
-" this string\n"
-" [ifNotMatch] : Only return the object if its ETag header does "
- "not\n"
-" match this string\n"
-" [startByte] : First byte of byte range to return\n"
-" [byteCount] : Number of bytes of byte range to return\n"
+" get : Gets an object\n"
+" <buckey>/<key> : Bucket/key of object to get\n"
+" [filename] : Filename to write object data to (required if -s\n"
+" command line parameter was used)\n"
+" [ifModifiedSince] : Only return the object if it has been modified "
+ "since\n"
+" this date\n"
+" [ifNotmodifiedSince] : Only return the object if it has not been "
+ "modified\n"
+" since this date\n"
+" [ifMatch] : Only return the object if its ETag header matches\n"
+" this string\n"
+" [ifNotMatch] : Only return the object if its ETag header does "
+ "not\n"
+" match this string\n"
+" [startByte] : First byte of byte range to return\n"
+" [byteCount] : Number of bytes of byte range to return\n"
"\n"
-" head : Gets only the headers of an object, implies -s\n"
-" <bucket>/<key> : Bucket/key of object to get headers of\n"
+" head : Gets only the headers of an object, implies -s\n"
+" <bucket>/<key> : Bucket/key of object to get headers of\n"
"\n"
-" gqs : Generates an authenticated query string\n"
-" <bucket>[/<key>] : Bucket or bucket/key to generate query string for\n"
-" [expires] : Expiration date for query string\n"
-" [resource] : Sub-resource of key for query string, without a\n"
-" leading '?', for example, \"torrent\"\n"
+" gqs : Generates an authenticated query string\n"
+" <bucket>[/<key>] : Bucket or bucket/key to generate query string for\n"
+" [expires] : Expiration date for query string\n"
+" [resource] : Sub-resource of key for query string, without a\n"
+" leading '?', for example, \"torrent\"\n"
"\n"
" Canned ACLs:\n"
"\n"
" The following canned ACLs are supported:\n"
-" private (default), public-read, public-read-write, authenticated-read\n"
+" private (default), public-read, public-read-write, authenticated-read\n"
"\n"
" ACL Format:\n"
"\n"
" For the getacl and setacl commands, the format of the ACL list is:\n"
" 1) An initial line giving the owner id in this format:\n"
-" OwnerID <Owner ID> <Owner Display Name>\n"
+" OwnerID <Owner ID> <Owner Display Name>\n"
" 2) Optional header lines, giving column headers, starting with the\n"
-" word \"Type\", or with some number of dashes\n"
+" word \"Type\", or with some number of dashes\n"
" 3) Grant lines, of the form:\n"
-" <Grant Type> (whitespace) <Grantee> (whitespace) <Permission>\n"
-" where Grant Type is one of: Email, UserID, or Group, and\n"
-" Grantee is the identification of the grantee based on this type,\n"
-" and Permission is one of: READ, WRITE, READ_ACP, or FULL_CONTROL.\n"
+" <Grant Type> (whitespace) <Grantee> (whitespace) <Permission>\n"
+" where Grant Type is one of: Email, UserID, or Group, and\n"
+" Grantee is the identification of the grantee based on this type,\n"
+" and Permission is one of: READ, WRITE, READ_ACP, or FULL_CONTROL.\n"
"\n"
" Note that the easiest way to modify an ACL is to first get it, saving it\n"
" into a file, then modifying the file, and then setting the modified file\n"
@@ -317,217 +317,217 @@ static void usageExit(FILE *out)
"\n"
" The format for dates used in parameters is as ISO 8601 dates, i.e.\n"
" YYYY-MM-DDTHH:MM:SS[.s...][T/+-dd:dd]. Examples:\n"
-" 2008-07-29T20:36:14.0023T\n"
-" 2008-07-29T20:36:14.0023+06:00\n"
-" 2008-07-29T20:36:14.0023-10:00\n"
+" 2008-07-29T20:36:14.0023T\n"
+" 2008-07-29T20:36:14.0023+06:00\n"
+" 2008-07-29T20:36:14.0023-10:00\n"
"\n");
- exit(-1);
+ exit(-1);
}
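To tie the ACL notes in the usage text together, a hypothetical ACL listing in the format described above might look like the following (owner ID, display name, grantees and permissions are all made up; the Type/dash header lines are optional and are skipped by the parser further down):

OwnerID 1234567890abcdef1234567890abcdef fake-owner-name
Type    Grantee                                            Permission
----    -------                                            ----------
UserID  1234567890abcdef1234567890abcdef (fake-owner-name) FULL_CONTROL
Group   All Users                                          READ
Email   someone@example.com                                WRITE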
static uint64_t convertInt(const char *str, const char *paramName)
{
- uint64_t ret = 0;
-
- while (*str) {
- if (!isdigit(*str)) {
- fprintf(stderr, "\nERROR: Nondigit in %s parameter: %c\n",
- paramName, *str);
- usageExit(stderr);
- }
- ret *= 10;
- ret += (*str++ - '0');
- }
-
- return ret;
+ uint64_t ret = 0;
+
+ while (*str) {
+ if (!isdigit(*str)) {
+ fprintf(stderr, "\nERROR: Nondigit in %s parameter: %c\n",
+ paramName, *str);
+ usageExit(stderr);
+ }
+ ret *= 10;
+ ret += (*str++ - '0');
+ }
+
+ return ret;
}
typedef struct growbuffer
{
- // The total number of bytes, and the start byte
- int size;
- // The start byte
- int start;
- // The blocks
- char data[64 * 1024];
- struct growbuffer *prev, *next;
+ // The total number of bytes, and the start byte
+ int size;
+ // The start byte
+ int start;
+ // The blocks
+ char data[64 * 1024];
+ struct growbuffer *prev, *next;
} growbuffer;
// returns nonzero on success, zero on out of memory
static int growbuffer_append(growbuffer **gb, const char *data, int dataLen)
{
- while (dataLen) {
- growbuffer *buf = *gb ? (*gb)->prev : 0;
- if (!buf || (buf->size == sizeof(buf->data))) {
- buf = (growbuffer *) malloc(sizeof(growbuffer));
- if (!buf) {
- return 0;
- }
- buf->size = 0;
- buf->start = 0;
- if (*gb) {
- buf->prev = (*gb)->prev;
- buf->next = *gb;
- (*gb)->prev->next = buf;
- (*gb)->prev = buf;
- }
- else {
- buf->prev = buf->next = buf;
- *gb = buf;
- }
- }
-
- int toCopy = (sizeof(buf->data) - buf->size);
- if (toCopy > dataLen) {
- toCopy = dataLen;
- }
-
- memcpy(&(buf->data[buf->size]), data, toCopy);
-
- buf->size += toCopy, data += toCopy, dataLen -= toCopy;
- }
-
- return 1;
+ while (dataLen) {
+ growbuffer *buf = *gb ? (*gb)->prev : 0;
+ if (!buf || (buf->size == sizeof(buf->data))) {
+ buf = (growbuffer *) malloc(sizeof(growbuffer));
+ if (!buf) {
+ return 0;
+ }
+ buf->size = 0;
+ buf->start = 0;
+ if (*gb) {
+ buf->prev = (*gb)->prev;
+ buf->next = *gb;
+ (*gb)->prev->next = buf;
+ (*gb)->prev = buf;
+ }
+ else {
+ buf->prev = buf->next = buf;
+ *gb = buf;
+ }
+ }
+
+ int toCopy = (sizeof(buf->data) - buf->size);
+ if (toCopy > dataLen) {
+ toCopy = dataLen;
+ }
+
+ memcpy(&(buf->data[buf->size]), data, toCopy);
+
+ buf->size += toCopy, data += toCopy, dataLen -= toCopy;
+ }
+
+ return 1;
}
static void growbuffer_read(growbuffer **gb, int amt, int *amtReturn,
- char *buffer)
+ char *buffer)
{
- *amtReturn = 0;
+ *amtReturn = 0;
- growbuffer *buf = *gb;
+ growbuffer *buf = *gb;
- if (!buf) {
- return;
- }
+ if (!buf) {
+ return;
+ }
- *amtReturn = (buf->size > amt) ? amt : buf->size;
+ *amtReturn = (buf->size > amt) ? amt : buf->size;
- memcpy(buffer, &(buf->data[buf->start]), *amtReturn);
-
- buf->start += *amtReturn, buf->size -= *amtReturn;
+ memcpy(buffer, &(buf->data[buf->start]), *amtReturn);
+
+ buf->start += *amtReturn, buf->size -= *amtReturn;
- if (buf->size == 0) {
- if (buf->next == buf) {
- *gb = 0;
- }
- else {
- *gb = buf->next;
- }
- free(buf);
- }
+ if (buf->size == 0) {
+ if (buf->next == buf) {
+ *gb = 0;
+ }
+ else {
+ *gb = buf->next;
+ }
+ free(buf);
+ }
}
static void growbuffer_destroy(growbuffer *gb)
{
- growbuffer *start = gb;
+ growbuffer *start = gb;
- while (gb) {
- growbuffer *next = gb->next;
- free(gb);
- gb = (next == start) ? 0 : next;
- }
+ while (gb) {
+ growbuffer *next = gb->next;
+ free(gb);
+ gb = (next == start) ? 0 : next;
+ }
}
-// Convenience utility for making the code look nicer. Tests a string
+// Convenience utility for making the code look nicer. Tests a string
// against a format; only the characters specified in the format are
// checked (i.e. if the string is longer than the format, the string still
-// checks out ok). Format characters are:
+// checks out ok). Format characters are:
// d - is a digit
// anything else - is that character
// Returns nonzero if the string checks out, zero if it does not.
static int checkString(const char *str, const char *format)
{
- while (*format) {
- if (*format == 'd') {
- if (!isdigit(*str)) {
- return 0;
- }
- }
- else if (*str != *format) {
- return 0;
- }
- str++, format++;
- }
-
- return 1;
+ while (*format) {
+ if (*format == 'd') {
+ if (!isdigit(*str)) {
+ return 0;
+ }
+ }
+ else if (*str != *format) {
+ return 0;
+ }
+ str++, format++;
+ }
+
+ return 1;
}
static int64_t parseIso8601Time(const char *str)
{
- // Check to make sure that it has a valid format
- if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) {
- return -1;
- }
+ // Check to make sure that it has a valid format
+ if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) {
+ return -1;
+ }
#define nextnum() (((*str - '0') * 10) + (*(str + 1) - '0'))
- // Convert it
- struct tm stm;
- memset(&stm, 0, sizeof(stm));
-
- stm.tm_year = (nextnum() - 19) * 100;
- str += 2;
- stm.tm_year += nextnum();
- str += 3;
-
- stm.tm_mon = nextnum() - 1;
- str += 3;
-
- stm.tm_mday = nextnum();
- str += 3;
-
- stm.tm_hour = nextnum();
- str += 3;
-
- stm.tm_min = nextnum();
- str += 3;
-
- stm.tm_sec = nextnum();
- str += 2;
-
- stm.tm_isdst = -1;
-
- // This is hokey but it's the recommended way ...
- char *tz = getenv("TZ");
- setenv("TZ", "UTC", 1);
-
- int64_t ret = mktime(&stm);
-
- if (tz) {
- setenv("TZ", tz, 1);
- }
- else {
- unsetenv("TZ");
- }
-
- // Skip the millis
-
- if (*str == '.') {
- str++;
- while (isdigit(*str)) {
- str++;
- }
- }
-
- if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
- int sign = (*str++ == '-') ? -1 : 1;
- int hours = nextnum();
- str += 3;
- int minutes = nextnum();
- ret += (-sign * (((hours * 60) + minutes) * 60));
- }
- // Else it should be Z to be a conformant time string, but we just assume
- // that it is rather than enforcing that
-
- return ret;
+ // Convert it
+ struct tm stm;
+ memset(&stm, 0, sizeof(stm));
+
+ stm.tm_year = (nextnum() - 19) * 100;
+ str += 2;
+ stm.tm_year += nextnum();
+ str += 3;
+
+ stm.tm_mon = nextnum() - 1;
+ str += 3;
+
+ stm.tm_mday = nextnum();
+ str += 3;
+
+ stm.tm_hour = nextnum();
+ str += 3;
+
+ stm.tm_min = nextnum();
+ str += 3;
+
+ stm.tm_sec = nextnum();
+ str += 2;
+
+ stm.tm_isdst = -1;
+
+ // This is hokey but it's the recommended way ...
+ char *tz = getenv("TZ");
+ setenv("TZ", "UTC", 1);
+
+ int64_t ret = mktime(&stm);
+
+ if (tz) {
+ setenv("TZ", tz, 1);
+ }
+ else {
+ unsetenv("TZ");
+ }
+
+ // Skip the millis
+
+ if (*str == '.') {
+ str++;
+ while (isdigit(*str)) {
+ str++;
+ }
+ }
+
+ if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
+ int sign = (*str++ == '-') ? -1 : 1;
+ int hours = nextnum();
+ str += 3;
+ int minutes = nextnum();
+ ret += (-sign * (((hours * 60) + minutes) * 60));
+ }
+ // Else it should be Z to be a conformant time string, but we just assume
+ // that it is rather than enforcing that
+
+ return ret;
}
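
A sketch of the resulting behavior (calls are illustrative, not part of s3.c; everything is converted to seconds since the epoch in UTC):

//   parseIso8601Time("2008-12-30T12:29:54Z")       -> some UTC timestamp t
//   parseIso8601Time("2008-12-30T12:29:54.123Z")   -> t (fractional seconds are skipped)
//   parseIso8601Time("2008-12-30T12:29:54+02:00")  -> t - 7200 (offset subtracted)
//   parseIso8601Time("12/30/2008")                 -> -1 (fails the format check)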
@@ -537,161 +537,161 @@ static int64_t parseIso8601Time(const char *str)
// Email email_address permission
// UserID user_id (display_name) permission
// Group Authenticated AWS Users permission
-// Group All Users permission
+// Group All Users permission
// permission is one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL
static int convert_simple_acl(char *aclXml, char *ownerId,
- char *ownerDisplayName,
- int *aclGrantCountReturn,
- S3AclGrant *aclGrants)
+ char *ownerDisplayName,
+ int *aclGrantCountReturn,
+ S3AclGrant *aclGrants)
{
- *aclGrantCountReturn = 0;
- *ownerId = 0;
- *ownerDisplayName = 0;
-
-#define SKIP_SPACE(require_more) \
- do { \
- while (isspace(*aclXml)) { \
- aclXml++; \
- } \
- if (require_more && !*aclXml) { \
- return 0; \
- } \
- } while (0)
-
-#define COPY_STRING_MAXLEN(field, maxlen) \
- do { \
- SKIP_SPACE(1); \
- int len = 0; \
- while ((len < maxlen) && !isspace(*aclXml)) { \
- field[len++] = *aclXml++; \
- } \
- field[len] = 0; \
- } while (0)
-
-#define COPY_STRING(field) \
- COPY_STRING_MAXLEN(field, (int) (sizeof(field) - 1))
-
- while (1) {
- SKIP_SPACE(0);
-
- if (!*aclXml) {
- break;
- }
-
- // Skip Type lines and dash lines
- if (!strncmp(aclXml, "Type", sizeof("Type") - 1) ||
- (*aclXml == '-')) {
- while (*aclXml && ((*aclXml != '\n') && (*aclXml != '\r'))) {
- aclXml++;
- }
- continue;
- }
-
- if (!strncmp(aclXml, "OwnerID", sizeof("OwnerID") - 1)) {
- aclXml += sizeof("OwnerID") - 1;
- COPY_STRING_MAXLEN(ownerId, S3_MAX_GRANTEE_USER_ID_SIZE);
- SKIP_SPACE(1);
- COPY_STRING_MAXLEN(ownerDisplayName,
- S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
- continue;
- }
-
- if (*aclGrantCountReturn == S3_MAX_ACL_GRANT_COUNT) {
- return 0;
- }
-
- S3AclGrant *grant = &(aclGrants[(*aclGrantCountReturn)++]);
-
- if (!strncmp(aclXml, "Email", sizeof("Email") - 1)) {
- grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
- aclXml += sizeof("Email") - 1;
- COPY_STRING(grant->grantee.amazonCustomerByEmail.emailAddress);
- }
- else if (!strncmp(aclXml, "UserID", sizeof("UserID") - 1)) {
- grant->granteeType = S3GranteeTypeCanonicalUser;
- aclXml += sizeof("UserID") - 1;
- COPY_STRING(grant->grantee.canonicalUser.id);
- SKIP_SPACE(1);
- // Now do display name
- COPY_STRING(grant->grantee.canonicalUser.displayName);
- }
- else if (!strncmp(aclXml, "Group", sizeof("Group") - 1)) {
- aclXml += sizeof("Group") - 1;
- SKIP_SPACE(1);
- if (!strncmp(aclXml, "Authenticated AWS Users",
- sizeof("Authenticated AWS Users") - 1)) {
- grant->granteeType = S3GranteeTypeAllAwsUsers;
- aclXml += (sizeof("Authenticated AWS Users") - 1);
- }
- else if (!strncmp(aclXml, "All Users", sizeof("All Users") - 1)) {
- grant->granteeType = S3GranteeTypeAllUsers;
- aclXml += (sizeof("All Users") - 1);
- }
- else if (!strncmp(aclXml, "Log Delivery",
- sizeof("Log Delivery") - 1)) {
- grant->granteeType = S3GranteeTypeLogDelivery;
- aclXml += (sizeof("Log Delivery") - 1);
- }
- else {
- return 0;
- }
- }
- else {
- return 0;
- }
-
- SKIP_SPACE(1);
-
- if (!strncmp(aclXml, "READ_ACP", sizeof("READ_ACP") - 1)) {
- grant->permission = S3PermissionReadACP;
- aclXml += (sizeof("READ_ACP") - 1);
- }
- else if (!strncmp(aclXml, "READ", sizeof("READ") - 1)) {
- grant->permission = S3PermissionRead;
- aclXml += (sizeof("READ") - 1);
- }
- else if (!strncmp(aclXml, "WRITE_ACP", sizeof("WRITE_ACP") - 1)) {
- grant->permission = S3PermissionWriteACP;
- aclXml += (sizeof("WRITE_ACP") - 1);
- }
- else if (!strncmp(aclXml, "WRITE", sizeof("WRITE") - 1)) {
- grant->permission = S3PermissionWrite;
- aclXml += (sizeof("WRITE") - 1);
- }
- else if (!strncmp(aclXml, "FULL_CONTROL",
- sizeof("FULL_CONTROL") - 1)) {
- grant->permission = S3PermissionFullControl;
- aclXml += (sizeof("FULL_CONTROL") - 1);
- }
- }
-
- return 1;
+ *aclGrantCountReturn = 0;
+ *ownerId = 0;
+ *ownerDisplayName = 0;
+
+#define SKIP_SPACE(require_more) \
+ do { \
+ while (isspace(*aclXml)) { \
+ aclXml++; \
+ } \
+ if (require_more && !*aclXml) { \
+ return 0; \
+ } \
+ } while (0)
+
+#define COPY_STRING_MAXLEN(field, maxlen) \
+ do { \
+ SKIP_SPACE(1); \
+ int len = 0; \
+ while ((len < maxlen) && !isspace(*aclXml)) { \
+ field[len++] = *aclXml++; \
+ } \
+ field[len] = 0; \
+ } while (0)
+
+#define COPY_STRING(field) \
+ COPY_STRING_MAXLEN(field, (int) (sizeof(field) - 1))
+
+ while (1) {
+ SKIP_SPACE(0);
+
+ if (!*aclXml) {
+ break;
+ }
+
+ // Skip Type lines and dash lines
+ if (!strncmp(aclXml, "Type", sizeof("Type") - 1) ||
+ (*aclXml == '-')) {
+ while (*aclXml && ((*aclXml != '\n') && (*aclXml != '\r'))) {
+ aclXml++;
+ }
+ continue;
+ }
+
+ if (!strncmp(aclXml, "OwnerID", sizeof("OwnerID") - 1)) {
+ aclXml += sizeof("OwnerID") - 1;
+ COPY_STRING_MAXLEN(ownerId, S3_MAX_GRANTEE_USER_ID_SIZE);
+ SKIP_SPACE(1);
+ COPY_STRING_MAXLEN(ownerDisplayName,
+ S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
+ continue;
+ }
+
+ if (*aclGrantCountReturn == S3_MAX_ACL_GRANT_COUNT) {
+ return 0;
+ }
+
+ S3AclGrant *grant = &(aclGrants[(*aclGrantCountReturn)++]);
+
+ if (!strncmp(aclXml, "Email", sizeof("Email") - 1)) {
+ grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
+ aclXml += sizeof("Email") - 1;
+ COPY_STRING(grant->grantee.amazonCustomerByEmail.emailAddress);
+ }
+ else if (!strncmp(aclXml, "UserID", sizeof("UserID") - 1)) {
+ grant->granteeType = S3GranteeTypeCanonicalUser;
+ aclXml += sizeof("UserID") - 1;
+ COPY_STRING(grant->grantee.canonicalUser.id);
+ SKIP_SPACE(1);
+ // Now do display name
+ COPY_STRING(grant->grantee.canonicalUser.displayName);
+ }
+ else if (!strncmp(aclXml, "Group", sizeof("Group") - 1)) {
+ aclXml += sizeof("Group") - 1;
+ SKIP_SPACE(1);
+ if (!strncmp(aclXml, "Authenticated AWS Users",
+ sizeof("Authenticated AWS Users") - 1)) {
+ grant->granteeType = S3GranteeTypeAllAwsUsers;
+ aclXml += (sizeof("Authenticated AWS Users") - 1);
+ }
+ else if (!strncmp(aclXml, "All Users", sizeof("All Users") - 1)) {
+ grant->granteeType = S3GranteeTypeAllUsers;
+ aclXml += (sizeof("All Users") - 1);
+ }
+ else if (!strncmp(aclXml, "Log Delivery",
+ sizeof("Log Delivery") - 1)) {
+ grant->granteeType = S3GranteeTypeLogDelivery;
+ aclXml += (sizeof("Log Delivery") - 1);
+ }
+ else {
+ return 0;
+ }
+ }
+ else {
+ return 0;
+ }
+
+ SKIP_SPACE(1);
+
+ if (!strncmp(aclXml, "READ_ACP", sizeof("READ_ACP") - 1)) {
+ grant->permission = S3PermissionReadACP;
+ aclXml += (sizeof("READ_ACP") - 1);
+ }
+ else if (!strncmp(aclXml, "READ", sizeof("READ") - 1)) {
+ grant->permission = S3PermissionRead;
+ aclXml += (sizeof("READ") - 1);
+ }
+ else if (!strncmp(aclXml, "WRITE_ACP", sizeof("WRITE_ACP") - 1)) {
+ grant->permission = S3PermissionWriteACP;
+ aclXml += (sizeof("WRITE_ACP") - 1);
+ }
+ else if (!strncmp(aclXml, "WRITE", sizeof("WRITE") - 1)) {
+ grant->permission = S3PermissionWrite;
+ aclXml += (sizeof("WRITE") - 1);
+ }
+ else if (!strncmp(aclXml, "FULL_CONTROL",
+ sizeof("FULL_CONTROL") - 1)) {
+ grant->permission = S3PermissionFullControl;
+ aclXml += (sizeof("FULL_CONTROL") - 1);
+ }
+ }
+
+ return 1;
}
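
For reference, input in the shape this parser accepts looks like the following ("Type" header lines and dash separator lines are skipped; the IDs and display names here are illustrative placeholders, not real values):

//   OwnerID 1234567890abcdef1234567890abcdef ownerdisplayname
//   Type     Grantee                                              Permission
//   ------   --------------------------------------------------   ----------
//   UserID   1234567890abcdef1234567890abcdef (ownerdisplayname)  FULL_CONTROL
//   Group    All Users                                             READ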
static int should_retry()
{
- if (retriesG--) {
- // Sleep before next retry; start out with a 1 second sleep
- static int retrySleepInterval = 1;
- sleep(retrySleepInterval);
- // Next sleep 1 second longer
- retrySleepInterval++;
- return 1;
- }
-
- return 0;
+ if (retriesG--) {
+ // Sleep before next retry; start out with a 1 second sleep
+ static int retrySleepInterval = 1;
+ sleep(retrySleepInterval);
+ // Next sleep 1 second longer
+ retrySleepInterval++;
+ return 1;
+ }
+
+ return 0;
}
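
should_retry implements a simple linear backoff: each call sleeps one second longer than the previous one (retrySleepInterval is static, so the interval keeps growing within a run) and decrements the remaining retry budget in retriesG. Every command below drives it with the same loop; for example, test_bucket issues:

    do {
        S3_test_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
                       bucketName, sizeof(locationConstraint),
                       locationConstraint, 0, &responseHandler, 0);
    } while (S3_status_is_retryable(statusG) && should_retry());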
static struct option longOptionsG[] =
{
- { "force", no_argument, 0, 'f' },
- { "vhost-style", no_argument, 0, 'h' },
- { "unencrypted", no_argument, 0, 'u' },
- { "show-properties", no_argument, 0, 's' },
- { "retries", required_argument, 0, 'r' },
- { 0, 0, 0, 0 }
+ { "force", no_argument, 0, 'f' },
+ { "vhost-style", no_argument, 0, 'h' },
+ { "unencrypted", no_argument, 0, 'u' },
+ { "show-properties", no_argument, 0, 's' },
+ { "retries", required_argument, 0, 'r' },
+ { 0, 0, 0, 0 }
};
@@ -700,44 +700,44 @@ static struct option longOptionsG[] =
// This callback does the same thing for every request type: prints out the
// properties if the user has requested them to be so
static S3Status responsePropertiesCallback
- (const S3ResponseProperties *properties, void *callbackData)
+ (const S3ResponseProperties *properties, void *callbackData)
{
- (void) callbackData;
-
- if (!showResponsePropertiesG) {
- return S3StatusOK;
- }
-
-#define print_nonnull(name, field) \
- do { \
- if (properties-> field) { \
- printf("%s: %s\n", name, properties-> field); \
- } \
- } while (0)
-
- print_nonnull("Content-Type", contentType);
- print_nonnull("Request-Id", requestId);
- print_nonnull("Request-Id-2", requestId2);
- if (properties->contentLength > 0) {
- printf("Content-Length: %lld\n",
- (unsigned long long) properties->contentLength);
- }
- print_nonnull("Server", server);
- print_nonnull("ETag", eTag);
- if (properties->lastModified > 0) {
- char timebuf[256];
- time_t t = (time_t) properties->lastModified;
- // gmtime is not thread-safe but we don't care here.
- strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
- printf("Last-Modified: %s\n", timebuf);
- }
- int i;
- for (i = 0; i < properties->metaDataCount; i++) {
- printf("x-amz-meta-%s: %s\n", properties->metaData[i].name,
- properties->metaData[i].value);
- }
-
- return S3StatusOK;
+ (void) callbackData;
+
+ if (!showResponsePropertiesG) {
+ return S3StatusOK;
+ }
+
+#define print_nonnull(name, field) \
+ do { \
+ if (properties-> field) { \
+ printf("%s: %s\n", name, properties-> field); \
+ } \
+ } while (0)
+
+ print_nonnull("Content-Type", contentType);
+ print_nonnull("Request-Id", requestId);
+ print_nonnull("Request-Id-2", requestId2);
+ if (properties->contentLength > 0) {
+ printf("Content-Length: %lld\n",
+ (unsigned long long) properties->contentLength);
+ }
+ print_nonnull("Server", server);
+ print_nonnull("ETag", eTag);
+ if (properties->lastModified > 0) {
+ char timebuf[256];
+ time_t t = (time_t) properties->lastModified;
+ // gmtime is not thread-safe but we don't care here.
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
+ printf("Last-Modified: %s\n", timebuf);
+ }
+ int i;
+ for (i = 0; i < properties->metaDataCount; i++) {
+ printf("x-amz-meta-%s: %s\n", properties->metaData[i].name,
+ properties->metaData[i].value);
+ }
+
+ return S3StatusOK;
}
@@ -746,39 +746,39 @@ static S3Status responsePropertiesCallback
// This callback does the same thing for every request type: saves the status
// and error stuff in global variables
static void responseCompleteCallback(S3Status status,
- const S3ErrorDetails *error,
- void *callbackData)
+ const S3ErrorDetails *error,
+ void *callbackData)
{
- (void) callbackData;
-
- statusG = status;
- // Compose the error details message now, although we might not use it.
- // Can't just save a pointer to [error] since it's not guaranteed to last
- // beyond this callback
- int len = 0;
- if (error && error->message) {
- len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
- " Message: %s\n", error->message);
- }
- if (error && error->resource) {
- len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
- " Resource: %s\n", error->resource);
- }
- if (error && error->furtherDetails) {
- len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
- " Further Details: %s\n", error->furtherDetails);
- }
- if (error && error->extraDetailsCount) {
- len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
- "%s", " Extra Details:\n");
- int i;
- for (i = 0; i < error->extraDetailsCount; i++) {
- len += snprintf(&(errorDetailsG[len]),
- sizeof(errorDetailsG) - len, " %s: %s\n",
- error->extraDetails[i].name,
- error->extraDetails[i].value);
- }
- }
+ (void) callbackData;
+
+ statusG = status;
+ // Compose the error details message now, although we might not use it.
+ // Can't just save a pointer to [error] since it's not guaranteed to last
+ // beyond this callback
+ int len = 0;
+ if (error && error->message) {
+ len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+ " Message: %s\n", error->message);
+ }
+ if (error && error->resource) {
+ len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+ " Resource: %s\n", error->resource);
+ }
+ if (error && error->furtherDetails) {
+ len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+ " Further Details: %s\n", error->furtherDetails);
+ }
+ if (error && error->extraDetailsCount) {
+ len += snprintf(&(errorDetailsG[len]), sizeof(errorDetailsG) - len,
+ "%s", " Extra Details:\n");
+ int i;
+ for (i = 0; i < error->extraDetailsCount; i++) {
+ len += snprintf(&(errorDetailsG[len]),
+ sizeof(errorDetailsG) - len, " %s: %s\n",
+ error->extraDetails[i].name,
+ error->extraDetails[i].value);
+ }
+ }
}
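
printError() is referenced by every command below but defined outside this hunk; a minimal sketch of what such a helper could look like, given the statusG and errorDetailsG globals filled in above (an assumption for illustration, not the file's actual definition):

static void printErrorSketch(void)
{
    // S3_get_status_name maps an S3Status code to its symbolic name
    fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
    fprintf(stderr, "%s", errorDetailsG);
}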
@@ -786,93 +786,93 @@ static void responseCompleteCallback(S3Status status,
typedef struct list_service_data
{
- int headerPrinted;
- int allDetails;
+ int headerPrinted;
+ int allDetails;
} list_service_data;
static void printListServiceHeader(int allDetails)
{
- printf("%-56s %-20s", " Bucket",
- " Created");
- if (allDetails) {
- printf(" %-64s %-12s",
- " Owner ID",
- "Display Name");
- }
- printf("\n");
- printf("-------------------------------------------------------- "
- "--------------------");
- if (allDetails) {
- printf(" -------------------------------------------------"
- "--------------- ------------");
- }
- printf("\n");
+ printf("%-56s %-20s", " Bucket",
+ " Created");
+ if (allDetails) {
+ printf(" %-64s %-12s",
+ " Owner ID",
+ "Display Name");
+ }
+ printf("\n");
+ printf("-------------------------------------------------------- "
+ "--------------------");
+ if (allDetails) {
+ printf(" -------------------------------------------------"
+ "--------------- ------------");
+ }
+ printf("\n");
}
static S3Status listServiceCallback(const char *ownerId,
- const char *ownerDisplayName,
- const char *bucketName,
- int64_t creationDate, void *callbackData)
+ const char *ownerDisplayName,
+ const char *bucketName,
+ int64_t creationDate, void *callbackData)
{
- list_service_data *data = (list_service_data *) callbackData;
-
- if (!data->headerPrinted) {
- data->headerPrinted = 1;
- printListServiceHeader(data->allDetails);
- }
-
- char timebuf[256];
- if (creationDate >= 0) {
- time_t t = (time_t) creationDate;
- strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
- }
- else {
- timebuf[0] = 0;
- }
-
- printf("%-56s %-20s", bucketName, timebuf);
- if (data->allDetails) {
- printf(" %-64s %-12s", ownerId ? ownerId : "",
- ownerDisplayName ? ownerDisplayName : "");
- }
- printf("\n");
-
- return S3StatusOK;
+ list_service_data *data = (list_service_data *) callbackData;
+
+ if (!data->headerPrinted) {
+ data->headerPrinted = 1;
+ printListServiceHeader(data->allDetails);
+ }
+
+ char timebuf[256];
+ if (creationDate >= 0) {
+ time_t t = (time_t) creationDate;
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
+ }
+ else {
+ timebuf[0] = 0;
+ }
+
+ printf("%-56s %-20s", bucketName, timebuf);
+ if (data->allDetails) {
+ printf(" %-64s %-12s", ownerId ? ownerId : "",
+ ownerDisplayName ? ownerDisplayName : "");
+ }
+ printf("\n");
+
+ return S3StatusOK;
}
static void list_service(int allDetails)
{
- list_service_data data;
-
- data.headerPrinted = 0;
- data.allDetails = allDetails;
-
- S3_init();
-
- S3ListServiceHandler listServiceHandler =
- {
- { &responsePropertiesCallback, &responseCompleteCallback },
- &listServiceCallback
- };
-
- do {
- S3_list_service(protocolG, accessKeyIdG, secretAccessKeyG, 0,
- &listServiceHandler, &data);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG == S3StatusOK) {
- if (!data.headerPrinted) {
- printListServiceHeader(allDetails);
- }
- }
- else {
- printError();
- }
-
- S3_deinitialize();
+ list_service_data data;
+
+ data.headerPrinted = 0;
+ data.allDetails = allDetails;
+
+ S3_init();
+
+ S3ListServiceHandler listServiceHandler =
+ {
+ { &responsePropertiesCallback, &responseCompleteCallback },
+ &listServiceCallback
+ };
+
+ do {
+ S3_list_service(protocolG, accessKeyIdG, secretAccessKeyG, 0,
+ &listServiceHandler, &data);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG == S3StatusOK) {
+ if (!data.headerPrinted) {
+ printListServiceHeader(allDetails);
+ }
+ }
+ else {
+ printError();
+ }
+
+ S3_deinitialize();
}
@@ -880,63 +880,63 @@ static void list_service(int allDetails)
static void test_bucket(int argc, char **argv, int optindex)
{
- // test bucket
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex++];
-
- if (optindex != argc) {
- fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
- usageExit(stderr);
- }
-
- S3_init();
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback, &responseCompleteCallback
- };
-
- char locationConstraint[64];
- do {
- S3_test_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
- bucketName, sizeof(locationConstraint),
- locationConstraint, 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- const char *result;
-
- switch (statusG) {
- case S3StatusOK:
- // bucket exists
- result = locationConstraint[0] ? locationConstraint : "USA";
- break;
- case S3StatusErrorNoSuchBucket:
- result = "Does Not Exist";
- break;
- case S3StatusErrorAccessDenied:
- result = "Access Denied";
- break;
- default:
- result = 0;
- break;
- }
-
- if (result) {
- printf("%-56s %-20s\n", " Bucket",
- " Status");
- printf("-------------------------------------------------------- "
- "--------------------\n");
- printf("%-56s %-20s\n", bucketName, result);
- }
- else {
- printError();
- }
-
- S3_deinitialize();
+ // test bucket
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex++];
+
+ if (optindex != argc) {
+ fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
+ usageExit(stderr);
+ }
+
+ S3_init();
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback, &responseCompleteCallback
+ };
+
+ char locationConstraint[64];
+ do {
+ S3_test_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
+ bucketName, sizeof(locationConstraint),
+ locationConstraint, 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ const char *result;
+
+ switch (statusG) {
+ case S3StatusOK:
+ // bucket exists
+ result = locationConstraint[0] ? locationConstraint : "USA";
+ break;
+ case S3StatusErrorNoSuchBucket:
+ result = "Does Not Exist";
+ break;
+ case S3StatusErrorAccessDenied:
+ result = "Access Denied";
+ break;
+ default:
+ result = 0;
+ break;
+ }
+
+ if (result) {
+ printf("%-56s %-20s\n", " Bucket",
+ " Status");
+ printf("-------------------------------------------------------- "
+ "--------------------\n");
+ printf("%-56s %-20s\n", bucketName, result);
+ }
+ else {
+ printError();
+ }
+
+ S3_deinitialize();
}
@@ -944,76 +944,76 @@ static void test_bucket(int argc, char **argv, int optindex)
static void create_bucket(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex++];
-
- if (!forceG && (S3_validate_bucket_name
- (bucketName, S3UriStyleVirtualHost) != S3StatusOK)) {
- fprintf(stderr, "\nWARNING: Bucket name is not valid for "
- "virtual-host style URI access.\n");
- fprintf(stderr, "Bucket not created. Use -f option to force the "
- "bucket to be created despite\n");
- fprintf(stderr, "this warning.\n\n");
- exit(-1);
- }
-
- const char *locationConstraint = 0;
- S3CannedAcl cannedAcl = S3CannedAclPrivate;
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, LOCATION_PREFIX, LOCATION_PREFIX_LEN)) {
- locationConstraint = &(param[LOCATION_PREFIX_LEN]);
- }
- else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
- char *val = &(param[CANNED_ACL_PREFIX_LEN]);
- if (!strcmp(val, "private")) {
- cannedAcl = S3CannedAclPrivate;
- }
- else if (!strcmp(val, "public-read")) {
- cannedAcl = S3CannedAclPublicRead;
- }
- else if (!strcmp(val, "public-read-write")) {
- cannedAcl = S3CannedAclPublicReadWrite;
- }
- else if (!strcmp(val, "authenticated-read")) {
- cannedAcl = S3CannedAclAuthenticatedRead;
- }
- else {
- fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
- usageExit(stderr);
- }
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- S3_init();
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback, &responseCompleteCallback
- };
-
- do {
- S3_create_bucket(protocolG, accessKeyIdG, secretAccessKeyG,
- bucketName, cannedAcl, locationConstraint, 0,
- &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG == S3StatusOK) {
- printf("Bucket successfully created.\n");
- }
- else {
- printError();
- }
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex++];
+
+ if (!forceG && (S3_validate_bucket_name
+ (bucketName, S3UriStyleVirtualHost) != S3StatusOK)) {
+ fprintf(stderr, "\nWARNING: Bucket name is not valid for "
+ "virtual-host style URI access.\n");
+ fprintf(stderr, "Bucket not created. Use -f option to force the "
+ "bucket to be created despite\n");
+ fprintf(stderr, "this warning.\n\n");
+ exit(-1);
+ }
+
+ const char *locationConstraint = 0;
+ S3CannedAcl cannedAcl = S3CannedAclPrivate;
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, LOCATION_PREFIX, LOCATION_PREFIX_LEN)) {
+ locationConstraint = &(param[LOCATION_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
+ char *val = &(param[CANNED_ACL_PREFIX_LEN]);
+ if (!strcmp(val, "private")) {
+ cannedAcl = S3CannedAclPrivate;
+ }
+ else if (!strcmp(val, "public-read")) {
+ cannedAcl = S3CannedAclPublicRead;
+ }
+ else if (!strcmp(val, "public-read-write")) {
+ cannedAcl = S3CannedAclPublicReadWrite;
+ }
+ else if (!strcmp(val, "authenticated-read")) {
+ cannedAcl = S3CannedAclAuthenticatedRead;
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
+ usageExit(stderr);
+ }
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ S3_init();
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback, &responseCompleteCallback
+ };
+
+ do {
+ S3_create_bucket(protocolG, accessKeyIdG, secretAccessKeyG,
+ bucketName, cannedAcl, locationConstraint, 0,
+ &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG == S3StatusOK) {
+ printf("Bucket successfully created.\n");
+ }
+ else {
+ printError();
+ }
+
+ S3_deinitialize();
}
@@ -1021,35 +1021,35 @@ static void create_bucket(int argc, char **argv, int optindex)
static void delete_bucket(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
- usageExit(stderr);
- }
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+ usageExit(stderr);
+ }
- const char *bucketName = argv[optindex++];
+ const char *bucketName = argv[optindex++];
- if (optindex != argc) {
- fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
- usageExit(stderr);
- }
+ if (optindex != argc) {
+ fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
+ usageExit(stderr);
+ }
- S3_init();
+ S3_init();
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback, &responseCompleteCallback
- };
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback, &responseCompleteCallback
+ };
- do {
- S3_delete_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
- bucketName, 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
+ do {
+ S3_delete_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
+ bucketName, 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
- if (statusG != S3StatusOK) {
- printError();
- }
+ if (statusG != S3StatusOK) {
+ printError();
+ }
- S3_deinitialize();
+ S3_deinitialize();
}
@@ -1057,285 +1057,285 @@ static void delete_bucket(int argc, char **argv, int optindex)
typedef struct list_bucket_callback_data
{
- int isTruncated;
- char nextMarker[1024];
- int keyCount;
- int allDetails;
+ int isTruncated;
+ char nextMarker[1024];
+ int keyCount;
+ int allDetails;
} list_bucket_callback_data;
static void printListBucketHeader(int allDetails)
{
- printf("%-50s %-20s %-5s",
- " Key",
- " Last Modified", "Size");
- if (allDetails) {
- printf(" %-34s %-64s %-12s",
- " ETag",
- " Owner ID",
- "Display Name");
- }
- printf("\n");
- printf("-------------------------------------------------- "
- "-------------------- -----");
- if (allDetails) {
- printf(" ---------------------------------- "
- "-------------------------------------------------"
- "--------------- ------------");
- }
- printf("\n");
+ printf("%-50s %-20s %-5s",
+ " Key",
+ " Last Modified", "Size");
+ if (allDetails) {
+ printf(" %-34s %-64s %-12s",
+ " ETag",
+ " Owner ID",
+ "Display Name");
+ }
+ printf("\n");
+ printf("-------------------------------------------------- "
+ "-------------------- -----");
+ if (allDetails) {
+ printf(" ---------------------------------- "
+ "-------------------------------------------------"
+ "--------------- ------------");
+ }
+ printf("\n");
}
static S3Status listBucketCallback(int isTruncated, const char *nextMarker,
- int contentsCount,
- const S3ListBucketContent *contents,
- int commonPrefixesCount,
- const char **commonPrefixes,
- void *callbackData)
+ int contentsCount,
+ const S3ListBucketContent *contents,
+ int commonPrefixesCount,
+ const char **commonPrefixes,
+ void *callbackData)
{
- list_bucket_callback_data *data =
- (list_bucket_callback_data *) callbackData;
-
- data->isTruncated = isTruncated;
- // This is tricky. S3 doesn't return the NextMarker if there is no
- // delimiter. Why, I don't know, since it's still useful for paging
- // through results. We want NextMarker to be the last content in the
- // list, so set it to that if necessary.
- if ((!nextMarker || !nextMarker[0]) && contentsCount) {
- nextMarker = contents[contentsCount - 1].key;
- }
- if (nextMarker) {
- snprintf(data->nextMarker, sizeof(data->nextMarker), "%s",
- nextMarker);
- }
- else {
- data->nextMarker[0] = 0;
- }
-
- if (contentsCount && !data->keyCount) {
- printListBucketHeader(data->allDetails);
- }
-
- int i;
- for (i = 0; i < contentsCount; i++) {
- const S3ListBucketContent *content = &(contents[i]);
- char timebuf[256];
- if (0) {
- time_t t = (time_t) content->lastModified;
- strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
- gmtime(&t));
- printf("\nKey: %s\n", content->key);
- printf("Last Modified: %s\n", timebuf);
- printf("ETag: %s\n", content->eTag);
- printf("Size: %llu\n", (unsigned long long) content->size);
- if (content->ownerId) {
- printf("Owner ID: %s\n", content->ownerId);
- }
- if (content->ownerDisplayName) {
- printf("Owner Display Name: %s\n", content->ownerDisplayName);
- }
- }
- else {
- time_t t = (time_t) content->lastModified;
- strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
- gmtime(&t));
- char sizebuf[16];
- if (content->size < 100000) {
- sprintf(sizebuf, "%5llu", (unsigned long long) content->size);
- }
- else if (content->size < (1024 * 1024)) {
- sprintf(sizebuf, "%4lluK",
- ((unsigned long long) content->size) / 1024ULL);
- }
- else if (content->size < (10 * 1024 * 1024)) {
- float f = content->size;
- f /= (1024 * 1024);
- sprintf(sizebuf, "%1.2fM", f);
- }
- else if (content->size < (1024 * 1024 * 1024)) {
- sprintf(sizebuf, "%4lluM",
- ((unsigned long long) content->size) /
- (1024ULL * 1024ULL));
- }
- else {
- float f = (content->size / 1024);
- f /= (1024 * 1024);
- sprintf(sizebuf, "%1.2fG", f);
- }
- printf("%-50s %s %s", content->key, timebuf, sizebuf);
- if (data->allDetails) {
- printf(" %-34s %-64s %-12s",
- content->eTag,
- content->ownerId ? content->ownerId : "",
- content->ownerDisplayName ?
- content->ownerDisplayName : "");
- }
- printf("\n");
- }
- }
-
- data->keyCount += contentsCount;
-
- for (i = 0; i < commonPrefixesCount; i++) {
- printf("\nCommon Prefix: %s\n", commonPrefixes[i]);
- }
-
- return S3StatusOK;
+ list_bucket_callback_data *data =
+ (list_bucket_callback_data *) callbackData;
+
+ data->isTruncated = isTruncated;
+ // This is tricky. S3 doesn't return the NextMarker if there is no
+ // delimiter. Why, I don't know, since it's still useful for paging
+ // through results. We want NextMarker to be the last content in the
+ // list, so set it to that if necessary.
+ if ((!nextMarker || !nextMarker[0]) && contentsCount) {
+ nextMarker = contents[contentsCount - 1].key;
+ }
+ if (nextMarker) {
+ snprintf(data->nextMarker, sizeof(data->nextMarker), "%s",
+ nextMarker);
+ }
+ else {
+ data->nextMarker[0] = 0;
+ }
+
+ if (contentsCount && !data->keyCount) {
+ printListBucketHeader(data->allDetails);
+ }
+
+ int i;
+ for (i = 0; i < contentsCount; i++) {
+ const S3ListBucketContent *content = &(contents[i]);
+ char timebuf[256];
+ if (0) {
+ time_t t = (time_t) content->lastModified;
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+ gmtime(&t));
+ printf("\nKey: %s\n", content->key);
+ printf("Last Modified: %s\n", timebuf);
+ printf("ETag: %s\n", content->eTag);
+ printf("Size: %llu\n", (unsigned long long) content->size);
+ if (content->ownerId) {
+ printf("Owner ID: %s\n", content->ownerId);
+ }
+ if (content->ownerDisplayName) {
+ printf("Owner Display Name: %s\n", content->ownerDisplayName);
+ }
+ }
+ else {
+ time_t t = (time_t) content->lastModified;
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+ gmtime(&t));
+ char sizebuf[16];
+ if (content->size < 100000) {
+ sprintf(sizebuf, "%5llu", (unsigned long long) content->size);
+ }
+ else if (content->size < (1024 * 1024)) {
+ sprintf(sizebuf, "%4lluK",
+ ((unsigned long long) content->size) / 1024ULL);
+ }
+ else if (content->size < (10 * 1024 * 1024)) {
+ float f = content->size;
+ f /= (1024 * 1024);
+ sprintf(sizebuf, "%1.2fM", f);
+ }
+ else if (content->size < (1024 * 1024 * 1024)) {
+ sprintf(sizebuf, "%4lluM",
+ ((unsigned long long) content->size) /
+ (1024ULL * 1024ULL));
+ }
+ else {
+ float f = (content->size / 1024);
+ f /= (1024 * 1024);
+ sprintf(sizebuf, "%1.2fG", f);
+ }
+ printf("%-50s %s %s", content->key, timebuf, sizebuf);
+ if (data->allDetails) {
+ printf(" %-34s %-64s %-12s",
+ content->eTag,
+ content->ownerId ? content->ownerId : "",
+ content->ownerDisplayName ?
+ content->ownerDisplayName : "");
+ }
+ printf("\n");
+ }
+ }
+
+ data->keyCount += contentsCount;
+
+ for (i = 0; i < commonPrefixesCount; i++) {
+ printf("\nCommon Prefix: %s\n", commonPrefixes[i]);
+ }
+
+ return S3StatusOK;
}
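
The sizebuf logic above renders object sizes in a fixed-width, human-readable form; for example:

//   983         -> "  983"
//   524288      -> " 512K"
//   5242880     -> "5.00M"
//   104857600   -> " 100M"
//   2147483648  -> "2.00G"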
static void list_bucket(const char *bucketName, const char *prefix,
- const char *marker, const char *delimiter,
- int maxkeys, int allDetails)
+ const char *marker, const char *delimiter,
+ int maxkeys, int allDetails)
{
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ListBucketHandler listBucketHandler =
- {
- { &responsePropertiesCallback, &responseCompleteCallback },
- &listBucketCallback
- };
-
- list_bucket_callback_data data;
-
- snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker);
- data.keyCount = 0;
- data.allDetails = allDetails;
-
- do {
- data.isTruncated = 0;
- do {
- S3_list_bucket(&bucketContext, prefix, data.nextMarker,
- delimiter, maxkeys, 0, &listBucketHandler, &data);
- } while (S3_status_is_retryable(statusG) && should_retry());
- if (statusG != S3StatusOK) {
- break;
- }
- marker = data.nextMarker;
- } while (data.isTruncated && (!maxkeys || (data.keyCount < maxkeys)));
-
- if (statusG == S3StatusOK) {
- if (!data.keyCount) {
- printListBucketHeader(allDetails);
- }
- }
- else {
- printError();
- }
-
- S3_deinitialize();
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ListBucketHandler listBucketHandler =
+ {
+ { &responsePropertiesCallback, &responseCompleteCallback },
+ &listBucketCallback
+ };
+
+ list_bucket_callback_data data;
+
+ snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker);
+ data.keyCount = 0;
+ data.allDetails = allDetails;
+
+ do {
+ data.isTruncated = 0;
+ do {
+ S3_list_bucket(&bucketContext, prefix, data.nextMarker,
+ delimiter, maxkeys, 0, &listBucketHandler, &data);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+ if (statusG != S3StatusOK) {
+ break;
+ }
+ marker = data.nextMarker;
+ } while (data.isTruncated && (!maxkeys || (data.keyCount < maxkeys)));
+
+ if (statusG == S3StatusOK) {
+ if (!data.keyCount) {
+ printListBucketHeader(allDetails);
+ }
+ }
+ else {
+ printError();
+ }
+
+ S3_deinitialize();
}
static void list(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- list_service(0);
- return;
- }
-
- const char *bucketName = 0;
-
- const char *prefix = 0, *marker = 0, *delimiter = 0;
- int maxkeys = 0, allDetails = 0;
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, PREFIX_PREFIX, PREFIX_PREFIX_LEN)) {
- prefix = &(param[PREFIX_PREFIX_LEN]);
- }
- else if (!strncmp(param, MARKER_PREFIX, MARKER_PREFIX_LEN)) {
- marker = &(param[MARKER_PREFIX_LEN]);
- }
- else if (!strncmp(param, DELIMITER_PREFIX, DELIMITER_PREFIX_LEN)) {
- delimiter = &(param[DELIMITER_PREFIX_LEN]);
- }
- else if (!strncmp(param, MAXKEYS_PREFIX, MAXKEYS_PREFIX_LEN)) {
- maxkeys = convertInt(&(param[MAXKEYS_PREFIX_LEN]), "maxkeys");
- }
- else if (!strncmp(param, ALL_DETAILS_PREFIX,
- ALL_DETAILS_PREFIX_LEN)) {
- const char *ad = &(param[ALL_DETAILS_PREFIX_LEN]);
- if (!strcmp(ad, "true") || !strcmp(ad, "TRUE") ||
- !strcmp(ad, "yes") || !strcmp(ad, "YES") ||
- !strcmp(ad, "1")) {
- allDetails = 1;
- }
- }
- else if (!bucketName) {
- bucketName = param;
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- if (bucketName) {
- list_bucket(bucketName, prefix, marker, delimiter, maxkeys,
- allDetails);
- }
- else {
- list_service(allDetails);
- }
+ if (optindex == argc) {
+ list_service(0);
+ return;
+ }
+
+ const char *bucketName = 0;
+
+ const char *prefix = 0, *marker = 0, *delimiter = 0;
+ int maxkeys = 0, allDetails = 0;
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, PREFIX_PREFIX, PREFIX_PREFIX_LEN)) {
+ prefix = &(param[PREFIX_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, MARKER_PREFIX, MARKER_PREFIX_LEN)) {
+ marker = &(param[MARKER_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, DELIMITER_PREFIX, DELIMITER_PREFIX_LEN)) {
+ delimiter = &(param[DELIMITER_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, MAXKEYS_PREFIX, MAXKEYS_PREFIX_LEN)) {
+ maxkeys = convertInt(&(param[MAXKEYS_PREFIX_LEN]), "maxkeys");
+ }
+ else if (!strncmp(param, ALL_DETAILS_PREFIX,
+ ALL_DETAILS_PREFIX_LEN)) {
+ const char *ad = &(param[ALL_DETAILS_PREFIX_LEN]);
+ if (!strcmp(ad, "true") || !strcmp(ad, "TRUE") ||
+ !strcmp(ad, "yes") || !strcmp(ad, "YES") ||
+ !strcmp(ad, "1")) {
+ allDetails = 1;
+ }
+ }
+ else if (!bucketName) {
+ bucketName = param;
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ if (bucketName) {
+ list_bucket(bucketName, prefix, marker, delimiter, maxkeys,
+ allDetails);
+ }
+ else {
+ list_service(allDetails);
+ }
}
-
+
// delete object -------------------------------------------------------------
static void delete_object(int argc, char **argv, int optindex)
{
- (void) argc;
-
- // Split bucket/key
- char *slash = argv[optindex];
-
-    // We know there is a slash in there, delete_object is only called if so
- while (*slash && (*slash != '/')) {
- slash++;
- }
- *slash++ = 0;
-
- const char *bucketName = argv[optindex++];
- const char *key = slash;
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ResponseHandler responseHandler =
- {
- 0,
- &responseCompleteCallback
- };
-
- do {
- S3_delete_object(&bucketContext, key, 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if ((statusG != S3StatusOK) &&
- (statusG != S3StatusErrorPreconditionFailed)) {
- printError();
- }
-
- S3_deinitialize();
+ (void) argc;
+
+ // Split bucket/key
+ char *slash = argv[optindex];
+
+    // We know there is a slash in there, delete_object is only called if so
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ *slash++ = 0;
+
+ const char *bucketName = argv[optindex++];
+ const char *key = slash;
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ 0,
+ &responseCompleteCallback
+ };
+
+ do {
+ S3_delete_object(&bucketContext, key, 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if ((statusG != S3StatusOK) &&
+ (statusG != S3StatusErrorPreconditionFailed)) {
+ printError();
+ }
+
+ S3_deinitialize();
}
@@ -1343,283 +1343,283 @@ static void delete_object(int argc, char **argv, int optindex)
typedef struct put_object_callback_data
{
- FILE *infile;
- growbuffer *gb;
- uint64_t contentLength, originalContentLength;
- int noStatus;
+ FILE *infile;
+ growbuffer *gb;
+ uint64_t contentLength, originalContentLength;
+ int noStatus;
} put_object_callback_data;
static int putObjectDataCallback(int bufferSize, char *buffer,
- void *callbackData)
+ void *callbackData)
{
- put_object_callback_data *data =
- (put_object_callback_data *) callbackData;
-
- int ret = 0;
-
- if (data->contentLength) {
- int toRead = ((data->contentLength > (unsigned) bufferSize) ?
- (unsigned) bufferSize : data->contentLength);
- if (data->gb) {
- growbuffer_read(&(data->gb), toRead, &ret, buffer);
- }
- else if (data->infile) {
- ret = fread(buffer, 1, toRead, data->infile);
- }
- }
-
- data->contentLength -= ret;
-
- if (data->contentLength && !data->noStatus) {
- // Avoid a weird bug in MingW, which won't print the second integer
- // value properly when it's in the same call, so print separately
- printf("%llu bytes remaining ",
- (unsigned long long) data->contentLength);
- printf("(%d%% complete) ...\n",
- (int) (((data->originalContentLength -
- data->contentLength) * 100) /
- data->originalContentLength));
- }
-
- return ret;
+ put_object_callback_data *data =
+ (put_object_callback_data *) callbackData;
+
+ int ret = 0;
+
+ if (data->contentLength) {
+ int toRead = ((data->contentLength > (unsigned) bufferSize) ?
+ (unsigned) bufferSize : data->contentLength);
+ if (data->gb) {
+ growbuffer_read(&(data->gb), toRead, &ret, buffer);
+ }
+ else if (data->infile) {
+ ret = fread(buffer, 1, toRead, data->infile);
+ }
+ }
+
+ data->contentLength -= ret;
+
+ if (data->contentLength && !data->noStatus) {
+ // Avoid a weird bug in MingW, which won't print the second integer
+ // value properly when it's in the same call, so print separately
+ printf("%llu bytes remaining ",
+ (unsigned long long) data->contentLength);
+ printf("(%d%% complete) ...\n",
+ (int) (((data->originalContentLength -
+ data->contentLength) * 100) /
+ data->originalContentLength));
+ }
+
+ return ret;
}
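
putObjectDataCallback shows the put-data contract used here: libs3 hands the callback a buffer of bufferSize bytes, the callback copies up to that many bytes in and returns the count, and it returns 0 once its contentLength has been exhausted. A hedged sketch of an equivalent callback streaming from a plain memory buffer (the names below are hypothetical, not part of s3.c):

typedef struct mem_put_data
{
    const char *data;
    int remaining;
} mem_put_data;

static int memPutObjectDataCallback(int bufferSize, char *buffer,
                                    void *callbackData)
{
    mem_put_data *d = (mem_put_data *) callbackData;

    int toCopy = (d->remaining < bufferSize) ? d->remaining : bufferSize;
    memcpy(buffer, d->data, toCopy);
    d->data += toCopy, d->remaining -= toCopy;

    // 0 once the source is exhausted, mirroring putObjectDataCallback above
    return toCopy;
}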
static void put_object(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
- usageExit(stderr);
- }
-
- // Split bucket/key
- char *slash = argv[optindex];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (!*slash || !*(slash + 1)) {
- fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
- argv[optindex]);
- usageExit(stderr);
- }
- *slash++ = 0;
-
- const char *bucketName = argv[optindex++];
- const char *key = slash;
-
- const char *filename = 0;
- uint64_t contentLength = 0;
- const char *cacheControl = 0, *contentType = 0, *md5 = 0;
- const char *contentDispositionFilename = 0, *contentEncoding = 0;
- int64_t expires = -1;
- S3CannedAcl cannedAcl = S3CannedAclPrivate;
- int metaPropertiesCount = 0;
- S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
- int noStatus = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
- filename = &(param[FILENAME_PREFIX_LEN]);
- }
- else if (!strncmp(param, CONTENT_LENGTH_PREFIX,
- CONTENT_LENGTH_PREFIX_LEN)) {
- contentLength = convertInt(&(param[CONTENT_LENGTH_PREFIX_LEN]),
- "contentLength");
- if (contentLength > (5LL * 1024 * 1024 * 1024)) {
- fprintf(stderr, "\nERROR: contentLength must be no greater "
- "than 5 GB\n");
- usageExit(stderr);
- }
- }
- else if (!strncmp(param, CACHE_CONTROL_PREFIX,
- CACHE_CONTROL_PREFIX_LEN)) {
- cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]);
- }
- else if (!strncmp(param, CONTENT_TYPE_PREFIX,
- CONTENT_TYPE_PREFIX_LEN)) {
- contentType = &(param[CONTENT_TYPE_PREFIX_LEN]);
- }
- else if (!strncmp(param, MD5_PREFIX, MD5_PREFIX_LEN)) {
- md5 = &(param[MD5_PREFIX_LEN]);
- }
- else if (!strncmp(param, CONTENT_DISPOSITION_FILENAME_PREFIX,
- CONTENT_DISPOSITION_FILENAME_PREFIX_LEN)) {
- contentDispositionFilename =
- &(param[CONTENT_DISPOSITION_FILENAME_PREFIX_LEN]);
- }
- else if (!strncmp(param, CONTENT_ENCODING_PREFIX,
- CONTENT_ENCODING_PREFIX_LEN)) {
- contentEncoding = &(param[CONTENT_ENCODING_PREFIX_LEN]);
- }
- else if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
- expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
- if (expires < 0) {
- fprintf(stderr, "\nERROR: Invalid expires time "
- "value; ISO 8601 time format required\n");
- usageExit(stderr);
- }
- }
- else if (!strncmp(param, X_AMZ_META_PREFIX, X_AMZ_META_PREFIX_LEN)) {
- if (metaPropertiesCount == S3_MAX_METADATA_COUNT) {
- fprintf(stderr, "\nERROR: Too many x-amz-meta- properties, "
- "limit %lu: %s\n",
- (unsigned long) S3_MAX_METADATA_COUNT, param);
- usageExit(stderr);
- }
- char *name = &(param[X_AMZ_META_PREFIX_LEN]);
- char *value = name;
- while (*value && (*value != '=')) {
- value++;
- }
- if (!*value || !*(value + 1)) {
- fprintf(stderr, "\nERROR: Invalid parameter: %s\n", param);
- usageExit(stderr);
- }
- *value++ = 0;
- metaProperties[metaPropertiesCount].name = name;
- metaProperties[metaPropertiesCount++].value = value;
- }
- else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
- char *val = &(param[CANNED_ACL_PREFIX_LEN]);
- if (!strcmp(val, "private")) {
- cannedAcl = S3CannedAclPrivate;
- }
- else if (!strcmp(val, "public-read")) {
- cannedAcl = S3CannedAclPublicRead;
- }
- else if (!strcmp(val, "public-read-write")) {
- cannedAcl = S3CannedAclPublicReadWrite;
- }
- else if (!strcmp(val, "authenticated-read")) {
- cannedAcl = S3CannedAclAuthenticatedRead;
- }
- else {
- fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
- usageExit(stderr);
- }
- }
- else if (!strncmp(param, NO_STATUS_PREFIX, NO_STATUS_PREFIX_LEN)) {
- const char *ns = &(param[NO_STATUS_PREFIX_LEN]);
- if (!strcmp(ns, "true") || !strcmp(ns, "TRUE") ||
- !strcmp(ns, "yes") || !strcmp(ns, "YES") ||
- !strcmp(ns, "1")) {
- noStatus = 1;
- }
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- put_object_callback_data data;
-
- data.infile = 0;
- data.gb = 0;
- data.noStatus = noStatus;
-
- if (filename) {
- if (!contentLength) {
- struct stat statbuf;
- // Stat the file to get its length
- if (stat(filename, &statbuf) == -1) {
- fprintf(stderr, "\nERROR: Failed to stat file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- contentLength = statbuf.st_size;
- }
- // Open the file
- if (!(data.infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
- fprintf(stderr, "\nERROR: Failed to open input file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- }
- else {
- // Read from stdin. If contentLength is not provided, we have
- // to read it all in to get contentLength.
- if (!contentLength) {
-            // Read all of stdin to get the data
- char buffer[64 * 1024];
- while (1) {
- int amtRead = fread(buffer, 1, sizeof(buffer), stdin);
- if (amtRead == 0) {
- break;
- }
- if (!growbuffer_append(&(data.gb), buffer, amtRead)) {
- fprintf(stderr, "\nERROR: Out of memory while reading "
- "stdin\n");
- exit(-1);
- }
- contentLength += amtRead;
- if (amtRead < (int) sizeof(buffer)) {
- break;
- }
- }
- }
- else {
- data.infile = stdin;
- }
- }
-
- data.contentLength = data.originalContentLength = contentLength;
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3PutProperties putProperties =
- {
- contentType,
- md5,
- cacheControl,
- contentDispositionFilename,
- contentEncoding,
- expires,
- cannedAcl,
- metaPropertiesCount,
- metaProperties
- };
-
- S3PutObjectHandler putObjectHandler =
- {
- { &responsePropertiesCallback, &responseCompleteCallback },
- &putObjectDataCallback
- };
-
- do {
- S3_put_object(&bucketContext, key, contentLength, &putProperties, 0,
- &putObjectHandler, &data);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (data.infile) {
- fclose(data.infile);
- }
- else if (data.gb) {
- growbuffer_destroy(data.gb);
- }
-
- if (statusG != S3StatusOK) {
- printError();
- }
- else if (data.contentLength) {
- fprintf(stderr, "\nERROR: Failed to read remaining %llu bytes from "
- "input\n", (unsigned long long) data.contentLength);
- }
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
+ usageExit(stderr);
+ }
+
+ // Split bucket/key
+ char *slash = argv[optindex];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (!*slash || !*(slash + 1)) {
+ fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+ argv[optindex]);
+ usageExit(stderr);
+ }
+ *slash++ = 0;
+
+ const char *bucketName = argv[optindex++];
+ const char *key = slash;
+
+ const char *filename = 0;
+ uint64_t contentLength = 0;
+ const char *cacheControl = 0, *contentType = 0, *md5 = 0;
+ const char *contentDispositionFilename = 0, *contentEncoding = 0;
+ int64_t expires = -1;
+ S3CannedAcl cannedAcl = S3CannedAclPrivate;
+ int metaPropertiesCount = 0;
+ S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
+ int noStatus = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+ filename = &(param[FILENAME_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, CONTENT_LENGTH_PREFIX,
+ CONTENT_LENGTH_PREFIX_LEN)) {
+ contentLength = convertInt(&(param[CONTENT_LENGTH_PREFIX_LEN]),
+ "contentLength");
+ if (contentLength > (5LL * 1024 * 1024 * 1024)) {
+ fprintf(stderr, "\nERROR: contentLength must be no greater "
+ "than 5 GB\n");
+ usageExit(stderr);
+ }
+ }
+ else if (!strncmp(param, CACHE_CONTROL_PREFIX,
+ CACHE_CONTROL_PREFIX_LEN)) {
+ cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, CONTENT_TYPE_PREFIX,
+ CONTENT_TYPE_PREFIX_LEN)) {
+ contentType = &(param[CONTENT_TYPE_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, MD5_PREFIX, MD5_PREFIX_LEN)) {
+ md5 = &(param[MD5_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, CONTENT_DISPOSITION_FILENAME_PREFIX,
+ CONTENT_DISPOSITION_FILENAME_PREFIX_LEN)) {
+ contentDispositionFilename =
+ &(param[CONTENT_DISPOSITION_FILENAME_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, CONTENT_ENCODING_PREFIX,
+ CONTENT_ENCODING_PREFIX_LEN)) {
+ contentEncoding = &(param[CONTENT_ENCODING_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
+ expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
+ if (expires < 0) {
+ fprintf(stderr, "\nERROR: Invalid expires time "
+ "value; ISO 8601 time format required\n");
+ usageExit(stderr);
+ }
+ }
+ else if (!strncmp(param, X_AMZ_META_PREFIX, X_AMZ_META_PREFIX_LEN)) {
+ if (metaPropertiesCount == S3_MAX_METADATA_COUNT) {
+ fprintf(stderr, "\nERROR: Too many x-amz-meta- properties, "
+ "limit %lu: %s\n",
+ (unsigned long) S3_MAX_METADATA_COUNT, param);
+ usageExit(stderr);
+ }
+ char *name = &(param[X_AMZ_META_PREFIX_LEN]);
+ char *value = name;
+ while (*value && (*value != '=')) {
+ value++;
+ }
+ if (!*value || !*(value + 1)) {
+ fprintf(stderr, "\nERROR: Invalid parameter: %s\n", param);
+ usageExit(stderr);
+ }
+ *value++ = 0;
+ metaProperties[metaPropertiesCount].name = name;
+ metaProperties[metaPropertiesCount++].value = value;
+ }
+ else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
+ char *val = &(param[CANNED_ACL_PREFIX_LEN]);
+ if (!strcmp(val, "private")) {
+ cannedAcl = S3CannedAclPrivate;
+ }
+ else if (!strcmp(val, "public-read")) {
+ cannedAcl = S3CannedAclPublicRead;
+ }
+ else if (!strcmp(val, "public-read-write")) {
+ cannedAcl = S3CannedAclPublicReadWrite;
+ }
+ else if (!strcmp(val, "authenticated-read")) {
+ cannedAcl = S3CannedAclAuthenticatedRead;
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
+ usageExit(stderr);
+ }
+ }
+ else if (!strncmp(param, NO_STATUS_PREFIX, NO_STATUS_PREFIX_LEN)) {
+ const char *ns = &(param[NO_STATUS_PREFIX_LEN]);
+ if (!strcmp(ns, "true") || !strcmp(ns, "TRUE") ||
+ !strcmp(ns, "yes") || !strcmp(ns, "YES") ||
+ !strcmp(ns, "1")) {
+ noStatus = 1;
+ }
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ put_object_callback_data data;
+
+ data.infile = 0;
+ data.gb = 0;
+ data.noStatus = noStatus;
+
+ if (filename) {
+ if (!contentLength) {
+ struct stat statbuf;
+ // Stat the file to get its length
+ if (stat(filename, &statbuf) == -1) {
+ fprintf(stderr, "\nERROR: Failed to stat file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ contentLength = statbuf.st_size;
+ }
+ // Open the file
+ if (!(data.infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+ fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ }
+ else {
+ // Read from stdin. If contentLength is not provided, we have
+ // to read it all in to get contentLength.
+ if (!contentLength) {
+            // Read all of stdin to get the data
+ char buffer[64 * 1024];
+ while (1) {
+ int amtRead = fread(buffer, 1, sizeof(buffer), stdin);
+ if (amtRead == 0) {
+ break;
+ }
+ if (!growbuffer_append(&(data.gb), buffer, amtRead)) {
+ fprintf(stderr, "\nERROR: Out of memory while reading "
+ "stdin\n");
+ exit(-1);
+ }
+ contentLength += amtRead;
+ if (amtRead < (int) sizeof(buffer)) {
+ break;
+ }
+ }
+ }
+ else {
+ data.infile = stdin;
+ }
+ }
+
+ data.contentLength = data.originalContentLength = contentLength;
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3PutProperties putProperties =
+ {
+ contentType,
+ md5,
+ cacheControl,
+ contentDispositionFilename,
+ contentEncoding,
+ expires,
+ cannedAcl,
+ metaPropertiesCount,
+ metaProperties
+ };
+
+ S3PutObjectHandler putObjectHandler =
+ {
+ { &responsePropertiesCallback, &responseCompleteCallback },
+ &putObjectDataCallback
+ };
+
+ do {
+ S3_put_object(&bucketContext, key, contentLength, &putProperties, 0,
+ &putObjectHandler, &data);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (data.infile) {
+ fclose(data.infile);
+ }
+ else if (data.gb) {
+ growbuffer_destroy(data.gb);
+ }
+
+ if (statusG != S3StatusOK) {
+ printError();
+ }
+ else if (data.contentLength) {
+ fprintf(stderr, "\nERROR: Failed to read remaining %llu bytes from "
+ "input\n", (unsigned long long) data.contentLength);
+ }
+
+ S3_deinitialize();
}
@@ -1627,355 +1627,355 @@ static void put_object(int argc, char **argv, int optindex)
static void copy_object(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: source bucket/key\n");
- usageExit(stderr);
- }
-
- // Split bucket/key
- char *slash = argv[optindex];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (!*slash || !*(slash + 1)) {
- fprintf(stderr, "\nERROR: Invalid source bucket/key name: %s\n",
- argv[optindex]);
- usageExit(stderr);
- }
- *slash++ = 0;
-
- const char *sourceBucketName = argv[optindex++];
- const char *sourceKey = slash;
-
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: "
- "destination bucket/key\n");
- usageExit(stderr);
- }
-
- // Split bucket/key
- slash = argv[optindex];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (!*slash || !*(slash + 1)) {
- fprintf(stderr, "\nERROR: Invalid destination bucket/key name: %s\n",
- argv[optindex]);
- usageExit(stderr);
- }
- *slash++ = 0;
-
- const char *destinationBucketName = argv[optindex++];
- const char *destinationKey = slash;
-
- const char *cacheControl = 0, *contentType = 0;
- const char *contentDispositionFilename = 0, *contentEncoding = 0;
- int64_t expires = -1;
- S3CannedAcl cannedAcl = S3CannedAclPrivate;
- int metaPropertiesCount = 0;
- S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
- int anyPropertiesSet = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, CACHE_CONTROL_PREFIX,
- CACHE_CONTROL_PREFIX_LEN)) {
- cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]);
- anyPropertiesSet = 1;
- }
- else if (!strncmp(param, CONTENT_TYPE_PREFIX,
- CONTENT_TYPE_PREFIX_LEN)) {
- contentType = &(param[CONTENT_TYPE_PREFIX_LEN]);
- anyPropertiesSet = 1;
- }
- else if (!strncmp(param, CONTENT_DISPOSITION_FILENAME_PREFIX,
- CONTENT_DISPOSITION_FILENAME_PREFIX_LEN)) {
- contentDispositionFilename =
- &(param[CONTENT_DISPOSITION_FILENAME_PREFIX_LEN]);
- anyPropertiesSet = 1;
- }
- else if (!strncmp(param, CONTENT_ENCODING_PREFIX,
- CONTENT_ENCODING_PREFIX_LEN)) {
- contentEncoding = &(param[CONTENT_ENCODING_PREFIX_LEN]);
- anyPropertiesSet = 1;
- }
- else if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
- expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
- if (expires < 0) {
- fprintf(stderr, "\nERROR: Invalid expires time "
- "value; ISO 8601 time format required\n");
- usageExit(stderr);
- }
- anyPropertiesSet = 1;
- }
- else if (!strncmp(param, X_AMZ_META_PREFIX, X_AMZ_META_PREFIX_LEN)) {
- if (metaPropertiesCount == S3_MAX_METADATA_COUNT) {
- fprintf(stderr, "\nERROR: Too many x-amz-meta- properties, "
- "limit %lu: %s\n",
- (unsigned long) S3_MAX_METADATA_COUNT, param);
- usageExit(stderr);
- }
- char *name = &(param[X_AMZ_META_PREFIX_LEN]);
- char *value = name;
- while (*value && (*value != '=')) {
- value++;
- }
- if (!*value || !*(value + 1)) {
- fprintf(stderr, "\nERROR: Invalid parameter: %s\n", param);
- usageExit(stderr);
- }
- *value++ = 0;
- metaProperties[metaPropertiesCount].name = name;
- metaProperties[metaPropertiesCount++].value = value;
- anyPropertiesSet = 1;
- }
- else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
- char *val = &(param[CANNED_ACL_PREFIX_LEN]);
- if (!strcmp(val, "private")) {
- cannedAcl = S3CannedAclPrivate;
- }
- else if (!strcmp(val, "public-read")) {
- cannedAcl = S3CannedAclPublicRead;
- }
- else if (!strcmp(val, "public-read-write")) {
- cannedAcl = S3CannedAclPublicReadWrite;
- }
- else if (!strcmp(val, "authenticated-read")) {
- cannedAcl = S3CannedAclAuthenticatedRead;
- }
- else {
- fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
- usageExit(stderr);
- }
- anyPropertiesSet = 1;
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- sourceBucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3PutProperties putProperties =
- {
- contentType,
- 0,
- cacheControl,
- contentDispositionFilename,
- contentEncoding,
- expires,
- cannedAcl,
- metaPropertiesCount,
- metaProperties
- };
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback,
- &responseCompleteCallback
- };
-
- int64_t lastModified;
- char eTag[256];
-
- do {
- S3_copy_object(&bucketContext, sourceKey, destinationBucketName,
- destinationKey, anyPropertiesSet ? &putProperties : 0,
- &lastModified, sizeof(eTag), eTag, 0,
- &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG == S3StatusOK) {
- if (lastModified >= 0) {
- char timebuf[256];
- time_t t = (time_t) lastModified;
- strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
- gmtime(&t));
- printf("Last-Modified: %s\n", timebuf);
- }
- if (eTag[0]) {
- printf("ETag: %s\n", eTag);
- }
- }
- else {
- printError();
- }
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: source bucket/key\n");
+ usageExit(stderr);
+ }
+
+ // Split bucket/key
+ char *slash = argv[optindex];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (!*slash || !*(slash + 1)) {
+ fprintf(stderr, "\nERROR: Invalid source bucket/key name: %s\n",
+ argv[optindex]);
+ usageExit(stderr);
+ }
+ *slash++ = 0;
+
+ const char *sourceBucketName = argv[optindex++];
+ const char *sourceKey = slash;
+
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: "
+ "destination bucket/key\n");
+ usageExit(stderr);
+ }
+
+ // Split bucket/key
+ slash = argv[optindex];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (!*slash || !*(slash + 1)) {
+ fprintf(stderr, "\nERROR: Invalid destination bucket/key name: %s\n",
+ argv[optindex]);
+ usageExit(stderr);
+ }
+ *slash++ = 0;
+
+ const char *destinationBucketName = argv[optindex++];
+ const char *destinationKey = slash;
+
+ const char *cacheControl = 0, *contentType = 0;
+ const char *contentDispositionFilename = 0, *contentEncoding = 0;
+ int64_t expires = -1;
+ S3CannedAcl cannedAcl = S3CannedAclPrivate;
+ int metaPropertiesCount = 0;
+ S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
+ int anyPropertiesSet = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, CACHE_CONTROL_PREFIX,
+ CACHE_CONTROL_PREFIX_LEN)) {
+ cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]);
+ anyPropertiesSet = 1;
+ }
+ else if (!strncmp(param, CONTENT_TYPE_PREFIX,
+ CONTENT_TYPE_PREFIX_LEN)) {
+ contentType = &(param[CONTENT_TYPE_PREFIX_LEN]);
+ anyPropertiesSet = 1;
+ }
+ else if (!strncmp(param, CONTENT_DISPOSITION_FILENAME_PREFIX,
+ CONTENT_DISPOSITION_FILENAME_PREFIX_LEN)) {
+ contentDispositionFilename =
+ &(param[CONTENT_DISPOSITION_FILENAME_PREFIX_LEN]);
+ anyPropertiesSet = 1;
+ }
+ else if (!strncmp(param, CONTENT_ENCODING_PREFIX,
+ CONTENT_ENCODING_PREFIX_LEN)) {
+ contentEncoding = &(param[CONTENT_ENCODING_PREFIX_LEN]);
+ anyPropertiesSet = 1;
+ }
+ else if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
+ expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
+ if (expires < 0) {
+ fprintf(stderr, "\nERROR: Invalid expires time "
+ "value; ISO 8601 time format required\n");
+ usageExit(stderr);
+ }
+ anyPropertiesSet = 1;
+ }
+ else if (!strncmp(param, X_AMZ_META_PREFIX, X_AMZ_META_PREFIX_LEN)) {
+ if (metaPropertiesCount == S3_MAX_METADATA_COUNT) {
+ fprintf(stderr, "\nERROR: Too many x-amz-meta- properties, "
+ "limit %lu: %s\n",
+ (unsigned long) S3_MAX_METADATA_COUNT, param);
+ usageExit(stderr);
+ }
+ char *name = &(param[X_AMZ_META_PREFIX_LEN]);
+ char *value = name;
+ while (*value && (*value != '=')) {
+ value++;
+ }
+ if (!*value || !*(value + 1)) {
+ fprintf(stderr, "\nERROR: Invalid parameter: %s\n", param);
+ usageExit(stderr);
+ }
+ *value++ = 0;
+ metaProperties[metaPropertiesCount].name = name;
+ metaProperties[metaPropertiesCount++].value = value;
+ anyPropertiesSet = 1;
+ }
+ else if (!strncmp(param, CANNED_ACL_PREFIX, CANNED_ACL_PREFIX_LEN)) {
+ char *val = &(param[CANNED_ACL_PREFIX_LEN]);
+ if (!strcmp(val, "private")) {
+ cannedAcl = S3CannedAclPrivate;
+ }
+ else if (!strcmp(val, "public-read")) {
+ cannedAcl = S3CannedAclPublicRead;
+ }
+ else if (!strcmp(val, "public-read-write")) {
+ cannedAcl = S3CannedAclPublicReadWrite;
+ }
+ else if (!strcmp(val, "authenticated-read")) {
+ cannedAcl = S3CannedAclAuthenticatedRead;
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown canned ACL: %s\n", val);
+ usageExit(stderr);
+ }
+ anyPropertiesSet = 1;
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ sourceBucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3PutProperties putProperties =
+ {
+ contentType,
+ 0,
+ cacheControl,
+ contentDispositionFilename,
+ contentEncoding,
+ expires,
+ cannedAcl,
+ metaPropertiesCount,
+ metaProperties
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback,
+ &responseCompleteCallback
+ };
+
+ int64_t lastModified;
+ char eTag[256];
+
+ do {
+ S3_copy_object(&bucketContext, sourceKey, destinationBucketName,
+ destinationKey, anyPropertiesSet ? &putProperties : 0,
+ &lastModified, sizeof(eTag), eTag, 0,
+ &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG == S3StatusOK) {
+ if (lastModified >= 0) {
+ char timebuf[256];
+ time_t t = (time_t) lastModified;
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+ gmtime(&t));
+ printf("Last-Modified: %s\n", timebuf);
+ }
+ if (eTag[0]) {
+ printf("ETag: %s\n", eTag);
+ }
+ }
+ else {
+ printError();
+ }
+
+ S3_deinitialize();
}
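
copy_object splits its two bucket/key arguments in place with the same pointer walk, duplicated for source and destination. The helper below is not part of s3.c; it is only a sketch of that idiom factored out, and it keeps the original's assumption that rewriting argv in place is acceptable.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical helper: split "bucket/key" in place, returning the bucket
       name and storing the key through keyOut. Mirrors the inline checks in
       copy_object above; the real code calls usageExit(stderr) instead of exit. */
    static const char *split_bucket_key(char *arg, const char **keyOut)
    {
        char *slash = arg;
        while (*slash && (*slash != '/')) {
            slash++;
        }
        if (!*slash || !*(slash + 1)) {
            fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n", arg);
            exit(-1);
        }
        *slash++ = 0;          /* terminate the bucket name */
        *keyOut = slash;       /* key is the remainder */
        return arg;
    }
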
// get object ----------------------------------------------------------------
static S3Status getObjectDataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- FILE *outfile = (FILE *) callbackData;
+ FILE *outfile = (FILE *) callbackData;
- size_t wrote = fwrite(buffer, 1, bufferSize, outfile);
-
- return ((wrote < (size_t) bufferSize) ?
- S3StatusAbortedByCallback : S3StatusOK);
+ size_t wrote = fwrite(buffer, 1, bufferSize, outfile);
+
+ return ((wrote < (size_t) bufferSize) ?
+ S3StatusAbortedByCallback : S3StatusOK);
}
static void get_object(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
- usageExit(stderr);
- }
-
- // Split bucket/key
- char *slash = argv[optindex];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (!*slash || !*(slash + 1)) {
- fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
- argv[optindex]);
- usageExit(stderr);
- }
- *slash++ = 0;
-
- const char *bucketName = argv[optindex++];
- const char *key = slash;
-
- const char *filename = 0;
- int64_t ifModifiedSince = -1, ifNotModifiedSince = -1;
- const char *ifMatch = 0, *ifNotMatch = 0;
- uint64_t startByte = 0, byteCount = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
- filename = &(param[FILENAME_PREFIX_LEN]);
- }
- else if (!strncmp(param, IF_MODIFIED_SINCE_PREFIX,
- IF_MODIFIED_SINCE_PREFIX_LEN)) {
- // Parse ifModifiedSince
- ifModifiedSince = parseIso8601Time
- (&(param[IF_MODIFIED_SINCE_PREFIX_LEN]));
- if (ifModifiedSince < 0) {
- fprintf(stderr, "\nERROR: Invalid ifModifiedSince time "
- "value; ISO 8601 time format required\n");
- usageExit(stderr);
- }
- }
- else if (!strncmp(param, IF_NOT_MODIFIED_SINCE_PREFIX,
- IF_NOT_MODIFIED_SINCE_PREFIX_LEN)) {
-            // Parse ifNotModifiedSince
- ifNotModifiedSince = parseIso8601Time
- (&(param[IF_NOT_MODIFIED_SINCE_PREFIX_LEN]));
- if (ifNotModifiedSince < 0) {
- fprintf(stderr, "\nERROR: Invalid ifNotModifiedSince time "
- "value; ISO 8601 time format required\n");
- usageExit(stderr);
- }
- }
- else if (!strncmp(param, IF_MATCH_PREFIX, IF_MATCH_PREFIX_LEN)) {
- ifMatch = &(param[IF_MATCH_PREFIX_LEN]);
- }
- else if (!strncmp(param, IF_NOT_MATCH_PREFIX,
- IF_NOT_MATCH_PREFIX_LEN)) {
- ifNotMatch = &(param[IF_NOT_MATCH_PREFIX_LEN]);
- }
- else if (!strncmp(param, START_BYTE_PREFIX, START_BYTE_PREFIX_LEN)) {
- startByte = convertInt
- (&(param[START_BYTE_PREFIX_LEN]), "startByte");
- }
- else if (!strncmp(param, BYTE_COUNT_PREFIX, BYTE_COUNT_PREFIX_LEN)) {
- byteCount = convertInt
- (&(param[BYTE_COUNT_PREFIX_LEN]), "byteCount");
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- FILE *outfile = 0;
-
- if (filename) {
- // Stat the file, and if it doesn't exist, open it in w mode
- struct stat buf;
- if (stat(filename, &buf) == -1) {
- outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
- }
- else {
- // Open in r+ so that we don't truncate the file, just in case
- // there is an error and we write no bytes, we leave the file
- // unmodified
- outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
- }
-
- if (!outfile) {
- fprintf(stderr, "\nERROR: Failed to open output file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- }
- else if (showResponsePropertiesG) {
- fprintf(stderr, "\nERROR: get -s requires a filename parameter\n");
- usageExit(stderr);
- }
- else {
- outfile = stdout;
- }
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3GetConditions getConditions =
- {
- ifModifiedSince,
- ifNotModifiedSince,
- ifMatch,
- ifNotMatch
- };
-
- S3GetObjectHandler getObjectHandler =
- {
- { &responsePropertiesCallback, &responseCompleteCallback },
- &getObjectDataCallback
- };
-
- do {
- S3_get_object(&bucketContext, key, &getConditions, startByte,
- byteCount, 0, &getObjectHandler, outfile);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG == S3StatusOK) {
- if (outfile != stdout) {
- ftruncate(fileno(outfile), ftell(outfile));
- }
- }
- else {
- printError();
- }
-
- fclose(outfile);
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
+ usageExit(stderr);
+ }
+
+ // Split bucket/key
+ char *slash = argv[optindex];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (!*slash || !*(slash + 1)) {
+ fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+ argv[optindex]);
+ usageExit(stderr);
+ }
+ *slash++ = 0;
+
+ const char *bucketName = argv[optindex++];
+ const char *key = slash;
+
+ const char *filename = 0;
+ int64_t ifModifiedSince = -1, ifNotModifiedSince = -1;
+ const char *ifMatch = 0, *ifNotMatch = 0;
+ uint64_t startByte = 0, byteCount = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+ filename = &(param[FILENAME_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, IF_MODIFIED_SINCE_PREFIX,
+ IF_MODIFIED_SINCE_PREFIX_LEN)) {
+ // Parse ifModifiedSince
+ ifModifiedSince = parseIso8601Time
+ (&(param[IF_MODIFIED_SINCE_PREFIX_LEN]));
+ if (ifModifiedSince < 0) {
+ fprintf(stderr, "\nERROR: Invalid ifModifiedSince time "
+ "value; ISO 8601 time format required\n");
+ usageExit(stderr);
+ }
+ }
+ else if (!strncmp(param, IF_NOT_MODIFIED_SINCE_PREFIX,
+ IF_NOT_MODIFIED_SINCE_PREFIX_LEN)) {
+            // Parse ifNotModifiedSince
+ ifNotModifiedSince = parseIso8601Time
+ (&(param[IF_NOT_MODIFIED_SINCE_PREFIX_LEN]));
+ if (ifNotModifiedSince < 0) {
+ fprintf(stderr, "\nERROR: Invalid ifNotModifiedSince time "
+ "value; ISO 8601 time format required\n");
+ usageExit(stderr);
+ }
+ }
+ else if (!strncmp(param, IF_MATCH_PREFIX, IF_MATCH_PREFIX_LEN)) {
+ ifMatch = &(param[IF_MATCH_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, IF_NOT_MATCH_PREFIX,
+ IF_NOT_MATCH_PREFIX_LEN)) {
+ ifNotMatch = &(param[IF_NOT_MATCH_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, START_BYTE_PREFIX, START_BYTE_PREFIX_LEN)) {
+ startByte = convertInt
+ (&(param[START_BYTE_PREFIX_LEN]), "startByte");
+ }
+ else if (!strncmp(param, BYTE_COUNT_PREFIX, BYTE_COUNT_PREFIX_LEN)) {
+ byteCount = convertInt
+ (&(param[BYTE_COUNT_PREFIX_LEN]), "byteCount");
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ FILE *outfile = 0;
+
+ if (filename) {
+ // Stat the file, and if it doesn't exist, open it in w mode
+ struct stat buf;
+ if (stat(filename, &buf) == -1) {
+ outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
+ }
+ else {
+ // Open in r+ so that we don't truncate the file, just in case
+ // there is an error and we write no bytes, we leave the file
+ // unmodified
+ outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
+ }
+
+ if (!outfile) {
+ fprintf(stderr, "\nERROR: Failed to open output file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ }
+ else if (showResponsePropertiesG) {
+ fprintf(stderr, "\nERROR: get -s requires a filename parameter\n");
+ usageExit(stderr);
+ }
+ else {
+ outfile = stdout;
+ }
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3GetConditions getConditions =
+ {
+ ifModifiedSince,
+ ifNotModifiedSince,
+ ifMatch,
+ ifNotMatch
+ };
+
+ S3GetObjectHandler getObjectHandler =
+ {
+ { &responsePropertiesCallback, &responseCompleteCallback },
+ &getObjectDataCallback
+ };
+
+ do {
+ S3_get_object(&bucketContext, key, &getConditions, startByte,
+ byteCount, 0, &getObjectHandler, outfile);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG == S3StatusOK) {
+ if (outfile != stdout) {
+ ftruncate(fileno(outfile), ftell(outfile));
+ }
+ }
+ else {
+ printError();
+ }
+
+ fclose(outfile);
+
+ S3_deinitialize();
}
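
get_object opens an existing destination with "r+" so a failed download leaves the file untouched, and only truncates to the bytes actually written once the request succeeds. The sketch below isolates that policy; it assumes a POSIX environment (ftruncate, fileno) and leaves out the FOPEN_EXTRA_FLAGS suffix that s3.c appends to the fopen mode.

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>     /* ftruncate(), fileno() */

    /* Create the file if it does not exist; otherwise open read/write without
       truncating, so nothing is lost if the download fails early. */
    static FILE *open_download_target(const char *filename)
    {
        struct stat st;
        return fopen(filename, (stat(filename, &st) == -1) ? "w" : "r+");
    }

    /* After a successful download, trim any stale bytes left from a longer
       previous version of the file, then close it either way. */
    static void finish_download(FILE *outfile, int succeeded)
    {
        if (succeeded) {
            ftruncate(fileno(outfile), ftell(outfile));
        }
        fclose(outfile);
    }
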
@@ -1983,62 +1983,62 @@ static void get_object(int argc, char **argv, int optindex)
static void head_object(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
- usageExit(stderr);
- }
-
- // Head implies showing response properties
- showResponsePropertiesG = 1;
-
- // Split bucket/key
- char *slash = argv[optindex];
-
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (!*slash || !*(slash + 1)) {
- fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
- argv[optindex]);
- usageExit(stderr);
- }
- *slash++ = 0;
-
- const char *bucketName = argv[optindex++];
- const char *key = slash;
-
- if (optindex != argc) {
- fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
- usageExit(stderr);
- }
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback,
- &responseCompleteCallback
- };
-
- do {
- S3_head_object(&bucketContext, key, 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if ((statusG != S3StatusOK) &&
- (statusG != S3StatusErrorPreconditionFailed)) {
- printError();
- }
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
+ usageExit(stderr);
+ }
+
+ // Head implies showing response properties
+ showResponsePropertiesG = 1;
+
+ // Split bucket/key
+ char *slash = argv[optindex];
+
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (!*slash || !*(slash + 1)) {
+ fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n",
+ argv[optindex]);
+ usageExit(stderr);
+ }
+ *slash++ = 0;
+
+ const char *bucketName = argv[optindex++];
+ const char *key = slash;
+
+ if (optindex != argc) {
+ fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]);
+ usageExit(stderr);
+ }
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback,
+ &responseCompleteCallback
+ };
+
+ do {
+ S3_head_object(&bucketContext, key, 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if ((statusG != S3StatusOK) &&
+ (statusG != S3StatusErrorPreconditionFailed)) {
+ printError();
+ }
+
+ S3_deinitialize();
}
@@ -2046,75 +2046,75 @@ static void head_object(int argc, char **argv, int optindex)
static void generate_query_string(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex];
- const char *key = 0;
-
- // Split bucket/key
- char *slash = argv[optindex++];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (*slash) {
- *slash++ = 0;
- key = slash;
- }
- else {
- key = 0;
- }
-
- int64_t expires = -1;
-
- const char *resource = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
- expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
- if (expires < 0) {
- fprintf(stderr, "\nERROR: Invalid expires time "
- "value; ISO 8601 time format required\n");
- usageExit(stderr);
- }
- }
- else if (!strncmp(param, RESOURCE_PREFIX, RESOURCE_PREFIX_LEN)) {
- resource = &(param[RESOURCE_PREFIX_LEN]);
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];
-
- S3Status status = S3_generate_authenticated_query_string
- (buffer, &bucketContext, key, expires, resource);
-
- if (status != S3StatusOK) {
- printf("Failed to generate authenticated query string: %s\n",
- S3_get_status_name(status));
- }
- else {
- printf("%s\n", buffer);
- }
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex];
+ const char *key = 0;
+
+ // Split bucket/key
+ char *slash = argv[optindex++];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (*slash) {
+ *slash++ = 0;
+ key = slash;
+ }
+ else {
+ key = 0;
+ }
+
+ int64_t expires = -1;
+
+ const char *resource = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) {
+ expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN]));
+ if (expires < 0) {
+ fprintf(stderr, "\nERROR: Invalid expires time "
+ "value; ISO 8601 time format required\n");
+ usageExit(stderr);
+ }
+ }
+ else if (!strncmp(param, RESOURCE_PREFIX, RESOURCE_PREFIX_LEN)) {
+ resource = &(param[RESOURCE_PREFIX_LEN]);
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];
+
+ S3Status status = S3_generate_authenticated_query_string
+ (buffer, &bucketContext, key, expires, resource);
+
+ if (status != S3StatusOK) {
+ printf("Failed to generate authenticated query string: %s\n",
+ S3_get_status_name(status));
+ }
+ else {
+ printf("%s\n", buffer);
+ }
+
+ S3_deinitialize();
}
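
S3_generate_authenticated_query_string fills a caller-supplied buffer of S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes with a pre-signed query string; the gqs command above obtains the expiration by parsing an ISO 8601 string. The fragment below sketches a caller that instead signs for one hour from now; the bucket context and key are whatever the caller already has, and the null resource matches the default used above when no resource= parameter is given.

    #include <stdio.h>
    #include <time.h>
    #include "libs3.h"

    /* Hypothetical wrapper: print a query string valid for one hour. */
    static void print_signed_url(const S3BucketContext *bucketContext,
                                 const char *key)
    {
        char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];

        int64_t expires = (int64_t) time(0) + 3600;    /* one hour from now */

        S3Status status = S3_generate_authenticated_query_string
            (buffer, bucketContext, key, expires, 0 /* resource */);

        if (status == S3StatusOK) {
            printf("%s\n", buffer);
        }
        else {
            fprintf(stderr, "gqs failed: %s\n", S3_get_status_name(status));
        }
    }
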
@@ -2122,166 +2122,166 @@ static void generate_query_string(int argc, char **argv, int optindex)
void get_acl(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex];
- const char *key = 0;
-
- // Split bucket/key
- char *slash = argv[optindex++];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (*slash) {
- *slash++ = 0;
- key = slash;
- }
- else {
- key = 0;
- }
-
- const char *filename = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
- filename = &(param[FILENAME_PREFIX_LEN]);
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- FILE *outfile = 0;
-
- if (filename) {
- // Stat the file, and if it doesn't exist, open it in w mode
- struct stat buf;
- if (stat(filename, &buf) == -1) {
- outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
- }
- else {
- // Open in r+ so that we don't truncate the file, just in case
- // there is an error and we write no bytes, we leave the file
- // unmodified
- outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
- }
-
- if (!outfile) {
- fprintf(stderr, "\nERROR: Failed to open output file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- }
- else if (showResponsePropertiesG) {
- fprintf(stderr, "\nERROR: getacl -s requires a filename parameter\n");
- usageExit(stderr);
- }
- else {
- outfile = stdout;
- }
-
- int aclGrantCount;
- S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
- char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
- char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback,
- &responseCompleteCallback
- };
-
- do {
- S3_get_acl(&bucketContext, key, ownerId, ownerDisplayName,
- &aclGrantCount, aclGrants, 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG == S3StatusOK) {
- fprintf(outfile, "OwnerID %s %s\n", ownerId, ownerDisplayName);
- fprintf(outfile, "%-6s %-90s %-12s\n", " Type",
- " User Identifier",
- " Permission");
- fprintf(outfile, "------ "
- "------------------------------------------------------------"
- "------------------------------ ------------\n");
- int i;
- for (i = 0; i < aclGrantCount; i++) {
- S3AclGrant *grant = &(aclGrants[i]);
- const char *type;
- char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
- S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
- const char *id;
-
- switch (grant->granteeType) {
- case S3GranteeTypeAmazonCustomerByEmail:
- type = "Email";
- id = grant->grantee.amazonCustomerByEmail.emailAddress;
- break;
- case S3GranteeTypeCanonicalUser:
- type = "UserID";
- snprintf(composedId, sizeof(composedId),
- "%s (%s)", grant->grantee.canonicalUser.id,
- grant->grantee.canonicalUser.displayName);
- id = composedId;
- break;
- case S3GranteeTypeAllAwsUsers:
- type = "Group";
- id = "Authenticated AWS Users";
- break;
- case S3GranteeTypeAllUsers:
- type = "Group";
- id = "All Users";
- break;
- default:
- type = "Group";
- id = "Log Delivery";
- break;
- }
- const char *perm;
- switch (grant->permission) {
- case S3PermissionRead:
- perm = "READ";
- break;
- case S3PermissionWrite:
- perm = "WRITE";
- break;
- case S3PermissionReadACP:
- perm = "READ_ACP";
- break;
- case S3PermissionWriteACP:
- perm = "WRITE_ACP";
- break;
- default:
- perm = "FULL_CONTROL";
- break;
- }
- fprintf(outfile, "%-6s %-90s %-12s\n", type, id, perm);
- }
- }
- else {
- printError();
- }
-
- fclose(outfile);
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex];
+ const char *key = 0;
+
+ // Split bucket/key
+ char *slash = argv[optindex++];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (*slash) {
+ *slash++ = 0;
+ key = slash;
+ }
+ else {
+ key = 0;
+ }
+
+ const char *filename = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+ filename = &(param[FILENAME_PREFIX_LEN]);
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ FILE *outfile = 0;
+
+ if (filename) {
+ // Stat the file, and if it doesn't exist, open it in w mode
+ struct stat buf;
+ if (stat(filename, &buf) == -1) {
+ outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
+ }
+ else {
+ // Open in r+ so that we don't truncate the file, just in case
+ // there is an error and we write no bytes, we leave the file
+ // unmodified
+ outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
+ }
+
+ if (!outfile) {
+ fprintf(stderr, "\nERROR: Failed to open output file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ }
+ else if (showResponsePropertiesG) {
+ fprintf(stderr, "\nERROR: getacl -s requires a filename parameter\n");
+ usageExit(stderr);
+ }
+ else {
+ outfile = stdout;
+ }
+
+ int aclGrantCount;
+ S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+ char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
+ char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback,
+ &responseCompleteCallback
+ };
+
+ do {
+ S3_get_acl(&bucketContext, key, ownerId, ownerDisplayName,
+ &aclGrantCount, aclGrants, 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG == S3StatusOK) {
+ fprintf(outfile, "OwnerID %s %s\n", ownerId, ownerDisplayName);
+ fprintf(outfile, "%-6s %-90s %-12s\n", " Type",
+ " User Identifier",
+ " Permission");
+ fprintf(outfile, "------ "
+ "------------------------------------------------------------"
+ "------------------------------ ------------\n");
+ int i;
+ for (i = 0; i < aclGrantCount; i++) {
+ S3AclGrant *grant = &(aclGrants[i]);
+ const char *type;
+ char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
+ S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
+ const char *id;
+
+ switch (grant->granteeType) {
+ case S3GranteeTypeAmazonCustomerByEmail:
+ type = "Email";
+ id = grant->grantee.amazonCustomerByEmail.emailAddress;
+ break;
+ case S3GranteeTypeCanonicalUser:
+ type = "UserID";
+ snprintf(composedId, sizeof(composedId),
+ "%s (%s)", grant->grantee.canonicalUser.id,
+ grant->grantee.canonicalUser.displayName);
+ id = composedId;
+ break;
+ case S3GranteeTypeAllAwsUsers:
+ type = "Group";
+ id = "Authenticated AWS Users";
+ break;
+ case S3GranteeTypeAllUsers:
+ type = "Group";
+ id = "All Users";
+ break;
+ default:
+ type = "Group";
+ id = "Log Delivery";
+ break;
+ }
+ const char *perm;
+ switch (grant->permission) {
+ case S3PermissionRead:
+ perm = "READ";
+ break;
+ case S3PermissionWrite:
+ perm = "WRITE";
+ break;
+ case S3PermissionReadACP:
+ perm = "READ_ACP";
+ break;
+ case S3PermissionWriteACP:
+ perm = "WRITE_ACP";
+ break;
+ default:
+ perm = "FULL_CONTROL";
+ break;
+ }
+ fprintf(outfile, "%-6s %-90s %-12s\n", type, id, perm);
+ }
+ }
+ else {
+ printError();
+ }
+
+ fclose(outfile);
+
+ S3_deinitialize();
}
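
get_acl above (and get_logging below) map S3GranteeType and S3Permission values to display strings with nested switch statements. Purely as an illustration, not a change to s3.c, the same mapping collapses into two small helpers:

    #include "libs3.h"

    /* Mirrors the permission switch used in the grant-printing loops above. */
    static const char *permission_name(S3Permission p)
    {
        switch (p) {
        case S3PermissionRead:     return "READ";
        case S3PermissionWrite:    return "WRITE";
        case S3PermissionReadACP:  return "READ_ACP";
        case S3PermissionWriteACP: return "WRITE_ACP";
        default:                   return "FULL_CONTROL";
        }
    }

    /* Mirrors the group branches of the grantee-type switch in get_acl;
       email and canonical-user grantees are formatted separately there. */
    static const char *grantee_group_name(S3GranteeType t)
    {
        switch (t) {
        case S3GranteeTypeAllAwsUsers: return "Authenticated AWS Users";
        case S3GranteeTypeAllUsers:    return "All Users";
        default:                       return "Log Delivery";
        }
    }
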
@@ -2289,99 +2289,99 @@ void get_acl(int argc, char **argv, int optindex)
void set_acl(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex];
- const char *key = 0;
-
- // Split bucket/key
- char *slash = argv[optindex++];
- while (*slash && (*slash != '/')) {
- slash++;
- }
- if (*slash) {
- *slash++ = 0;
- key = slash;
- }
- else {
- key = 0;
- }
-
- const char *filename = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
- filename = &(param[FILENAME_PREFIX_LEN]);
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- FILE *infile;
-
- if (filename) {
- if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
- fprintf(stderr, "\nERROR: Failed to open input file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- }
- else {
- infile = stdin;
- }
-
- // Read in the complete ACL
- char aclBuf[65536];
- aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
- char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
- char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
-
- // Parse it
- int aclGrantCount;
- S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
- if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
- &aclGrantCount, aclGrants)) {
- fprintf(stderr, "\nERROR: Failed to parse ACLs\n");
- fclose(infile);
- exit(-1);
- }
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback,
- &responseCompleteCallback
- };
-
- do {
- S3_set_acl(&bucketContext, key, ownerId, ownerDisplayName,
- aclGrantCount, aclGrants, 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG != S3StatusOK) {
- printError();
- }
-
- fclose(infile);
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex];
+ const char *key = 0;
+
+ // Split bucket/key
+ char *slash = argv[optindex++];
+ while (*slash && (*slash != '/')) {
+ slash++;
+ }
+ if (*slash) {
+ *slash++ = 0;
+ key = slash;
+ }
+ else {
+ key = 0;
+ }
+
+ const char *filename = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+ filename = &(param[FILENAME_PREFIX_LEN]);
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ FILE *infile;
+
+ if (filename) {
+ if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+ fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ }
+ else {
+ infile = stdin;
+ }
+
+ // Read in the complete ACL
+ char aclBuf[65536];
+ aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
+ char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
+ char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+
+ // Parse it
+ int aclGrantCount;
+ S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+ if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
+ &aclGrantCount, aclGrants)) {
+ fprintf(stderr, "\nERROR: Failed to parse ACLs\n");
+ fclose(infile);
+ exit(-1);
+ }
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback,
+ &responseCompleteCallback
+ };
+
+ do {
+ S3_set_acl(&bucketContext, key, ownerId, ownerDisplayName,
+ aclGrantCount, aclGrants, 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG != S3StatusOK) {
+ printError();
+ }
+
+ fclose(infile);
+
+ S3_deinitialize();
}
@@ -2389,157 +2389,157 @@ void set_acl(int argc, char **argv, int optindex)
void get_logging(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex++];
- const char *filename = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
- filename = &(param[FILENAME_PREFIX_LEN]);
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- FILE *outfile = 0;
-
- if (filename) {
- // Stat the file, and if it doesn't exist, open it in w mode
- struct stat buf;
- if (stat(filename, &buf) == -1) {
- outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
- }
- else {
- // Open in r+ so that we don't truncate the file, just in case
- // there is an error and we write no bytes, we leave the file
- // unmodified
- outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
- }
-
- if (!outfile) {
- fprintf(stderr, "\nERROR: Failed to open output file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- }
- else if (showResponsePropertiesG) {
- fprintf(stderr, "\nERROR: getlogging -s requires a filename "
- "parameter\n");
- usageExit(stderr);
- }
- else {
- outfile = stdout;
- }
-
- int aclGrantCount;
- S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
- char targetBucket[S3_MAX_BUCKET_NAME_SIZE];
- char targetPrefix[S3_MAX_KEY_SIZE];
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback,
- &responseCompleteCallback
- };
-
- do {
- S3_get_server_access_logging(&bucketContext, targetBucket, targetPrefix,
- &aclGrantCount, aclGrants, 0,
- &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG == S3StatusOK) {
- if (targetBucket[0]) {
- printf("Target Bucket: %s\n", targetBucket);
- if (targetPrefix[0]) {
- printf("Target Prefix: %s\n", targetPrefix);
- }
- fprintf(outfile, "%-6s %-90s %-12s\n", " Type",
- " User Identifier",
- " Permission");
- fprintf(outfile, "------ "
- "---------------------------------------------------------"
- "--------------------------------- ------------\n");
- int i;
- for (i = 0; i < aclGrantCount; i++) {
- S3AclGrant *grant = &(aclGrants[i]);
- const char *type;
- char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
- S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
- const char *id;
-
- switch (grant->granteeType) {
- case S3GranteeTypeAmazonCustomerByEmail:
- type = "Email";
- id = grant->grantee.amazonCustomerByEmail.emailAddress;
- break;
- case S3GranteeTypeCanonicalUser:
- type = "UserID";
- snprintf(composedId, sizeof(composedId),
- "%s (%s)", grant->grantee.canonicalUser.id,
- grant->grantee.canonicalUser.displayName);
- id = composedId;
- break;
- case S3GranteeTypeAllAwsUsers:
- type = "Group";
- id = "Authenticated AWS Users";
- break;
- default:
- type = "Group";
- id = "All Users";
- break;
- }
- const char *perm;
- switch (grant->permission) {
- case S3PermissionRead:
- perm = "READ";
- break;
- case S3PermissionWrite:
- perm = "WRITE";
- break;
- case S3PermissionReadACP:
- perm = "READ_ACP";
- break;
- case S3PermissionWriteACP:
- perm = "WRITE_ACP";
- break;
- default:
- perm = "FULL_CONTROL";
- break;
- }
- fprintf(outfile, "%-6s %-90s %-12s\n", type, id, perm);
- }
- }
- else {
- printf("Service logging is not enabled for this bucket.\n");
- }
- }
- else {
- printError();
- }
-
- fclose(outfile);
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex++];
+ const char *filename = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+ filename = &(param[FILENAME_PREFIX_LEN]);
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ FILE *outfile = 0;
+
+ if (filename) {
+ // Stat the file, and if it doesn't exist, open it in w mode
+ struct stat buf;
+ if (stat(filename, &buf) == -1) {
+ outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS);
+ }
+ else {
+ // Open in r+ so that we don't truncate the file, just in case
+ // there is an error and we write no bytes, we leave the file
+ // unmodified
+ outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
+ }
+
+ if (!outfile) {
+ fprintf(stderr, "\nERROR: Failed to open output file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ }
+ else if (showResponsePropertiesG) {
+ fprintf(stderr, "\nERROR: getlogging -s requires a filename "
+ "parameter\n");
+ usageExit(stderr);
+ }
+ else {
+ outfile = stdout;
+ }
+
+ int aclGrantCount;
+ S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+ char targetBucket[S3_MAX_BUCKET_NAME_SIZE];
+ char targetPrefix[S3_MAX_KEY_SIZE];
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback,
+ &responseCompleteCallback
+ };
+
+ do {
+ S3_get_server_access_logging(&bucketContext, targetBucket, targetPrefix,
+ &aclGrantCount, aclGrants, 0,
+ &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG == S3StatusOK) {
+ if (targetBucket[0]) {
+ printf("Target Bucket: %s\n", targetBucket);
+ if (targetPrefix[0]) {
+ printf("Target Prefix: %s\n", targetPrefix);
+ }
+ fprintf(outfile, "%-6s %-90s %-12s\n", " Type",
+ " User Identifier",
+ " Permission");
+ fprintf(outfile, "------ "
+ "---------------------------------------------------------"
+ "--------------------------------- ------------\n");
+ int i;
+ for (i = 0; i < aclGrantCount; i++) {
+ S3AclGrant *grant = &(aclGrants[i]);
+ const char *type;
+ char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
+ S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
+ const char *id;
+
+ switch (grant->granteeType) {
+ case S3GranteeTypeAmazonCustomerByEmail:
+ type = "Email";
+ id = grant->grantee.amazonCustomerByEmail.emailAddress;
+ break;
+ case S3GranteeTypeCanonicalUser:
+ type = "UserID";
+ snprintf(composedId, sizeof(composedId),
+ "%s (%s)", grant->grantee.canonicalUser.id,
+ grant->grantee.canonicalUser.displayName);
+ id = composedId;
+ break;
+ case S3GranteeTypeAllAwsUsers:
+ type = "Group";
+ id = "Authenticated AWS Users";
+ break;
+ default:
+ type = "Group";
+ id = "All Users";
+ break;
+ }
+ const char *perm;
+ switch (grant->permission) {
+ case S3PermissionRead:
+ perm = "READ";
+ break;
+ case S3PermissionWrite:
+ perm = "WRITE";
+ break;
+ case S3PermissionReadACP:
+ perm = "READ_ACP";
+ break;
+ case S3PermissionWriteACP:
+ perm = "WRITE_ACP";
+ break;
+ default:
+ perm = "FULL_CONTROL";
+ break;
+ }
+ fprintf(outfile, "%-6s %-90s %-12s\n", type, id, perm);
+ }
+ }
+ else {
+ printf("Service logging is not enabled for this bucket.\n");
+ }
+ }
+ else {
+ printError();
+ }
+
+ fclose(outfile);
+
+ S3_deinitialize();
}
@@ -2547,96 +2547,96 @@ void get_logging(int argc, char **argv, int optindex)
void set_logging(int argc, char **argv, int optindex)
{
- if (optindex == argc) {
- fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
- usageExit(stderr);
- }
-
- const char *bucketName = argv[optindex++];
-
- const char *targetBucket = 0, *targetPrefix = 0, *filename = 0;
-
- while (optindex < argc) {
- char *param = argv[optindex++];
- if (!strncmp(param, TARGET_BUCKET_PREFIX, TARGET_BUCKET_PREFIX_LEN)) {
- targetBucket = &(param[TARGET_BUCKET_PREFIX_LEN]);
- }
- else if (!strncmp(param, TARGET_PREFIX_PREFIX,
- TARGET_PREFIX_PREFIX_LEN)) {
- targetPrefix = &(param[TARGET_PREFIX_PREFIX_LEN]);
- }
- else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
- filename = &(param[FILENAME_PREFIX_LEN]);
- }
- else {
- fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
- usageExit(stderr);
- }
- }
-
- int aclGrantCount = 0;
- S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
-
- if (targetBucket) {
- FILE *infile;
-
- if (filename) {
- if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
- fprintf(stderr, "\nERROR: Failed to open input file %s: ",
- filename);
- perror(0);
- exit(-1);
- }
- }
- else {
- infile = stdin;
- }
-
- // Read in the complete ACL
- char aclBuf[65536];
- aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
- char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
- char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
-
- // Parse it
- if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
- &aclGrantCount, aclGrants)) {
- fprintf(stderr, "\nERROR: Failed to parse ACLs\n");
- fclose(infile);
- exit(-1);
- }
-
- fclose(infile);
- }
-
- S3_init();
-
- S3BucketContext bucketContext =
- {
- bucketName,
- protocolG,
- uriStyleG,
- accessKeyIdG,
- secretAccessKeyG
- };
-
- S3ResponseHandler responseHandler =
- {
- &responsePropertiesCallback,
- &responseCompleteCallback
- };
-
- do {
- S3_set_server_access_logging(&bucketContext, targetBucket,
- targetPrefix, aclGrantCount, aclGrants,
- 0, &responseHandler, 0);
- } while (S3_status_is_retryable(statusG) && should_retry());
-
- if (statusG != S3StatusOK) {
- printError();
- }
-
- S3_deinitialize();
+ if (optindex == argc) {
+ fprintf(stderr, "\nERROR: Missing parameter: bucket\n");
+ usageExit(stderr);
+ }
+
+ const char *bucketName = argv[optindex++];
+
+ const char *targetBucket = 0, *targetPrefix = 0, *filename = 0;
+
+ while (optindex < argc) {
+ char *param = argv[optindex++];
+ if (!strncmp(param, TARGET_BUCKET_PREFIX, TARGET_BUCKET_PREFIX_LEN)) {
+ targetBucket = &(param[TARGET_BUCKET_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, TARGET_PREFIX_PREFIX,
+ TARGET_PREFIX_PREFIX_LEN)) {
+ targetPrefix = &(param[TARGET_PREFIX_PREFIX_LEN]);
+ }
+ else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
+ filename = &(param[FILENAME_PREFIX_LEN]);
+ }
+ else {
+ fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
+ usageExit(stderr);
+ }
+ }
+
+ int aclGrantCount = 0;
+ S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
+
+ if (targetBucket) {
+ FILE *infile;
+
+ if (filename) {
+ if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
+ fprintf(stderr, "\nERROR: Failed to open input file %s: ",
+ filename);
+ perror(0);
+ exit(-1);
+ }
+ }
+ else {
+ infile = stdin;
+ }
+
+ // Read in the complete ACL
+ char aclBuf[65536];
+ aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
+ char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
+ char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
+
+ // Parse it
+ if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
+ &aclGrantCount, aclGrants)) {
+ fprintf(stderr, "\nERROR: Failed to parse ACLs\n");
+ fclose(infile);
+ exit(-1);
+ }
+
+ fclose(infile);
+ }
+
+ S3_init();
+
+ S3BucketContext bucketContext =
+ {
+ bucketName,
+ protocolG,
+ uriStyleG,
+ accessKeyIdG,
+ secretAccessKeyG
+ };
+
+ S3ResponseHandler responseHandler =
+ {
+ &responsePropertiesCallback,
+ &responseCompleteCallback
+ };
+
+ do {
+ S3_set_server_access_logging(&bucketContext, targetBucket,
+ targetPrefix, aclGrantCount, aclGrants,
+ 0, &responseHandler, 0);
+ } while (S3_status_is_retryable(statusG) && should_retry());
+
+ if (statusG != S3StatusOK) {
+ printError();
+ }
+
+ S3_deinitialize();
}
@@ -2644,132 +2644,132 @@ void set_logging(int argc, char **argv, int optindex)
int main(int argc, char **argv)
{
- // Parse args
- while (1) {
- int idx = 0;
- int c = getopt_long(argc, argv, "fhusr:", longOptionsG, &idx);
-
- if (c == -1) {
- // End of options
- break;
- }
-
- switch (c) {
- case 'f':
- forceG = 1;
- break;
- case 'h':
- uriStyleG = S3UriStyleVirtualHost;
- break;
- case 'u':
- protocolG = S3ProtocolHTTP;
- break;
- case 's':
- showResponsePropertiesG = 1;
- break;
- case 'r': {
- const char *v = optarg;
- while (*v) {
- retriesG *= 10;
- retriesG += *v - '0';
- v++;
- }
- break;
- }
- default:
- fprintf(stderr, "\nERROR: Unknown option: -%c\n", c);
- // Usage exit
- usageExit(stderr);
- }
- }
-
- // The first non-option argument gives the operation to perform
- if (optind == argc) {
- fprintf(stderr, "\n\nERROR: Missing argument: command\n\n");
- usageExit(stderr);
- }
-
- const char *command = argv[optind++];
-
- if (!strcmp(command, "help")) {
- fprintf(stdout, "\ns3 is a program for performing single requests "
- "to Amazon S3.\n");
- usageExit(stdout);
- }
-
- accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
- if (!accessKeyIdG) {
- fprintf(stderr, "Missing environment variable: S3_ACCESS_KEY_ID\n");
- return -1;
- }
- secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
- if (!secretAccessKeyG) {
- fprintf(stderr,
- "Missing environment variable: S3_SECRET_ACCESS_KEY\n");
- return -1;
- }
-
- if (!strcmp(command, "list")) {
- list(argc, argv, optind);
- }
- else if (!strcmp(command, "test")) {
- test_bucket(argc, argv, optind);
- }
- else if (!strcmp(command, "create")) {
- create_bucket(argc, argv, optind);
- }
- else if (!strcmp(command, "delete")) {
- if (optind == argc) {
- fprintf(stderr,
- "\nERROR: Missing parameter: bucket or bucket/key\n");
- usageExit(stderr);
- }
- char *val = argv[optind];
- int hasSlash = 0;
- while (*val) {
- if (*val++ == '/') {
- hasSlash = 1;
- break;
- }
- }
- if (hasSlash) {
- delete_object(argc, argv, optind);
- }
- else {
- delete_bucket(argc, argv, optind);
- }
- }
- else if (!strcmp(command, "put")) {
- put_object(argc, argv, optind);
- }
- else if (!strcmp(command, "copy")) {
- copy_object(argc, argv, optind);
- }
- else if (!strcmp(command, "get")) {
- get_object(argc, argv, optind);
- }
- else if (!strcmp(command, "head")) {
- head_object(argc, argv, optind);
- }
- else if (!strcmp(command, "gqs")) {
- generate_query_string(argc, argv, optind);
- }
- else if (!strcmp(command, "getacl")) {
- get_acl(argc, argv, optind);
- }
- else if (!strcmp(command, "setacl")) {
- set_acl(argc, argv, optind);
- }
- else if (!strcmp(command, "getlogging")) {
- get_logging(argc, argv, optind);
- }
- else if (!strcmp(command, "setlogging")) {
- set_logging(argc, argv, optind);
- }
- else {
- fprintf(stderr, "Unknown command: %s\n", command);
- return -1;
- }
-
- return 0;
+ // Parse args
+ while (1) {
+ int idx = 0;
+ int c = getopt_long(argc, argv, "fhusr:", longOptionsG, &idx);
+
+ if (c == -1) {
+ // End of options
+ break;
+ }
+
+ switch (c) {
+ case 'f':
+ forceG = 1;
+ break;
+ case 'h':
+ uriStyleG = S3UriStyleVirtualHost;
+ break;
+ case 'u':
+ protocolG = S3ProtocolHTTP;
+ break;
+ case 's':
+ showResponsePropertiesG = 1;
+ break;
+ case 'r': {
+ const char *v = optarg;
+ while (*v) {
+ retriesG *= 10;
+ retriesG += *v - '0';
+ v++;
+ }
+ break;
+ }
+ default:
+ fprintf(stderr, "\nERROR: Unknown option: -%c\n", c);
+ // Usage exit
+ usageExit(stderr);
+ }
+ }
+
+ // The first non-option argument gives the operation to perform
+ if (optind == argc) {
+ fprintf(stderr, "\n\nERROR: Missing argument: command\n\n");
+ usageExit(stderr);
+ }
+
+ const char *command = argv[optind++];
+
+ if (!strcmp(command, "help")) {
+ fprintf(stdout, "\ns3 is a program for performing single requests "
+ "to Amazon S3.\n");
+ usageExit(stdout);
+ }
+
+ accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
+ if (!accessKeyIdG) {
+ fprintf(stderr, "Missing environment variable: S3_ACCESS_KEY_ID\n");
+ return -1;
+ }
+ secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
+ if (!secretAccessKeyG) {
+ fprintf(stderr,
+ "Missing environment variable: S3_SECRET_ACCESS_KEY\n");
+ return -1;
+ }
+
+ if (!strcmp(command, "list")) {
+ list(argc, argv, optind);
+ }
+ else if (!strcmp(command, "test")) {
+ test_bucket(argc, argv, optind);
+ }
+ else if (!strcmp(command, "create")) {
+ create_bucket(argc, argv, optind);
+ }
+ else if (!strcmp(command, "delete")) {
+ if (optind == argc) {
+ fprintf(stderr,
+ "\nERROR: Missing parameter: bucket or bucket/key\n");
+ usageExit(stderr);
+ }
+ char *val = argv[optind];
+ int hasSlash = 0;
+ while (*val) {
+ if (*val++ == '/') {
+ hasSlash = 1;
+ break;
+ }
+ }
+ if (hasSlash) {
+ delete_object(argc, argv, optind);
+ }
+ else {
+ delete_bucket(argc, argv, optind);
+ }
+ }
+ else if (!strcmp(command, "put")) {
+ put_object(argc, argv, optind);
+ }
+ else if (!strcmp(command, "copy")) {
+ copy_object(argc, argv, optind);
+ }
+ else if (!strcmp(command, "get")) {
+ get_object(argc, argv, optind);
+ }
+ else if (!strcmp(command, "head")) {
+ head_object(argc, argv, optind);
+ }
+ else if (!strcmp(command, "gqs")) {
+ generate_query_string(argc, argv, optind);
+ }
+ else if (!strcmp(command, "getacl")) {
+ get_acl(argc, argv, optind);
+ }
+ else if (!strcmp(command, "setacl")) {
+ set_acl(argc, argv, optind);
+ }
+ else if (!strcmp(command, "getlogging")) {
+ get_logging(argc, argv, optind);
+ }
+ else if (!strcmp(command, "setlogging")) {
+ set_logging(argc, argv, optind);
+ }
+ else {
+ fprintf(stderr, "Unknown command: %s\n", command);
+ return -1;
+ }
+
+ return 0;
}
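
main dispatches on the command word with a chain of strcmp calls. As an illustration only, and assuming it lived inside s3.c next to those handlers, the same dispatch could be expressed as a table; delete and help still need their special handling and are left to the chain above.

    #include <string.h>

    /* All command handlers in s3.c share this signature. */
    typedef void (*CommandFn)(int argc, char **argv, int optindex);

    /* Hypothetical table mirroring the strcmp chain in main(). */
    static const struct { const char *name; CommandFn fn; } commandsG[] = {
        { "list",       list                  },
        { "test",       test_bucket           },
        { "create",     create_bucket         },
        { "put",        put_object            },
        { "copy",       copy_object           },
        { "get",        get_object            },
        { "head",       head_object           },
        { "gqs",        generate_query_string },
        { "getacl",     get_acl               },
        { "setacl",     set_acl               },
        { "getlogging", get_logging           },
        { "setlogging", set_logging           },
    };

    /* Returns 0 if the command was found and run, -1 otherwise. */
    static int dispatch(const char *command, int argc, char **argv, int optindex)
    {
        size_t i;
        for (i = 0; i < sizeof(commandsG) / sizeof(commandsG[0]); i++) {
            if (!strcmp(command, commandsG[i].name)) {
                commandsG[i].fn(argc, argv, optindex);
                return 0;
            }
        }
        return -1;
    }
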
diff --git a/src/service.c b/src/service.c
index dbef8ed..216b981 100644
--- a/src/service.c
+++ b/src/service.c
@@ -33,155 +33,155 @@
typedef struct XmlCallbackData
{
- SimpleXml simpleXml;
-
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ListServiceCallback *listServiceCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
-
- string_buffer(ownerId, 256);
- string_buffer(ownerDisplayName, 256);
- string_buffer(bucketName, 256);
- string_buffer(creationDate, 128);
+ SimpleXml simpleXml;
+
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ListServiceCallback *listServiceCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
+
+ string_buffer(ownerId, 256);
+ string_buffer(ownerDisplayName, 256);
+ string_buffer(bucketName, 256);
+ string_buffer(creationDate, 128);
} XmlCallbackData;
static S3Status xmlCallback(const char *elementPath, const char *data,
- int dataLen, void *callbackData)
+ int dataLen, void *callbackData)
{
- XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
-
- int fit;
-
- if (data) {
- if (!strcmp(elementPath, "ListAllMyBucketsResult/Owner/ID")) {
- string_buffer_append(cbData->ownerId, data, dataLen, fit);
- }
- else if (!strcmp(elementPath,
- "ListAllMyBucketsResult/Owner/DisplayName")) {
- string_buffer_append(cbData->ownerDisplayName, data, dataLen, fit);
- }
- else if (!strcmp(elementPath,
- "ListAllMyBucketsResult/Buckets/Bucket/Name")) {
- string_buffer_append(cbData->bucketName, data, dataLen, fit);
- }
- else if (!strcmp
- (elementPath,
- "ListAllMyBucketsResult/Buckets/Bucket/CreationDate")) {
- string_buffer_append(cbData->creationDate, data, dataLen, fit);
- }
- }
- else {
- if (!strcmp(elementPath, "ListAllMyBucketsResult/Buckets/Bucket")) {
- // Parse date. Assume ISO-8601 date format.
- time_t creationDate = parseIso8601Time(cbData->creationDate);
-
- // Make the callback - a bucket just finished
- S3Status status = (*(cbData->listServiceCallback))
- (cbData->ownerId, cbData->ownerDisplayName,
- cbData->bucketName, creationDate, cbData->callbackData);
-
- string_buffer_initialize(cbData->bucketName);
- string_buffer_initialize(cbData->creationDate);
-
- return status;
- }
- }
-
- return S3StatusOK;
+ XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+
+ int fit;
+
+ if (data) {
+ if (!strcmp(elementPath, "ListAllMyBucketsResult/Owner/ID")) {
+ string_buffer_append(cbData->ownerId, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath,
+ "ListAllMyBucketsResult/Owner/DisplayName")) {
+ string_buffer_append(cbData->ownerDisplayName, data, dataLen, fit);
+ }
+ else if (!strcmp(elementPath,
+ "ListAllMyBucketsResult/Buckets/Bucket/Name")) {
+ string_buffer_append(cbData->bucketName, data, dataLen, fit);
+ }
+ else if (!strcmp
+ (elementPath,
+ "ListAllMyBucketsResult/Buckets/Bucket/CreationDate")) {
+ string_buffer_append(cbData->creationDate, data, dataLen, fit);
+ }
+ }
+ else {
+ if (!strcmp(elementPath, "ListAllMyBucketsResult/Buckets/Bucket")) {
+ // Parse date. Assume ISO-8601 date format.
+ time_t creationDate = parseIso8601Time(cbData->creationDate);
+
+ // Make the callback - a bucket just finished
+ S3Status status = (*(cbData->listServiceCallback))
+ (cbData->ownerId, cbData->ownerDisplayName,
+ cbData->bucketName, creationDate, cbData->callbackData);
+
+ string_buffer_initialize(cbData->bucketName);
+ string_buffer_initialize(cbData->creationDate);
+
+ return status;
+ }
+ }
+
+ return S3StatusOK;
}
static S3Status propertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
-
- return (*(cbData->responsePropertiesCallback))
- (responseProperties, cbData->callbackData);
+ XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+
+ return (*(cbData->responsePropertiesCallback))
+ (responseProperties, cbData->callbackData);
}
static S3Status dataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+ XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
- return simplexml_add(&(cbData->simpleXml), buffer, bufferSize);
+ return simplexml_add(&(cbData->simpleXml), buffer, bufferSize);
}
static void completeCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
+ XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
- (*(cbData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, cbData->callbackData);
+ (*(cbData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, cbData->callbackData);
- simplexml_deinitialize(&(cbData->simpleXml));
+ simplexml_deinitialize(&(cbData->simpleXml));
- free(cbData);
+ free(cbData);
}
void S3_list_service(S3Protocol protocol, const char *accessKeyId,
- const char *secretAccessKey,
- S3RequestContext *requestContext,
- const S3ListServiceHandler *handler, void *callbackData)
+ const char *secretAccessKey,
+ S3RequestContext *requestContext,
+ const S3ListServiceHandler *handler, void *callbackData)
{
- // Create and set up the callback data
- XmlCallbackData *data =
- (XmlCallbackData *) malloc(sizeof(XmlCallbackData));
- if (!data) {
- (*(handler->responseHandler.completeCallback))
- (S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- simplexml_initialize(&(data->simpleXml), &xmlCallback, data);
-
- data->responsePropertiesCallback =
- handler->responseHandler.propertiesCallback;
- data->listServiceCallback = handler->listServiceCallback;
- data->responseCompleteCallback = handler->responseHandler.completeCallback;
- data->callbackData = callbackData;
-
- string_buffer_initialize(data->ownerId);
- string_buffer_initialize(data->ownerDisplayName);
- string_buffer_initialize(data->bucketName);
- string_buffer_initialize(data->creationDate);
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeGET, // httpRequestType
- { 0, // bucketName
- protocol, // protocol
- S3UriStylePath, // uriStyle
- accessKeyId, // accessKeyId
- secretAccessKey }, // secretAccessKey
- 0, // key
- 0, // queryParams
- 0, // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // requestProperties
- &propertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- &dataCallback, // fromS3Callback
- &completeCallback, // completeCallback
- data // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create and set up the callback data
+ XmlCallbackData *data =
+ (XmlCallbackData *) malloc(sizeof(XmlCallbackData));
+ if (!data) {
+ (*(handler->responseHandler.completeCallback))
+ (S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ simplexml_initialize(&(data->simpleXml), &xmlCallback, data);
+
+ data->responsePropertiesCallback =
+ handler->responseHandler.propertiesCallback;
+ data->listServiceCallback = handler->listServiceCallback;
+ data->responseCompleteCallback = handler->responseHandler.completeCallback;
+ data->callbackData = callbackData;
+
+ string_buffer_initialize(data->ownerId);
+ string_buffer_initialize(data->ownerDisplayName);
+ string_buffer_initialize(data->bucketName);
+ string_buffer_initialize(data->creationDate);
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeGET, // httpRequestType
+ { 0, // bucketName
+ protocol, // protocol
+ S3UriStylePath, // uriStyle
+ accessKeyId, // accessKeyId
+ secretAccessKey }, // secretAccessKey
+ 0, // key
+ 0, // queryParams
+ 0, // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // requestProperties
+ &propertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ &dataCallback, // fromS3Callback
+ &completeCallback, // completeCallback
+ data // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
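
S3_list_service is callback driven: the caller supplies an S3ListServiceHandler whose listServiceCallback receives one bucket per invocation, with the owner, bucket name and creation date already extracted from the XML by xmlCallback above. A minimal synchronous caller could look like the sketch below; it assumes the library has already been initialized (s3.c does this in its S3_init() helper) and that real credentials replace the placeholder parameters.

    #include <stdio.h>
    #include "libs3.h"

    /* Called once per bucket, with fields parsed by service.c's xmlCallback. */
    static S3Status listServiceCb(const char *ownerId,
                                  const char *ownerDisplayName,
                                  const char *bucketName,
                                  int64_t creationDateSeconds,
                                  void *callbackData)
    {
        (void) ownerId; (void) ownerDisplayName; (void) callbackData;
        printf("%s (created %lld)\n", bucketName,
               (long long) creationDateSeconds);
        return S3StatusOK;
    }

    static S3Status propsCb(const S3ResponseProperties *properties,
                            void *callbackData)
    {
        (void) properties; (void) callbackData;
        return S3StatusOK;
    }

    static void completeCb(S3Status status, const S3ErrorDetails *error,
                           void *callbackData)
    {
        (void) error;
        *((S3Status *) callbackData) = status;    /* remember the final status */
    }

    static S3Status list_all_buckets(const char *accessKeyId,
                                     const char *secretAccessKey)
    {
        S3Status finalStatus = S3StatusOK;

        S3ListServiceHandler handler =
        {
            { &propsCb, &completeCb },
            &listServiceCb
        };

        /* A null S3RequestContext makes the request run synchronously. */
        S3_list_service(S3ProtocolHTTPS, accessKeyId, secretAccessKey,
                        0, &handler, &finalStatus);

        return finalStatus;
    }
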
diff --git a/src/service_access_logging.c b/src/service_access_logging.c
index fcbce46..cbed2c1 100644
--- a/src/service_access_logging.c
+++ b/src/service_access_logging.c
@@ -34,199 +34,199 @@
typedef struct ConvertBlsData
{
- char *targetBucketReturn;
- int targetBucketReturnLen;
- char *targetPrefixReturn;
- int targetPrefixReturnLen;
- int *aclGrantCountReturn;
- S3AclGrant *aclGrants;
-
- string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE);
- string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE);
- string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
- string_buffer(groupUri, 128);
- string_buffer(permission, 32);
+ char *targetBucketReturn;
+ int targetBucketReturnLen;
+ char *targetPrefixReturn;
+ int targetPrefixReturnLen;
+ int *aclGrantCountReturn;
+ S3AclGrant *aclGrants;
+
+ string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE);
+ string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE);
+ string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE);
+ string_buffer(groupUri, 128);
+ string_buffer(permission, 32);
} ConvertBlsData;
static S3Status convertBlsXmlCallback(const char *elementPath,
- const char *data, int dataLen,
- void *callbackData)
+ const char *data, int dataLen,
+ void *callbackData)
{
- ConvertBlsData *caData = (ConvertBlsData *) callbackData;
-
- int fit;
-
- if (data) {
- if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetBucket")) {
- caData->targetBucketReturnLen +=
- snprintf(&(caData->targetBucketReturn
- [caData->targetBucketReturnLen]),
- 255 - caData->targetBucketReturnLen - 1,
- "%.*s", dataLen, data);
- if (caData->targetBucketReturnLen >= 255) {
- return S3StatusTargetBucketTooLong;
- }
- }
- else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetPrefix")) {
- caData->targetPrefixReturnLen +=
- snprintf(&(caData->targetPrefixReturn
- [caData->targetPrefixReturnLen]),
- 255 - caData->targetPrefixReturnLen - 1,
- "%.*s", dataLen, data);
- if (caData->targetPrefixReturnLen >= 255) {
- return S3StatusTargetPrefixTooLong;
- }
- }
- else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetGrants/Grant/Grantee/EmailAddress")) {
- // AmazonCustomerByEmail
- string_buffer_append(caData->emailAddress, data, dataLen, fit);
- if (!fit) {
- return S3StatusEmailAddressTooLong;
- }
- }
- else if (!strcmp(elementPath,
- "AccessControlPolicy/AccessControlList/Grant/"
- "Grantee/ID")) {
- // CanonicalUser
- string_buffer_append(caData->userId, data, dataLen, fit);
- if (!fit) {
- return S3StatusUserIdTooLong;
- }
- }
- else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetGrants/Grant/Grantee/DisplayName")) {
- // CanonicalUser
- string_buffer_append(caData->userDisplayName, data, dataLen, fit);
- if (!fit) {
- return S3StatusUserDisplayNameTooLong;
- }
- }
- else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetGrants/Grant/Grantee/URI")) {
- // Group
- string_buffer_append(caData->groupUri, data, dataLen, fit);
- if (!fit) {
- return S3StatusGroupUriTooLong;
- }
- }
- else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetGrants/Grant/Permission")) {
- // Permission
- string_buffer_append(caData->permission, data, dataLen, fit);
- if (!fit) {
- return S3StatusPermissionTooLong;
- }
- }
- }
- else {
- if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
- "TargetGrants/Grant")) {
- // A grant has just been completed; so add the next S3AclGrant
- // based on the values read
- if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) {
- return S3StatusTooManyGrants;
- }
-
- S3AclGrant *grant = &(caData->aclGrants
- [*(caData->aclGrantCountReturn)]);
-
- if (caData->emailAddress[0]) {
- grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
- strcpy(grant->grantee.amazonCustomerByEmail.emailAddress,
- caData->emailAddress);
- }
- else if (caData->userId[0] && caData->userDisplayName[0]) {
- grant->granteeType = S3GranteeTypeCanonicalUser;
- strcpy(grant->grantee.canonicalUser.id, caData->userId);
- strcpy(grant->grantee.canonicalUser.displayName,
- caData->userDisplayName);
- }
- else if (caData->groupUri[0]) {
- if (!strcmp(caData->groupUri,
- "http://acs.amazonaws.com/groups/global/"
- "AuthenticatedUsers")) {
- grant->granteeType = S3GranteeTypeAllAwsUsers;
- }
- else if (!strcmp(caData->groupUri,
- "http://acs.amazonaws.com/groups/global/"
- "AllUsers")) {
- grant->granteeType = S3GranteeTypeAllUsers;
- }
- else {
- return S3StatusBadGrantee;
- }
- }
- else {
- return S3StatusBadGrantee;
- }
-
- if (!strcmp(caData->permission, "READ")) {
- grant->permission = S3PermissionRead;
- }
- else if (!strcmp(caData->permission, "WRITE")) {
- grant->permission = S3PermissionWrite;
- }
- else if (!strcmp(caData->permission, "READ_ACP")) {
- grant->permission = S3PermissionReadACP;
- }
- else if (!strcmp(caData->permission, "WRITE_ACP")) {
- grant->permission = S3PermissionWriteACP;
- }
- else if (!strcmp(caData->permission, "FULL_CONTROL")) {
- grant->permission = S3PermissionFullControl;
- }
- else {
- return S3StatusBadPermission;
- }
-
- (*(caData->aclGrantCountReturn))++;
-
- string_buffer_initialize(caData->emailAddress);
- string_buffer_initialize(caData->userId);
- string_buffer_initialize(caData->userDisplayName);
- string_buffer_initialize(caData->groupUri);
- string_buffer_initialize(caData->permission);
- }
- }
-
- return S3StatusOK;
+ ConvertBlsData *caData = (ConvertBlsData *) callbackData;
+
+ int fit;
+
+ if (data) {
+ if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetBucket")) {
+ caData->targetBucketReturnLen +=
+ snprintf(&(caData->targetBucketReturn
+ [caData->targetBucketReturnLen]),
+ 255 - caData->targetBucketReturnLen - 1,
+ "%.*s", dataLen, data);
+ if (caData->targetBucketReturnLen >= 255) {
+ return S3StatusTargetBucketTooLong;
+ }
+ }
+ else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetPrefix")) {
+ caData->targetPrefixReturnLen +=
+ snprintf(&(caData->targetPrefixReturn
+ [caData->targetPrefixReturnLen]),
+ 255 - caData->targetPrefixReturnLen - 1,
+ "%.*s", dataLen, data);
+ if (caData->targetPrefixReturnLen >= 255) {
+ return S3StatusTargetPrefixTooLong;
+ }
+ }
+ else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetGrants/Grant/Grantee/EmailAddress")) {
+ // AmazonCustomerByEmail
+ string_buffer_append(caData->emailAddress, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusEmailAddressTooLong;
+ }
+ }
+ else if (!strcmp(elementPath,
+ "AccessControlPolicy/AccessControlList/Grant/"
+ "Grantee/ID")) {
+ // CanonicalUser
+ string_buffer_append(caData->userId, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusUserIdTooLong;
+ }
+ }
+ else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetGrants/Grant/Grantee/DisplayName")) {
+ // CanonicalUser
+ string_buffer_append(caData->userDisplayName, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusUserDisplayNameTooLong;
+ }
+ }
+ else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetGrants/Grant/Grantee/URI")) {
+ // Group
+ string_buffer_append(caData->groupUri, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusGroupUriTooLong;
+ }
+ }
+ else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetGrants/Grant/Permission")) {
+ // Permission
+ string_buffer_append(caData->permission, data, dataLen, fit);
+ if (!fit) {
+ return S3StatusPermissionTooLong;
+ }
+ }
+ }
+ else {
+ if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/"
+ "TargetGrants/Grant")) {
+ // A grant has just been completed; so add the next S3AclGrant
+ // based on the values read
+ if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) {
+ return S3StatusTooManyGrants;
+ }
+
+ S3AclGrant *grant = &(caData->aclGrants
+ [*(caData->aclGrantCountReturn)]);
+
+ if (caData->emailAddress[0]) {
+ grant->granteeType = S3GranteeTypeAmazonCustomerByEmail;
+ strcpy(grant->grantee.amazonCustomerByEmail.emailAddress,
+ caData->emailAddress);
+ }
+ else if (caData->userId[0] && caData->userDisplayName[0]) {
+ grant->granteeType = S3GranteeTypeCanonicalUser;
+ strcpy(grant->grantee.canonicalUser.id, caData->userId);
+ strcpy(grant->grantee.canonicalUser.displayName,
+ caData->userDisplayName);
+ }
+ else if (caData->groupUri[0]) {
+ if (!strcmp(caData->groupUri,
+ "http://acs.amazonaws.com/groups/global/"
+ "AuthenticatedUsers")) {
+ grant->granteeType = S3GranteeTypeAllAwsUsers;
+ }
+ else if (!strcmp(caData->groupUri,
+ "http://acs.amazonaws.com/groups/global/"
+ "AllUsers")) {
+ grant->granteeType = S3GranteeTypeAllUsers;
+ }
+ else {
+ return S3StatusBadGrantee;
+ }
+ }
+ else {
+ return S3StatusBadGrantee;
+ }
+
+ if (!strcmp(caData->permission, "READ")) {
+ grant->permission = S3PermissionRead;
+ }
+ else if (!strcmp(caData->permission, "WRITE")) {
+ grant->permission = S3PermissionWrite;
+ }
+ else if (!strcmp(caData->permission, "READ_ACP")) {
+ grant->permission = S3PermissionReadACP;
+ }
+ else if (!strcmp(caData->permission, "WRITE_ACP")) {
+ grant->permission = S3PermissionWriteACP;
+ }
+ else if (!strcmp(caData->permission, "FULL_CONTROL")) {
+ grant->permission = S3PermissionFullControl;
+ }
+ else {
+ return S3StatusBadPermission;
+ }
+
+ (*(caData->aclGrantCountReturn))++;
+
+ string_buffer_initialize(caData->emailAddress);
+ string_buffer_initialize(caData->userId);
+ string_buffer_initialize(caData->userDisplayName);
+ string_buffer_initialize(caData->groupUri);
+ string_buffer_initialize(caData->permission);
+ }
+ }
+
+ return S3StatusOK;
}
static S3Status convert_bls(char *blsXml, char *targetBucketReturn,
- char *targetPrefixReturn, int *aclGrantCountReturn,
- S3AclGrant *aclGrants)
+ char *targetPrefixReturn, int *aclGrantCountReturn,
+ S3AclGrant *aclGrants)
{
- ConvertBlsData data;
-
- data.targetBucketReturn = targetBucketReturn;
- data.targetBucketReturn[0] = 0;
- data.targetBucketReturnLen = 0;
- data.targetPrefixReturn = targetPrefixReturn;
- data.targetPrefixReturn[0] = 0;
- data.targetPrefixReturnLen = 0;
- data.aclGrantCountReturn = aclGrantCountReturn;
- data.aclGrants = aclGrants;
- *aclGrantCountReturn = 0;
- string_buffer_initialize(data.emailAddress);
- string_buffer_initialize(data.userId);
- string_buffer_initialize(data.userDisplayName);
- string_buffer_initialize(data.groupUri);
- string_buffer_initialize(data.permission);
-
- // Use a simplexml parser
- SimpleXml simpleXml;
- simplexml_initialize(&simpleXml, &convertBlsXmlCallback, &data);
-
- S3Status status = simplexml_add(&simpleXml, blsXml, strlen(blsXml));
-
- simplexml_deinitialize(&simpleXml);
-
- return status;
+ ConvertBlsData data;
+
+ data.targetBucketReturn = targetBucketReturn;
+ data.targetBucketReturn[0] = 0;
+ data.targetBucketReturnLen = 0;
+ data.targetPrefixReturn = targetPrefixReturn;
+ data.targetPrefixReturn[0] = 0;
+ data.targetPrefixReturnLen = 0;
+ data.aclGrantCountReturn = aclGrantCountReturn;
+ data.aclGrants = aclGrants;
+ *aclGrantCountReturn = 0;
+ string_buffer_initialize(data.emailAddress);
+ string_buffer_initialize(data.userId);
+ string_buffer_initialize(data.userDisplayName);
+ string_buffer_initialize(data.groupUri);
+ string_buffer_initialize(data.permission);
+
+ // Use a simplexml parser
+ SimpleXml simpleXml;
+ simplexml_initialize(&simpleXml, &convertBlsXmlCallback, &data);
+
+ S3Status status = simplexml_add(&simpleXml, blsXml, strlen(blsXml));
+
+ simplexml_deinitialize(&simpleXml);
+
+ return status;
}
@@ -236,119 +236,119 @@ static S3Status convert_bls(char *blsXml, char *targetBucketReturn,
typedef struct GetBlsData
{
- SimpleXml simpleXml;
+ SimpleXml simpleXml;
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- char *targetBucketReturn;
- char *targetPrefixReturn;
- int *aclGrantCountReturn;
- S3AclGrant *aclGrants;
- string_buffer(blsXmlDocument, BLS_XML_DOC_MAXSIZE);
+ char *targetBucketReturn;
+ char *targetPrefixReturn;
+ int *aclGrantCountReturn;
+ S3AclGrant *aclGrants;
+ string_buffer(blsXmlDocument, BLS_XML_DOC_MAXSIZE);
} GetBlsData;
static S3Status getBlsPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- GetBlsData *gsData = (GetBlsData *) callbackData;
-
- return (*(gsData->responsePropertiesCallback))
- (responseProperties, gsData->callbackData);
+ GetBlsData *gsData = (GetBlsData *) callbackData;
+
+ return (*(gsData->responsePropertiesCallback))
+ (responseProperties, gsData->callbackData);
}
static S3Status getBlsDataCallback(int bufferSize, const char *buffer,
- void *callbackData)
+ void *callbackData)
{
- GetBlsData *gsData = (GetBlsData *) callbackData;
+ GetBlsData *gsData = (GetBlsData *) callbackData;
- int fit;
+ int fit;
- string_buffer_append(gsData->blsXmlDocument, buffer, bufferSize, fit);
-
- return fit ? S3StatusOK : S3StatusXmlDocumentTooLarge;
+ string_buffer_append(gsData->blsXmlDocument, buffer, bufferSize, fit);
+
+ return fit ? S3StatusOK : S3StatusXmlDocumentTooLarge;
}
static void getBlsCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- GetBlsData *gsData = (GetBlsData *) callbackData;
+ GetBlsData *gsData = (GetBlsData *) callbackData;
- if (requestStatus == S3StatusOK) {
- // Parse the document
- requestStatus = convert_bls
- (gsData->blsXmlDocument, gsData->targetBucketReturn,
- gsData->targetPrefixReturn, gsData->aclGrantCountReturn,
- gsData->aclGrants);
- }
+ if (requestStatus == S3StatusOK) {
+ // Parse the document
+ requestStatus = convert_bls
+ (gsData->blsXmlDocument, gsData->targetBucketReturn,
+ gsData->targetPrefixReturn, gsData->aclGrantCountReturn,
+ gsData->aclGrants);
+ }
- (*(gsData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, gsData->callbackData);
+ (*(gsData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, gsData->callbackData);
- free(gsData);
+ free(gsData);
}
void S3_get_server_access_logging(const S3BucketContext *bucketContext,
- char *targetBucketReturn,
- char *targetPrefixReturn,
- int *aclGrantCountReturn,
- S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler,
- void *callbackData)
+ char *targetBucketReturn,
+ char *targetPrefixReturn,
+ int *aclGrantCountReturn,
+ S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler,
+ void *callbackData)
{
- // Create the callback data
- GetBlsData *gsData = (GetBlsData *) malloc(sizeof(GetBlsData));
- if (!gsData) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- gsData->responsePropertiesCallback = handler->propertiesCallback;
- gsData->responseCompleteCallback = handler->completeCallback;
- gsData->callbackData = callbackData;
-
- gsData->targetBucketReturn = targetBucketReturn;
- gsData->targetPrefixReturn = targetPrefixReturn;
- gsData->aclGrantCountReturn = aclGrantCountReturn;
- gsData->aclGrants = aclGrants;
- string_buffer_initialize(gsData->blsXmlDocument);
- *aclGrantCountReturn = 0;
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypeGET, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- 0, // key
- 0, // queryParams
- "logging", // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &getBlsPropertiesCallback, // propertiesCallback
- 0, // toS3Callback
- 0, // toS3CallbackTotalSize
- &getBlsDataCallback, // fromS3Callback
- &getBlsCompleteCallback, // completeCallback
- gsData // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ // Create the callback data
+ GetBlsData *gsData = (GetBlsData *) malloc(sizeof(GetBlsData));
+ if (!gsData) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ gsData->responsePropertiesCallback = handler->propertiesCallback;
+ gsData->responseCompleteCallback = handler->completeCallback;
+ gsData->callbackData = callbackData;
+
+ gsData->targetBucketReturn = targetBucketReturn;
+ gsData->targetPrefixReturn = targetPrefixReturn;
+ gsData->aclGrantCountReturn = aclGrantCountReturn;
+ gsData->aclGrants = aclGrants;
+ string_buffer_initialize(gsData->blsXmlDocument);
+ *aclGrantCountReturn = 0;
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypeGET, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ 0, // key
+ 0, // queryParams
+ "logging", // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &getBlsPropertiesCallback, // propertiesCallback
+ 0, // toS3Callback
+ 0, // toS3CallbackTotalSize
+ &getBlsDataCallback, // fromS3Callback
+ &getBlsCompleteCallback, // completeCallback
+ gsData // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
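
A hypothetical caller for the getter above, sketched under the assumption that the declarations in inc/libs3.h match what this hunk uses (S3ResponseHandler with propertiesCallback/completeCallback members, S3AclGrant, S3_MAX_ACL_GRANT_COUNT). The 256-byte buffers are an assumption; the parser above writes at most 255 bytes plus a terminator into the target bucket and prefix returns.

    #include <stdio.h>
    #include "libs3.h"

    // Completion callback: only checks the final status.
    static void responseComplete(S3Status status, const S3ErrorDetails *error,
                                 void *callbackData)
    {
        (void) error; (void) callbackData;
        if (status != S3StatusOK) {
            fprintf(stderr, "request failed: %d\n", status);
        }
    }

    void show_logging_status(const S3BucketContext *bucketContext)
    {
        char targetBucket[256] = { 0 };   // the parser writes at most 255 bytes
        char targetPrefix[256] = { 0 };
        int grantCount = 0;
        S3AclGrant grants[S3_MAX_ACL_GRANT_COUNT];

        S3ResponseHandler handler =
        {
            .propertiesCallback = 0,
            .completeCallback = &responseComplete
        };

        // NULL request context: the buffers are filled in before this returns.
        S3_get_server_access_logging(bucketContext, targetBucket, targetPrefix,
                                     &grantCount, grants, 0, &handler, 0);

        if (targetBucket[0]) {
            printf("logging to %s/%s (%d target grants)\n",
                   targetBucket, targetPrefix, grantCount);
        }
        else {
            printf("server access logging is disabled\n");
        }
    }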
@@ -356,201 +356,201 @@ void S3_get_server_access_logging(const S3BucketContext *bucketContext,
// set server access logging---------------------------------------------------
static S3Status generateSalXmlDocument(const char *targetBucket,
- const char *targetPrefix,
- int aclGrantCount,
- const S3AclGrant *aclGrants,
- int *xmlDocumentLenReturn,
- char *xmlDocument,
- int xmlDocumentBufferSize)
+ const char *targetPrefix,
+ int aclGrantCount,
+ const S3AclGrant *aclGrants,
+ int *xmlDocumentLenReturn,
+ char *xmlDocument,
+ int xmlDocumentBufferSize)
{
- *xmlDocumentLenReturn = 0;
-
-#define append(fmt, ...) \
- do { \
- *xmlDocumentLenReturn += snprintf \
- (&(xmlDocument[*xmlDocumentLenReturn]), \
- xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \
- fmt, __VA_ARGS__); \
- if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) { \
- return S3StatusXmlDocumentTooLarge; \
- } \
- } while (0)
-
- append("%s", "<BucketLoggingStatus "
- "xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\">");
-
- if (targetBucket && targetBucket[0]) {
- append("<LoggingEnabled><TargetBucket>%s</TargetBucket>", targetBucket);
- append("<TargetPrefix>%s</TargetPrefix>",
- targetPrefix ? targetPrefix : "");
-
- if (aclGrantCount) {
- append("%s", "<TargetGrants>");
- int i;
- for (i = 0; i < aclGrantCount; i++) {
- append("%s", "<Grant><Grantee "
- "xmlns:xsi=\"http://www.w3.org/2001/"
- "XMLSchema-instance\" xsi:type=\"");
- const S3AclGrant *grant = &(aclGrants[i]);
- switch (grant->granteeType) {
- case S3GranteeTypeAmazonCustomerByEmail:
- append("AmazonCustomerByEmail\"><EmailAddress>%s"
- "</EmailAddress>",
- grant->grantee.amazonCustomerByEmail.emailAddress);
- break;
- case S3GranteeTypeCanonicalUser:
- append("CanonicalUser\"><ID>%s</ID><DisplayName>%s"
- "</DisplayName>",
- grant->grantee.canonicalUser.id,
- grant->grantee.canonicalUser.displayName);
- break;
- default: // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers:
- append("Group\"><URI>http://acs.amazonaws.com/groups/"
- "global/%s</URI>",
- (grant->granteeType == S3GranteeTypeAllAwsUsers) ?
- "AuthenticatedUsers" : "AllUsers");
- break;
- }
- append("</Grantee><Permission>%s</Permission></Grant>",
- ((grant->permission == S3PermissionRead) ? "READ" :
- (grant->permission == S3PermissionWrite) ? "WRITE" :
- (grant->permission ==
- S3PermissionReadACP) ? "READ_ACP" :
- (grant->permission ==
- S3PermissionWriteACP) ? "WRITE_ACP" : "FULL_CONTROL"));
- }
- append("%s", "</TargetGrants>");
- }
- append("%s", "</LoggingEnabled>");
- }
-
- append("%s", "</BucketLoggingStatus>");
-
- return S3StatusOK;
+ *xmlDocumentLenReturn = 0;
+
+#define append(fmt, ...) \
+ do { \
+ *xmlDocumentLenReturn += snprintf \
+ (&(xmlDocument[*xmlDocumentLenReturn]), \
+ xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \
+ fmt, __VA_ARGS__); \
+ if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) { \
+ return S3StatusXmlDocumentTooLarge; \
+ } \
+ } while (0)
+
+ append("%s", "<BucketLoggingStatus "
+ "xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\">");
+
+ if (targetBucket && targetBucket[0]) {
+ append("<LoggingEnabled><TargetBucket>%s</TargetBucket>", targetBucket);
+ append("<TargetPrefix>%s</TargetPrefix>",
+ targetPrefix ? targetPrefix : "");
+
+ if (aclGrantCount) {
+ append("%s", "<TargetGrants>");
+ int i;
+ for (i = 0; i < aclGrantCount; i++) {
+ append("%s", "<Grant><Grantee "
+ "xmlns:xsi=\"http://www.w3.org/2001/"
+ "XMLSchema-instance\" xsi:type=\"");
+ const S3AclGrant *grant = &(aclGrants[i]);
+ switch (grant->granteeType) {
+ case S3GranteeTypeAmazonCustomerByEmail:
+ append("AmazonCustomerByEmail\"><EmailAddress>%s"
+ "</EmailAddress>",
+ grant->grantee.amazonCustomerByEmail.emailAddress);
+ break;
+ case S3GranteeTypeCanonicalUser:
+ append("CanonicalUser\"><ID>%s</ID><DisplayName>%s"
+ "</DisplayName>",
+ grant->grantee.canonicalUser.id,
+ grant->grantee.canonicalUser.displayName);
+ break;
+ default: // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers:
+ append("Group\"><URI>http://acs.amazonaws.com/groups/"
+ "global/%s</URI>",
+ (grant->granteeType == S3GranteeTypeAllAwsUsers) ?
+ "AuthenticatedUsers" : "AllUsers");
+ break;
+ }
+ append("</Grantee><Permission>%s</Permission></Grant>",
+ ((grant->permission == S3PermissionRead) ? "READ" :
+ (grant->permission == S3PermissionWrite) ? "WRITE" :
+ (grant->permission ==
+ S3PermissionReadACP) ? "READ_ACP" :
+ (grant->permission ==
+ S3PermissionWriteACP) ? "WRITE_ACP" : "FULL_CONTROL"));
+ }
+ append("%s", "</TargetGrants>");
+ }
+ append("%s", "</LoggingEnabled>");
+ }
+
+ append("%s", "</BucketLoggingStatus>");
+
+ return S3StatusOK;
}
typedef struct SetSalData
{
- S3ResponsePropertiesCallback *responsePropertiesCallback;
- S3ResponseCompleteCallback *responseCompleteCallback;
- void *callbackData;
+ S3ResponsePropertiesCallback *responsePropertiesCallback;
+ S3ResponseCompleteCallback *responseCompleteCallback;
+ void *callbackData;
- int salXmlDocumentLen;
- char salXmlDocument[BLS_XML_DOC_MAXSIZE];
- int salXmlDocumentBytesWritten;
+ int salXmlDocumentLen;
+ char salXmlDocument[BLS_XML_DOC_MAXSIZE];
+ int salXmlDocumentBytesWritten;
} SetSalData;
static S3Status setSalPropertiesCallback
- (const S3ResponseProperties *responseProperties, void *callbackData)
+ (const S3ResponseProperties *responseProperties, void *callbackData)
{
- SetSalData *paData = (SetSalData *) callbackData;
-
- return (*(paData->responsePropertiesCallback))
- (responseProperties, paData->callbackData);
+ SetSalData *paData = (SetSalData *) callbackData;
+
+ return (*(paData->responsePropertiesCallback))
+ (responseProperties, paData->callbackData);
}
static int setSalDataCallback(int bufferSize, char *buffer, void *callbackData)
{
- SetSalData *paData = (SetSalData *) callbackData;
+ SetSalData *paData = (SetSalData *) callbackData;
- int remaining = (paData->salXmlDocumentLen -
- paData->salXmlDocumentBytesWritten);
+ int remaining = (paData->salXmlDocumentLen -
+ paData->salXmlDocumentBytesWritten);
- int toCopy = bufferSize > remaining ? remaining : bufferSize;
-
- if (!toCopy) {
- return 0;
- }
+ int toCopy = bufferSize > remaining ? remaining : bufferSize;
+
+ if (!toCopy) {
+ return 0;
+ }
- memcpy(buffer, &(paData->salXmlDocument
- [paData->salXmlDocumentBytesWritten]), toCopy);
+ memcpy(buffer, &(paData->salXmlDocument
+ [paData->salXmlDocumentBytesWritten]), toCopy);
- paData->salXmlDocumentBytesWritten += toCopy;
+ paData->salXmlDocumentBytesWritten += toCopy;
- return toCopy;
+ return toCopy;
}
static void setSalCompleteCallback(S3Status requestStatus,
- const S3ErrorDetails *s3ErrorDetails,
- void *callbackData)
+ const S3ErrorDetails *s3ErrorDetails,
+ void *callbackData)
{
- SetSalData *paData = (SetSalData *) callbackData;
+ SetSalData *paData = (SetSalData *) callbackData;
- (*(paData->responseCompleteCallback))
- (requestStatus, s3ErrorDetails, paData->callbackData);
+ (*(paData->responseCompleteCallback))
+ (requestStatus, s3ErrorDetails, paData->callbackData);
- free(paData);
+ free(paData);
}
void S3_set_server_access_logging(const S3BucketContext *bucketContext,
- const char *targetBucket,
- const char *targetPrefix, int aclGrantCount,
- const S3AclGrant *aclGrants,
- S3RequestContext *requestContext,
- const S3ResponseHandler *handler,
- void *callbackData)
+ const char *targetBucket,
+ const char *targetPrefix, int aclGrantCount,
+ const S3AclGrant *aclGrants,
+ S3RequestContext *requestContext,
+ const S3ResponseHandler *handler,
+ void *callbackData)
{
- if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) {
- (*(handler->completeCallback))
- (S3StatusTooManyGrants, 0, callbackData);
- return;
- }
-
- SetSalData *data = (SetSalData *) malloc(sizeof(SetSalData));
- if (!data) {
- (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
- return;
- }
-
- // Convert aclGrants to XML document
- S3Status status = generateSalXmlDocument
- (targetBucket, targetPrefix, aclGrantCount, aclGrants,
- &(data->salXmlDocumentLen), data->salXmlDocument,
- sizeof(data->salXmlDocument));
- if (status != S3StatusOK) {
- free(data);
- (*(handler->completeCallback))(status, 0, callbackData);
- return;
- }
-
- data->responsePropertiesCallback = handler->propertiesCallback;
- data->responseCompleteCallback = handler->completeCallback;
- data->callbackData = callbackData;
-
- data->salXmlDocumentBytesWritten = 0;
-
- // Set up the RequestParams
- RequestParams params =
- {
- HttpRequestTypePUT, // httpRequestType
- { bucketContext->bucketName, // bucketName
- bucketContext->protocol, // protocol
- bucketContext->uriStyle, // uriStyle
- bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey }, // secretAccessKey
- 0, // key
- 0, // queryParams
- "logging", // subResource
- 0, // copySourceBucketName
- 0, // copySourceKey
- 0, // getConditions
- 0, // startByte
- 0, // byteCount
- 0, // putProperties
- &setSalPropertiesCallback, // propertiesCallback
- &setSalDataCallback, // toS3Callback
- data->salXmlDocumentLen, // toS3CallbackTotalSize
- 0, // fromS3Callback
- &setSalCompleteCallback, // completeCallback
- data // callbackData
- };
-
- // Perform the request
- request_perform(&params, requestContext);
+ if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) {
+ (*(handler->completeCallback))
+ (S3StatusTooManyGrants, 0, callbackData);
+ return;
+ }
+
+ SetSalData *data = (SetSalData *) malloc(sizeof(SetSalData));
+ if (!data) {
+ (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
+ return;
+ }
+
+ // Convert aclGrants to XML document
+ S3Status status = generateSalXmlDocument
+ (targetBucket, targetPrefix, aclGrantCount, aclGrants,
+ &(data->salXmlDocumentLen), data->salXmlDocument,
+ sizeof(data->salXmlDocument));
+ if (status != S3StatusOK) {
+ free(data);
+ (*(handler->completeCallback))(status, 0, callbackData);
+ return;
+ }
+
+ data->responsePropertiesCallback = handler->propertiesCallback;
+ data->responseCompleteCallback = handler->completeCallback;
+ data->callbackData = callbackData;
+
+ data->salXmlDocumentBytesWritten = 0;
+
+ // Set up the RequestParams
+ RequestParams params =
+ {
+ HttpRequestTypePUT, // httpRequestType
+ { bucketContext->bucketName, // bucketName
+ bucketContext->protocol, // protocol
+ bucketContext->uriStyle, // uriStyle
+ bucketContext->accessKeyId, // accessKeyId
+ bucketContext->secretAccessKey }, // secretAccessKey
+ 0, // key
+ 0, // queryParams
+ "logging", // subResource
+ 0, // copySourceBucketName
+ 0, // copySourceKey
+ 0, // getConditions
+ 0, // startByte
+ 0, // byteCount
+ 0, // putProperties
+ &setSalPropertiesCallback, // propertiesCallback
+ &setSalDataCallback, // toS3Callback
+ data->salXmlDocumentLen, // toS3CallbackTotalSize
+ 0, // fromS3Callback
+ &setSalCompleteCallback, // completeCallback
+ data // callbackData
+ };
+
+ // Perform the request
+ request_perform(&params, requestContext);
}
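
To round out the picture, a small hedged sketch of turning logging on with the setter above; it relies only on the signature visible in this hunk, and the "logs/" prefix is an arbitrary illustration. As the generateSalXmlDocument code shows, an empty or NULL targetBucket produces a bare BucketLoggingStatus document, which turns logging off.

    #include "libs3.h"

    // Enable access logging for the bucket in bucketContext, delivering logs
    // to logBucket under the "logs/" prefix, with no extra target grants.
    void enable_logging(const S3BucketContext *bucketContext, const char *logBucket,
                        const S3ResponseHandler *handler, void *callbackData)
    {
        S3_set_server_access_logging(bucketContext, logBucket, "logs/",
                                     0, 0,   // aclGrantCount, aclGrants
                                     0,      // NULL context: run synchronously
                                     handler, callbackData);

        // Passing a NULL or empty targetBucket instead would send an empty
        // <BucketLoggingStatus> document, disabling logging.
    }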
diff --git a/src/simplexml.c b/src/simplexml.c
index 4411824..bd8616b 100644
--- a/src/simplexml.c
+++ b/src/simplexml.c
@@ -28,12 +28,12 @@
#include <string.h>
#include "simplexml.h"
-// Use libxml2 for parsing XML. XML is severely overused in modern
+// Use libxml2 for parsing XML. XML is severely overused in modern
// computing. It is useful for only a very small subset of tasks, but
// software developers who don't know better and are afraid to go against the
// grain use it for everything, and in most cases, it is completely
// inappropriate. Usually, the document structure is severely under-specified
-// as well, as is the case with S3. We do our best by just caring about the
+// as well, as is the case with S3. We do our best by just caring about the
// most important aspects of the S3 "XML document" responses: the elements and
// their values. The SAX API (just about the lamest API ever devised and
// proof that XML sucks - well, the real proof is how crappy all of the XML
@@ -47,161 +47,161 @@
static xmlEntityPtr saxGetEntity(void *user_data, const xmlChar *name)
{
- (void) user_data;
+ (void) user_data;
- return xmlGetPredefinedEntity(name);
+ return xmlGetPredefinedEntity(name);
}
static void saxStartElement(void *user_data, const xmlChar *nameUtf8,
- const xmlChar **attr)
+ const xmlChar **attr)
{
- (void) attr;
-
- SimpleXml *simpleXml = (SimpleXml *) user_data;
-
- if (simpleXml->status != S3StatusOK) {
- return;
- }
-
- // Assume that name has no non-ASCII in it
- char *name = (char *) nameUtf8;
-
- // Append the element to the element path
- int len = strlen(name);
-
- if ((simpleXml->elementPathLen + len + 1) >=
- (int) sizeof(simpleXml->elementPath)) {
- // Cannot handle this element, stop!
- simpleXml->status = S3StatusXmlParseFailure;
- return;
- }
-
- if (simpleXml->elementPathLen) {
- simpleXml->elementPath[simpleXml->elementPathLen++] = '/';
- }
- strcpy(&(simpleXml->elementPath[simpleXml->elementPathLen]), name);
- simpleXml->elementPathLen += len;
+ (void) attr;
+
+ SimpleXml *simpleXml = (SimpleXml *) user_data;
+
+ if (simpleXml->status != S3StatusOK) {
+ return;
+ }
+
+ // Assume that name has no non-ASCII in it
+ char *name = (char *) nameUtf8;
+
+ // Append the element to the element path
+ int len = strlen(name);
+
+ if ((simpleXml->elementPathLen + len + 1) >=
+ (int) sizeof(simpleXml->elementPath)) {
+ // Cannot handle this element, stop!
+ simpleXml->status = S3StatusXmlParseFailure;
+ return;
+ }
+
+ if (simpleXml->elementPathLen) {
+ simpleXml->elementPath[simpleXml->elementPathLen++] = '/';
+ }
+ strcpy(&(simpleXml->elementPath[simpleXml->elementPathLen]), name);
+ simpleXml->elementPathLen += len;
}
static void saxEndElement(void *user_data, const xmlChar *name)
{
- (void) name;
+ (void) name;
- SimpleXml *simpleXml = (SimpleXml *) user_data;
+ SimpleXml *simpleXml = (SimpleXml *) user_data;
- if (simpleXml->status != S3StatusOK) {
- return;
- }
+ if (simpleXml->status != S3StatusOK) {
+ return;
+ }
- // Call back with 0 data
- simpleXml->status = (*(simpleXml->callback))
- (simpleXml->elementPath, 0, 0, simpleXml->callbackData);
+ // Call back with 0 data
+ simpleXml->status = (*(simpleXml->callback))
+ (simpleXml->elementPath, 0, 0, simpleXml->callbackData);
- while ((simpleXml->elementPathLen > 0) &&
- (simpleXml->elementPath[simpleXml->elementPathLen] != '/')) {
- simpleXml->elementPathLen--;
- }
+ while ((simpleXml->elementPathLen > 0) &&
+ (simpleXml->elementPath[simpleXml->elementPathLen] != '/')) {
+ simpleXml->elementPathLen--;
+ }
- simpleXml->elementPath[simpleXml->elementPathLen] = 0;
+ simpleXml->elementPath[simpleXml->elementPathLen] = 0;
}
static void saxCharacters(void *user_data, const xmlChar *ch, int len)
{
- SimpleXml *simpleXml = (SimpleXml *) user_data;
+ SimpleXml *simpleXml = (SimpleXml *) user_data;
- if (simpleXml->status != S3StatusOK) {
- return;
- }
+ if (simpleXml->status != S3StatusOK) {
+ return;
+ }
- simpleXml->status = (*(simpleXml->callback))
- (simpleXml->elementPath, (char *) ch, len, simpleXml->callbackData);
+ simpleXml->status = (*(simpleXml->callback))
+ (simpleXml->elementPath, (char *) ch, len, simpleXml->callbackData);
}
static void saxError(void *user_data, const char *msg, ...)
{
- (void) msg;
+ (void) msg;
- SimpleXml *simpleXml = (SimpleXml *) user_data;
+ SimpleXml *simpleXml = (SimpleXml *) user_data;
- if (simpleXml->status != S3StatusOK) {
- return;
- }
+ if (simpleXml->status != S3StatusOK) {
+ return;
+ }
- simpleXml->status = S3StatusXmlParseFailure;
+ simpleXml->status = S3StatusXmlParseFailure;
}
static struct _xmlSAXHandler saxHandlerG =
{
- 0, // internalSubsetSAXFunc
- 0, // isStandaloneSAXFunc
- 0, // hasInternalSubsetSAXFunc
- 0, // hasExternalSubsetSAXFunc
- 0, // resolveEntitySAXFunc
- &saxGetEntity, // getEntitySAXFunc
- 0, // entityDeclSAXFunc
- 0, // notationDeclSAXFunc
- 0, // attributeDeclSAXFunc
- 0, // elementDeclSAXFunc
- 0, // unparsedEntityDeclSAXFunc
- 0, // setDocumentLocatorSAXFunc
- 0, // startDocumentSAXFunc
- 0, // endDocumentSAXFunc
- &saxStartElement, // startElementSAXFunc
- &saxEndElement, // endElementSAXFunc
- 0, // referenceSAXFunc
- &saxCharacters, // charactersSAXFunc
- 0, // ignorableWhitespaceSAXFunc
- 0, // processingInstructionSAXFunc
- 0, // commentSAXFunc
- 0, // warningSAXFunc
- &saxError, // errorSAXFunc
- &saxError, // fatalErrorSAXFunc
- 0, // getParameterEntitySAXFunc
- &saxCharacters, // cdataBlockSAXFunc
- 0, // externalSubsetSAXFunc
- 0, // initialized
- 0, // _private
- 0, // startElementNsSAX2Func
- 0, // endElementNsSAX2Func
- 0 // xmlStructuredErrorFunc serror;
+ 0, // internalSubsetSAXFunc
+ 0, // isStandaloneSAXFunc
+ 0, // hasInternalSubsetSAXFunc
+ 0, // hasExternalSubsetSAXFunc
+ 0, // resolveEntitySAXFunc
+ &saxGetEntity, // getEntitySAXFunc
+ 0, // entityDeclSAXFunc
+ 0, // notationDeclSAXFunc
+ 0, // attributeDeclSAXFunc
+ 0, // elementDeclSAXFunc
+ 0, // unparsedEntityDeclSAXFunc
+ 0, // setDocumentLocatorSAXFunc
+ 0, // startDocumentSAXFunc
+ 0, // endDocumentSAXFunc
+ &saxStartElement, // startElementSAXFunc
+ &saxEndElement, // endElementSAXFunc
+ 0, // referenceSAXFunc
+ &saxCharacters, // charactersSAXFunc
+ 0, // ignorableWhitespaceSAXFunc
+ 0, // processingInstructionSAXFunc
+ 0, // commentSAXFunc
+ 0, // warningSAXFunc
+ &saxError, // errorSAXFunc
+ &saxError, // fatalErrorSAXFunc
+ 0, // getParameterEntitySAXFunc
+ &saxCharacters, // cdataBlockSAXFunc
+ 0, // externalSubsetSAXFunc
+ 0, // initialized
+ 0, // _private
+ 0, // startElementNsSAX2Func
+ 0, // endElementNsSAX2Func
+ 0 // xmlStructuredErrorFunc serror;
};
void simplexml_initialize(SimpleXml *simpleXml,
- SimpleXmlCallback *callback, void *callbackData)
+ SimpleXmlCallback *callback, void *callbackData)
{
- simpleXml->callback = callback;
- simpleXml->callbackData = callbackData;
- simpleXml->elementPathLen = 0;
- simpleXml->status = S3StatusOK;
- simpleXml->xmlParser = 0;
+ simpleXml->callback = callback;
+ simpleXml->callbackData = callbackData;
+ simpleXml->elementPathLen = 0;
+ simpleXml->status = S3StatusOK;
+ simpleXml->xmlParser = 0;
}
void simplexml_deinitialize(SimpleXml *simpleXml)
{
- if (simpleXml->xmlParser) {
- xmlFreeParserCtxt(simpleXml->xmlParser);
- }
+ if (simpleXml->xmlParser) {
+ xmlFreeParserCtxt(simpleXml->xmlParser);
+ }
}
S3Status simplexml_add(SimpleXml *simpleXml, const char *data, int dataLen)
{
- if (!simpleXml->xmlParser &&
- (!(simpleXml->xmlParser = xmlCreatePushParserCtxt
- (&saxHandlerG, simpleXml, 0, 0, 0)))) {
- return S3StatusInternalError;
- }
-
- if (xmlParseChunk((xmlParserCtxtPtr) simpleXml->xmlParser,
- data, dataLen, 0)) {
- return S3StatusXmlParseFailure;
- }
-
- return simpleXml->status;
+ if (!simpleXml->xmlParser &&
+ (!(simpleXml->xmlParser = xmlCreatePushParserCtxt
+ (&saxHandlerG, simpleXml, 0, 0, 0)))) {
+ return S3StatusInternalError;
+ }
+
+ if (xmlParseChunk((xmlParserCtxtPtr) simpleXml->xmlParser,
+ data, dataLen, 0)) {
+ return S3StatusXmlParseFailure;
+ }
+
+ return simpleXml->status;
}
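
The three functions above are the whole public surface of this parser, so a short usage sketch may help; it assumes only the declarations from inc/simplexml.h that this file itself includes. The callback receives slash-separated element paths; data is non-NULL for character data and NULL when an element closes.

    #include <stdio.h>
    #include <string.h>
    #include "simplexml.h"

    // Print each element path with its character data; the callback is also
    // invoked with data == NULL whenever an element closes.
    static S3Status dumpCallback(const char *elementPath, const char *data,
                                 int dataLen, void *callbackData)
    {
        (void) callbackData;
        if (data) {
            printf("%s = %.*s\n", elementPath, dataLen, data);
        }
        return S3StatusOK;   // any other status aborts the parse
    }

    int parse_document(const char *xml)
    {
        SimpleXml parser;
        simplexml_initialize(&parser, &dumpCallback, 0);

        // simplexml_add may be fed arbitrary partial chunks; here the whole
        // document goes in at once.
        S3Status status = simplexml_add(&parser, xml, (int) strlen(xml));

        simplexml_deinitialize(&parser);
        return (status == S3StatusOK);
    }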
diff --git a/src/testsimplexml.c b/src/testsimplexml.c
index f163a79..57fba7d 100644
--- a/src/testsimplexml.c
+++ b/src/testsimplexml.c
@@ -31,57 +31,57 @@
#include "simplexml.h"
static S3Status simpleXmlCallback(const char *elementPath, const char *data,
- int dataLen, void *callbackData)
+ int dataLen, void *callbackData)
{
- (void) callbackData;
+ (void) callbackData;
- printf("[%s]: [%.*s]\n", elementPath, dataLen, data);
+ printf("[%s]: [%.*s]\n", elementPath, dataLen, data);
- return S3StatusOK;
+ return S3StatusOK;
}
// The only argument allowed is a specification of the random seed to use
int main(int argc, char **argv)
{
- if (argc > 1) {
- char *arg = argv[1];
- int seed = 0;
- while (*arg) {
- seed *= 10;
- seed += (*arg++ - '0');
- }
-
- srand(seed);
- }
- else {
- srand(time(0));
- }
+ if (argc > 1) {
+ char *arg = argv[1];
+ int seed = 0;
+ while (*arg) {
+ seed *= 10;
+ seed += (*arg++ - '0');
+ }
+
+ srand(seed);
+ }
+ else {
+ srand(time(0));
+ }
- SimpleXml simpleXml;
+ SimpleXml simpleXml;
- simplexml_initialize(&simpleXml, &simpleXmlCallback, 0);
+ simplexml_initialize(&simpleXml, &simpleXmlCallback, 0);
- // Read chunks of 10K from stdin, and then feed them in random chunks
- // to simplexml_add
- char inbuf[10000];
+ // Read chunks of 10K from stdin, and then feed them in random chunks
+ // to simplexml_add
+ char inbuf[10000];
- int amt_read;
- while ((amt_read = fread(inbuf, 1, sizeof(inbuf), stdin)) > 0) {
- char *buf = inbuf;
- while (amt_read) {
- int amt = (rand() % amt_read) + 1;
- S3Status status = simplexml_add(&simpleXml, buf, amt);
- if (status != S3StatusOK) {
- fprintf(stderr, "ERROR: Parse failure: %d\n", status);
- simplexml_deinitialize(&simpleXml);
- return -1;
- }
- buf += amt, amt_read -= amt;
- }
- }
+ int amt_read;
+ while ((amt_read = fread(inbuf, 1, sizeof(inbuf), stdin)) > 0) {
+ char *buf = inbuf;
+ while (amt_read) {
+ int amt = (rand() % amt_read) + 1;
+ S3Status status = simplexml_add(&simpleXml, buf, amt);
+ if (status != S3StatusOK) {
+ fprintf(stderr, "ERROR: Parse failure: %d\n", status);
+ simplexml_deinitialize(&simpleXml);
+ return -1;
+ }
+ buf += amt, amt_read -= amt;
+ }
+ }
- simplexml_deinitialize(&simpleXml);
+ simplexml_deinitialize(&simpleXml);
- return 0;
+ return 0;
}
diff --git a/src/util.c b/src/util.c
index a00899c..0dfa1e3 100644
--- a/src/util.c
+++ b/src/util.c
@@ -29,218 +29,218 @@
#include "util.h"
-// Convenience utility for making the code look nicer. Tests a string
+// Convenience utility for making the code look nicer. Tests a string
// against a format; only the characters specified in the format are
// checked (i.e. if the string is longer than the format, the string still
-// checks out ok). Format characters are:
+// checks out ok). Format characters are:
// d - is a digit
// anything else - is that character
// Returns nonzero if the string checks out, zero if it does not.
static int checkString(const char *str, const char *format)
{
- while (*format) {
- if (*format == 'd') {
- if (!isdigit(*str)) {
- return 0;
- }
- }
- else if (*str != *format) {
- return 0;
- }
- str++, format++;
- }
-
- return 1;
+ while (*format) {
+ if (*format == 'd') {
+ if (!isdigit(*str)) {
+ return 0;
+ }
+ }
+ else if (*str != *format) {
+ return 0;
+ }
+ str++, format++;
+ }
+
+ return 1;
}
int urlEncode(char *dest, const char *src, int maxSrcSize)
{
- static const char *urlSafe = "-_.!~*'()/";
- static const char *hex = "0123456789ABCDEF";
-
- int len = 0;
-
- if (src) while (*src) {
- if (++len > maxSrcSize) {
- return 0;
- }
- const char *urlsafe = urlSafe;
- int isurlsafe = 0;
- while (*urlsafe) {
- if (*urlsafe == *src) {
- isurlsafe = 1;
- break;
- }
- urlsafe++;
- }
- if (isurlsafe || isalnum(*src)) {
- *dest++ = *src++;
- }
- else if (*src == ' ') {
- *dest++ = '+';
- src++;
- }
- else {
- *dest++ = '%';
- *dest++ = hex[*src / 16];
- *dest++ = hex[*src % 16];
- src++;
- }
- }
-
- *dest = 0;
-
- return 1;
+ static const char *urlSafe = "-_.!~*'()/";
+ static const char *hex = "0123456789ABCDEF";
+
+ int len = 0;
+
+ if (src) while (*src) {
+ if (++len > maxSrcSize) {
+ return 0;
+ }
+ const char *urlsafe = urlSafe;
+ int isurlsafe = 0;
+ while (*urlsafe) {
+ if (*urlsafe == *src) {
+ isurlsafe = 1;
+ break;
+ }
+ urlsafe++;
+ }
+ if (isurlsafe || isalnum(*src)) {
+ *dest++ = *src++;
+ }
+ else if (*src == ' ') {
+ *dest++ = '+';
+ src++;
+ }
+ else {
+ *dest++ = '%';
+ *dest++ = hex[*src / 16];
+ *dest++ = hex[*src % 16];
+ src++;
+ }
+ }
+
+ *dest = 0;
+
+ return 1;
}
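
A small usage sketch for the encoder above; the names in it are illustrative only. Output buffer sizing is the caller's responsibility: in the worst case every source byte expands to a three-character %XX escape, and the function always writes a terminating NUL.

    #include <stdio.h>
    #include "util.h"

    void demo_urlEncode(void)
    {
        const char *key = "photos/2008/new year.jpg";
        // Worst case every input byte expands to three output bytes, plus NUL.
        char encoded[3 * 256 + 1];

        if (urlEncode(encoded, key, 256)) {
            // Spaces become '+', '/' is left alone (it is in the urlSafe set):
            // photos/2008/new+year.jpg
            printf("%s\n", encoded);
        }
        else {
            // Returned 0: the source was longer than maxSrcSize.
        }
    }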
int64_t parseIso8601Time(const char *str)
{
- // Check to make sure that it has a valid format
- if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) {
- return -1;
- }
+ // Check to make sure that it has a valid format
+ if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) {
+ return -1;
+ }
#define nextnum() (((*str - '0') * 10) + (*(str + 1) - '0'))
- // Convert it
- struct tm stm;
- memset(&stm, 0, sizeof(stm));
-
- stm.tm_year = (nextnum() - 19) * 100;
- str += 2;
- stm.tm_year += nextnum();
- str += 3;
-
- stm.tm_mon = nextnum() - 1;
- str += 3;
-
- stm.tm_mday = nextnum();
- str += 3;
-
- stm.tm_hour = nextnum();
- str += 3;
-
- stm.tm_min = nextnum();
- str += 3;
-
- stm.tm_sec = nextnum();
- str += 2;
-
- stm.tm_isdst = -1;
-
- int64_t ret = mktime(&stm);
-
- // Skip the millis
-
- if (*str == '.') {
- str++;
- while (isdigit(*str)) {
- str++;
- }
- }
-
- if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
- int sign = (*str++ == '-') ? -1 : 1;
- int hours = nextnum();
- str += 3;
- int minutes = nextnum();
- ret += (-sign * (((hours * 60) + minutes) * 60));
- }
- // Else it should be Z to be a conformant time string, but we just assume
- // that it is rather than enforcing that
-
- return ret;
+ // Convert it
+ struct tm stm;
+ memset(&stm, 0, sizeof(stm));
+
+ stm.tm_year = (nextnum() - 19) * 100;
+ str += 2;
+ stm.tm_year += nextnum();
+ str += 3;
+
+ stm.tm_mon = nextnum() - 1;
+ str += 3;
+
+ stm.tm_mday = nextnum();
+ str += 3;
+
+ stm.tm_hour = nextnum();
+ str += 3;
+
+ stm.tm_min = nextnum();
+ str += 3;
+
+ stm.tm_sec = nextnum();
+ str += 2;
+
+ stm.tm_isdst = -1;
+
+ int64_t ret = mktime(&stm);
+
+ // Skip the millis
+
+ if (*str == '.') {
+ str++;
+ while (isdigit(*str)) {
+ str++;
+ }
+ }
+
+ if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
+ int sign = (*str++ == '-') ? -1 : 1;
+ int hours = nextnum();
+ str += 3;
+ int minutes = nextnum();
+ ret += (-sign * (((hours * 60) + minutes) * 60));
+ }
+ // Else it should be Z to be a conformant time string, but we just assume
+ // that it is rather than enforcing that
+
+ return ret;
}
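
A worked example for the parser above, assuming only what this hunk shows. The function returns -1 when the leading dddd-dd-ddTdd:dd:dd pattern does not match, and it converts via mktime(), so the broken-down time is interpreted in the local time zone and only an explicit +hh:mm/-hh:mm suffix is compensated for.

    #include <stdio.h>
    #include <inttypes.h>
    #include "util.h"

    void demo_parseIso8601Time(void)
    {
        // A typical S3 timestamp: date, time, optional millis, UTC designator.
        int64_t t = parseIso8601Time("2008-12-30T12:29:54.000Z");

        if (t < 0) {
            // -1 means the leading "dddd-dd-ddTdd:dd:dd" pattern did not match.
            return;
        }

        // t is seconds since the epoch as computed by mktime() in the local
        // time zone, adjusted only for an explicit "+hh:mm"/"-hh:mm" suffix.
        printf("%" PRId64 "\n", t);
    }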
uint64_t parseUnsignedInt(const char *str)
{
- // Skip whitespace
- while (isblank(*str)) {
- str++;
- }
+ // Skip whitespace
+ while (isblank(*str)) {
+ str++;
+ }
- uint64_t ret = 0;
+ uint64_t ret = 0;
- while (isdigit(*str)) {
- ret *= 10;
- ret += (*str++ - '0');
- }
+ while (isdigit(*str)) {
+ ret *= 10;
+ ret += (*str++ - '0');
+ }
- return ret;
+ return ret;
}
int base64Encode(const unsigned char *in, int inLen, char *out)
{
- static const char *ENC =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-
- char *original_out = out;
-
- while (inLen) {
- // first 6 bits of char 1
- *out++ = ENC[*in >> 2];
- if (!--inLen) {
- // last 2 bits of char 1, 4 bits of 0
- *out++ = ENC[(*in & 0x3) << 4];
- *out++ = '=';
- *out++ = '=';
- break;
- }
- // last 2 bits of char 1, first 4 bits of char 2
- *out++ = ENC[((*in & 0x3) << 4) | (*(in + 1) >> 4)];
- in++;
- if (!--inLen) {
- // last 4 bits of char 2, 2 bits of 0
- *out++ = ENC[(*in & 0xF) << 2];
- *out++ = '=';
- break;
- }
- // last 4 bits of char 2, first 2 bits of char 3
- *out++ = ENC[((*in & 0xF) << 2) | (*(in + 1) >> 6)];
- in++;
- // last 6 bits of char 3
- *out++ = ENC[*in & 0x3F];
- in++, inLen--;
- }
-
- return (out - original_out);
+ static const char *ENC =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+ char *original_out = out;
+
+ while (inLen) {
+ // first 6 bits of char 1
+ *out++ = ENC[*in >> 2];
+ if (!--inLen) {
+ // last 2 bits of char 1, 4 bits of 0
+ *out++ = ENC[(*in & 0x3) << 4];
+ *out++ = '=';
+ *out++ = '=';
+ break;
+ }
+ // last 2 bits of char 1, first 4 bits of char 2
+ *out++ = ENC[((*in & 0x3) << 4) | (*(in + 1) >> 4)];
+ in++;
+ if (!--inLen) {
+ // last 4 bits of char 2, 2 bits of 0
+ *out++ = ENC[(*in & 0xF) << 2];
+ *out++ = '=';
+ break;
+ }
+ // last 4 bits of char 2, first 2 bits of char 3
+ *out++ = ENC[((*in & 0xF) << 2) | (*(in + 1) >> 6)];
+ in++;
+ // last 6 bits of char 3
+ *out++ = ENC[*in & 0x3F];
+ in++, inLen--;
+ }
+
+ return (out - original_out);
}
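
A sketch of using the encoder above on a 20-byte digest such as a SHA-1/HMAC result. The buffer arithmetic is the usual four output bytes per three input bytes, rounded up, plus a NUL that the caller must add, since base64Encode returns a length but does not terminate the string.

    #include <stdio.h>
    #include "util.h"

    void demo_base64Encode(void)
    {
        const unsigned char digest[20] = { 0 };   // e.g. an HMAC-SHA1 result

        // Size for the padded output (4 bytes per 3 input bytes) plus NUL.
        char b64[((sizeof(digest) + 2) / 3) * 4 + 1];

        int len = base64Encode(digest, sizeof(digest), b64);
        b64[len] = 0;   // base64Encode does not NUL-terminate

        printf("%d base64 characters\n", len);   // 28 for a 20-byte digest
    }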
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
-#define blk0L(i) (block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) \
- | (rol(block->l[i], 8) & 0x00FF00FF))
+#define blk0L(i) (block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) \
+ | (rol(block->l[i], 8) & 0x00FF00FF))
#define blk0B(i) (block->l[i])
-#define blk(i) (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ \
- block->l[(i + 8) & 15] ^ \
- block->l[(i + 2) & 15] ^ \
- block->l[i & 15], 1))
-
-#define R0_L(v, w, x, y, z, i) \
- z += ((w & (x ^ y)) ^ y) + blk0L(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30);
-#define R0_B(v, w, x, y, z, i) \
- z += ((w & (x ^ y)) ^ y) + blk0B(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30);
-#define R1(v, w, x, y, z, i) \
- z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30);
-#define R2(v, w, x, y, z, i) \
- z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
- w = rol(w, 30);
-#define R3(v, w, x, y, z, i) \
- z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
- w = rol(w, 30);
-#define R4(v, w, x, y, z, i) \
- z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
- w = rol(w, 30);
+#define blk(i) (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ \
+ block->l[(i + 8) & 15] ^ \
+ block->l[(i + 2) & 15] ^ \
+ block->l[i & 15], 1))
+
+#define R0_L(v, w, x, y, z, i) \
+ z += ((w & (x ^ y)) ^ y) + blk0L(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30);
+#define R0_B(v, w, x, y, z, i) \
+ z += ((w & (x ^ y)) ^ y) + blk0B(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30);
+#define R1(v, w, x, y, z, i) \
+ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30);
+#define R2(v, w, x, y, z, i) \
+ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+ w = rol(w, 30);
+#define R3(v, w, x, y, z, i) \
+ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+ w = rol(w, 30);
+#define R4(v, w, x, y, z, i) \
+ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+ w = rol(w, 30);
#define R0A_L(i) R0_L(a, b, c, d, e, i)
#define R0B_L(i) R0_L(b, c, d, e, a, i)
@@ -281,137 +281,137 @@ int base64Encode(const unsigned char *in, int inLen, char *out)
static void SHA1_transform(uint32_t state[5], const unsigned char buffer[64])
{
- uint32_t a, b, c, d, e;
-
- typedef union {
- unsigned char c[64];
- uint32_t l[16];
- } u;
-
- unsigned char w[64];
- u *block = (u *) w;
-
- memcpy(block, buffer, 64);
-
- a = state[0];
- b = state[1];
- c = state[2];
- d = state[3];
- e = state[4];
-
- static uint32_t endianness_indicator = 0x1;
- if (((unsigned char *) &endianness_indicator)[0]) {
- R0A_L( 0);
- R0E_L( 1); R0D_L( 2); R0C_L( 3); R0B_L( 4); R0A_L( 5);
- R0E_L( 6); R0D_L( 7); R0C_L( 8); R0B_L( 9); R0A_L(10);
- R0E_L(11); R0D_L(12); R0C_L(13); R0B_L(14); R0A_L(15);
- }
- else {
- R0A_B( 0);
- R0E_B( 1); R0D_B( 2); R0C_B( 3); R0B_B( 4); R0A_B( 5);
- R0E_B( 6); R0D_B( 7); R0C_B( 8); R0B_B( 9); R0A_B(10);
- R0E_B(11); R0D_B(12); R0C_B(13); R0B_B(14); R0A_B(15);
- }
- R1E(16); R1D(17); R1C(18); R1B(19); R2A(20);
- R2E(21); R2D(22); R2C(23); R2B(24); R2A(25);
- R2E(26); R2D(27); R2C(28); R2B(29); R2A(30);
- R2E(31); R2D(32); R2C(33); R2B(34); R2A(35);
- R2E(36); R2D(37); R2C(38); R2B(39); R3A(40);
- R3E(41); R3D(42); R3C(43); R3B(44); R3A(45);
- R3E(46); R3D(47); R3C(48); R3B(49); R3A(50);
- R3E(51); R3D(52); R3C(53); R3B(54); R3A(55);
- R3E(56); R3D(57); R3C(58); R3B(59); R4A(60);
- R4E(61); R4D(62); R4C(63); R4B(64); R4A(65);
- R4E(66); R4D(67); R4C(68); R4B(69); R4A(70);
- R4E(71); R4D(72); R4C(73); R4B(74); R4A(75);
- R4E(76); R4D(77); R4C(78); R4B(79);
-
- state[0] += a;
- state[1] += b;
- state[2] += c;
- state[3] += d;
- state[4] += e;
+ uint32_t a, b, c, d, e;
+
+ typedef union {
+ unsigned char c[64];
+ uint32_t l[16];
+ } u;
+
+ unsigned char w[64];
+ u *block = (u *) w;
+
+ memcpy(block, buffer, 64);
+
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ static uint32_t endianness_indicator = 0x1;
+ if (((unsigned char *) &endianness_indicator)[0]) {
+ R0A_L( 0);
+ R0E_L( 1); R0D_L( 2); R0C_L( 3); R0B_L( 4); R0A_L( 5);
+ R0E_L( 6); R0D_L( 7); R0C_L( 8); R0B_L( 9); R0A_L(10);
+ R0E_L(11); R0D_L(12); R0C_L(13); R0B_L(14); R0A_L(15);
+ }
+ else {
+ R0A_B( 0);
+ R0E_B( 1); R0D_B( 2); R0C_B( 3); R0B_B( 4); R0A_B( 5);
+ R0E_B( 6); R0D_B( 7); R0C_B( 8); R0B_B( 9); R0A_B(10);
+ R0E_B(11); R0D_B(12); R0C_B(13); R0B_B(14); R0A_B(15);
+ }
+ R1E(16); R1D(17); R1C(18); R1B(19); R2A(20);
+ R2E(21); R2D(22); R2C(23); R2B(24); R2A(25);
+ R2E(26); R2D(27); R2C(28); R2B(29); R2A(30);
+ R2E(31); R2D(32); R2C(33); R2B(34); R2A(35);
+ R2E(36); R2D(37); R2C(38); R2B(39); R3A(40);
+ R3E(41); R3D(42); R3C(43); R3B(44); R3A(45);
+ R3E(46); R3D(47); R3C(48); R3B(49); R3A(50);
+ R3E(51); R3D(52); R3C(53); R3B(54); R3A(55);
+ R3E(56); R3D(57); R3C(58); R3B(59); R4A(60);
+ R4E(61); R4D(62); R4C(63); R4B(64); R4A(65);
+ R4E(66); R4D(67); R4C(68); R4B(69); R4A(70);
+ R4E(71); R4D(72); R4C(73); R4B(74); R4A(75);
+ R4E(76); R4D(77); R4C(78); R4B(79);
+
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
}
typedef struct
{
- uint32_t state[5];
- uint32_t count[2];
- unsigned char buffer[64];
+ uint32_t state[5];
+ uint32_t count[2];
+ unsigned char buffer[64];
} SHA1Context;
static void SHA1_init(SHA1Context *context)
{
- context->state[0] = 0x67452301;
- context->state[1] = 0xEFCDAB89;
- context->state[2] = 0x98BADCFE;
- context->state[3] = 0x10325476;
- context->state[4] = 0xC3D2E1F0;
- context->count[0] = context->count[1] = 0;
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+ context->count[0] = context->count[1] = 0;
}
static void SHA1_update(SHA1Context *context, const unsigned char *data,
- unsigned int len)
+ unsigned int len)
{
- uint32_t i, j;
+ uint32_t i, j;
- j = (context->count[0] >> 3) & 63;
+ j = (context->count[0] >> 3) & 63;
- if ((context->count[0] += len << 3) < (len << 3)) {
- context->count[1]++;
- }
+ if ((context->count[0] += len << 3) < (len << 3)) {
+ context->count[1]++;
+ }
- context->count[1] += (len >> 29);
+ context->count[1] += (len >> 29);
- if ((j + len) > 63) {
- memcpy(&(context->buffer[j]), data, (i = 64 - j));
- SHA1_transform(context->state, context->buffer);
- for ( ; (i + 63) < len; i += 64) {
- SHA1_transform(context->state, &(data[i]));
- }
- j = 0;
- }
- else {
- i = 0;
- }
+ if ((j + len) > 63) {
+ memcpy(&(context->buffer[j]), data, (i = 64 - j));
+ SHA1_transform(context->state, context->buffer);
+ for ( ; (i + 63) < len; i += 64) {
+ SHA1_transform(context->state, &(data[i]));
+ }
+ j = 0;
+ }
+ else {
+ i = 0;
+ }
- memcpy(&(context->buffer[j]), &(data[i]), len - i);
+ memcpy(&(context->buffer[j]), &(data[i]), len - i);
}
static void SHA1_final(unsigned char digest[20], SHA1Context *context)
{
- uint32_t i;
- unsigned char finalcount[8];
+ uint32_t i;
+ unsigned char finalcount[8];
- for (i = 0; i < 8; i++) {
- finalcount[i] = (unsigned char)
- ((context->count[(i >= 4 ? 0 : 1)] >>
- ((3 - (i & 3)) * 8)) & 255);
- }
+ for (i = 0; i < 8; i++) {
+ finalcount[i] = (unsigned char)
+ ((context->count[(i >= 4 ? 0 : 1)] >>
+ ((3 - (i & 3)) * 8)) & 255);
+ }
- SHA1_update(context, (unsigned char *) "\200", 1);
+ SHA1_update(context, (unsigned char *) "\200", 1);
- while ((context->count[0] & 504) != 448) {
- SHA1_update(context, (unsigned char *) "\0", 1);
- }
+ while ((context->count[0] & 504) != 448) {
+ SHA1_update(context, (unsigned char *) "\0", 1);
+ }
- SHA1_update(context, finalcount, 8);
+ SHA1_update(context, finalcount, 8);
- for (i = 0; i < 20; i++) {
- digest[i] = (unsigned char)
- ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
- }
+ for (i = 0; i < 20; i++) {
+ digest[i] = (unsigned char)
+ ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
+ }
- memset(context->buffer, 0, 64);
- memset(context->state, 0, 20);
- memset(context->count, 0, 8);
- memset(&finalcount, 0, 8);
+ memset(context->buffer, 0, 64);
+ memset(context->state, 0, 20);
+ memset(context->count, 0, 8);
+ memset(&finalcount, 0, 8);
- SHA1_transform(context->state, context->buffer);
+ SHA1_transform(context->state, context->buffer);
}
@@ -424,138 +424,138 @@ static void SHA1_final(unsigned char digest[20], SHA1Context *context)
//
// HMAC(K,m) = SHA1((K ^ OPAD) . SHA1((K ^ IPAD) . m))
void HMAC_SHA1(unsigned char hmac[20], const unsigned char *key, int key_len,
- const unsigned char *message, int message_len)
+ const unsigned char *message, int message_len)
{
- unsigned char kopad[64], kipad[64];
- int i;
-
- if (key_len > 64) {
- key_len = 64;
- }
-
- for (i = 0; i < key_len; i++) {
- kopad[i] = key[i] ^ 0x5c;
- kipad[i] = key[i] ^ 0x36;
- }
-
- for ( ; i < 64; i++) {
- kopad[i] = 0 ^ 0x5c;
- kipad[i] = 0 ^ 0x36;
- }
-
- unsigned char digest[20];
-
- SHA1Context context;
-
- SHA1_init(&context);
- SHA1_update(&context, kipad, 64);
- SHA1_update(&context, message, message_len);
- SHA1_final(digest, &context);
-
- SHA1_init(&context);
- SHA1_update(&context, kopad, 64);
- SHA1_update(&context, digest, 20);
- SHA1_final(hmac, &context);
+ unsigned char kopad[64], kipad[64];
+ int i;
+
+ if (key_len > 64) {
+ key_len = 64;
+ }
+
+ for (i = 0; i < key_len; i++) {
+ kopad[i] = key[i] ^ 0x5c;
+ kipad[i] = key[i] ^ 0x36;
+ }
+
+ for ( ; i < 64; i++) {
+ kopad[i] = 0 ^ 0x5c;
+ kipad[i] = 0 ^ 0x36;
+ }
+
+ unsigned char digest[20];
+
+ SHA1Context context;
+
+ SHA1_init(&context);
+ SHA1_update(&context, kipad, 64);
+ SHA1_update(&context, message, message_len);
+ SHA1_final(digest, &context);
+
+ SHA1_init(&context);
+ SHA1_update(&context, kopad, 64);
+ SHA1_update(&context, digest, 20);
+ SHA1_final(hmac, &context);
}
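
Putting the two utilities together, here is a hedged sketch of the signing step a libs3 request ultimately needs: HMAC-SHA1 over a canonical string keyed with the secret access key, then base64. The literal string-to-sign below is only a placeholder; the real canonicalization lives in src/request.c and is not part of this hunk.

    #include <string.h>
    #include "util.h"

    // Sign a request string: HMAC-SHA1 keyed with the secret key, then base64.
    void sign_string(const char *secretAccessKey,
                     char *signatureOut /* at least 29 bytes */)
    {
        const char *stringToSign =
            "GET\n\n\nTue, 30 Dec 2008 12:29:54 GMT\n/bucket/key";  // stand-in

        unsigned char hmac[20];
        HMAC_SHA1(hmac,
                  (const unsigned char *) secretAccessKey,
                  (int) strlen(secretAccessKey),
                  (const unsigned char *) stringToSign,
                  (int) strlen(stringToSign));

        int len = base64Encode(hmac, 20, signatureOut);
        signatureOut[len] = 0;   // base64Encode does not NUL-terminate
    }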
#define rot(x,k) (((x) << (k)) | ((x) >> (32 - (k))))
uint64_t hash(const unsigned char *k, int length)
{
- uint32_t a, b, c;
-
- a = b = c = 0xdeadbeef + ((uint32_t) length);
-
- static uint32_t endianness_indicator = 0x1;
- if (((unsigned char *) &endianness_indicator)[0]) {
- while (length > 12) {
- a += k[0];
- a += ((uint32_t) k[1]) << 8;
- a += ((uint32_t) k[2]) << 16;
- a += ((uint32_t) k[3]) << 24;
- b += k[4];
- b += ((uint32_t) k[5]) << 8;
- b += ((uint32_t) k[6]) << 16;
- b += ((uint32_t) k[7]) << 24;
- c += k[8];
- c += ((uint32_t) k[9]) << 8;
- c += ((uint32_t) k[10]) << 16;
- c += ((uint32_t) k[11]) << 24;
- a -= c; a ^= rot(c, 4); c += b;
- b -= a; b ^= rot(a, 6); a += c;
- c -= b; c ^= rot(b, 8); b += a;
- a -= c; a ^= rot(c, 16); c += b;
- b -= a; b ^= rot(a, 19); a += c;
- c -= b; c ^= rot(b, 4); b += a;
- length -= 12;
- k += 12;
- }
-
- switch(length) {
- case 12: c += ((uint32_t) k[11]) << 24;
- case 11: c += ((uint32_t) k[10]) << 16;
- case 10: c += ((uint32_t) k[9]) << 8;
- case 9 : c += k[8];
- case 8 : b += ((uint32_t) k[7]) << 24;
- case 7 : b += ((uint32_t) k[6]) << 16;
- case 6 : b += ((uint32_t) k[5]) << 8;
- case 5 : b += k[4];
- case 4 : a += ((uint32_t) k[3]) << 24;
- case 3 : a += ((uint32_t) k[2]) << 16;
- case 2 : a += ((uint32_t) k[1]) << 8;
- case 1 : a += k[0]; break;
- case 0 : goto end;
- }
- }
- else {
- while (length > 12) {
- a += ((uint32_t) k[0]) << 24;
- a += ((uint32_t) k[1]) << 16;
- a += ((uint32_t) k[2]) << 8;
- a += ((uint32_t) k[3]);
- b += ((uint32_t) k[4]) << 24;
- b += ((uint32_t) k[5]) << 16;
- b += ((uint32_t) k[6]) << 8;
- b += ((uint32_t) k[7]);
- c += ((uint32_t) k[8]) << 24;
- c += ((uint32_t) k[9]) << 16;
- c += ((uint32_t) k[10]) << 8;
- c += ((uint32_t) k[11]);
- a -= c; a ^= rot(c, 4); c += b;
- b -= a; b ^= rot(a, 6); a += c;
- c -= b; c ^= rot(b, 8); b += a;
- a -= c; a ^= rot(c, 16); c += b;
- b -= a; b ^= rot(a, 19); a += c;
- c -= b; c ^= rot(b, 4); b += a;
- length -= 12;
- k += 12;
- }
-
- switch(length) {
- case 12: c += k[11];
- case 11: c += ((uint32_t) k[10]) << 8;
- case 10: c += ((uint32_t) k[9]) << 16;
- case 9 : c += ((uint32_t) k[8]) << 24;
- case 8 : b += k[7];
- case 7 : b += ((uint32_t) k[6]) << 8;
- case 6 : b += ((uint32_t) k[5]) << 16;
- case 5 : b += ((uint32_t) k[4]) << 24;
- case 4 : a += k[3];
- case 3 : a += ((uint32_t) k[2]) << 8;
- case 2 : a += ((uint32_t) k[1]) << 16;
- case 1 : a += ((uint32_t) k[0]) << 24; break;
- case 0 : goto end;
- }
- }
-
- c ^= b; c -= rot(b, 14);
- a ^= c; a -= rot(c, 11);
- b ^= a; b -= rot(a, 25);
- c ^= b; c -= rot(b, 16);
- a ^= c; a -= rot(c, 4);
- b ^= a; b -= rot(a, 14);
- c ^= b; c -= rot(b, 24);
+ uint32_t a, b, c;
+
+ a = b = c = 0xdeadbeef + ((uint32_t) length);
+
+ static uint32_t endianness_indicator = 0x1;
+ if (((unsigned char *) &endianness_indicator)[0]) {
+ while (length > 12) {
+ a += k[0];
+ a += ((uint32_t) k[1]) << 8;
+ a += ((uint32_t) k[2]) << 16;
+ a += ((uint32_t) k[3]) << 24;
+ b += k[4];
+ b += ((uint32_t) k[5]) << 8;
+ b += ((uint32_t) k[6]) << 16;
+ b += ((uint32_t) k[7]) << 24;
+ c += k[8];
+ c += ((uint32_t) k[9]) << 8;
+ c += ((uint32_t) k[10]) << 16;
+ c += ((uint32_t) k[11]) << 24;
+ a -= c; a ^= rot(c, 4); c += b;
+ b -= a; b ^= rot(a, 6); a += c;
+ c -= b; c ^= rot(b, 8); b += a;
+ a -= c; a ^= rot(c, 16); c += b;
+ b -= a; b ^= rot(a, 19); a += c;
+ c -= b; c ^= rot(b, 4); b += a;
+ length -= 12;
+ k += 12;
+ }
+
+ switch(length) {
+ case 12: c += ((uint32_t) k[11]) << 24;
+ case 11: c += ((uint32_t) k[10]) << 16;
+ case 10: c += ((uint32_t) k[9]) << 8;
+ case 9 : c += k[8];
+ case 8 : b += ((uint32_t) k[7]) << 24;
+ case 7 : b += ((uint32_t) k[6]) << 16;
+ case 6 : b += ((uint32_t) k[5]) << 8;
+ case 5 : b += k[4];
+ case 4 : a += ((uint32_t) k[3]) << 24;
+ case 3 : a += ((uint32_t) k[2]) << 16;
+ case 2 : a += ((uint32_t) k[1]) << 8;
+ case 1 : a += k[0]; break;
+ case 0 : goto end;
+ }
+ }
+ else {
+ while (length > 12) {
+ a += ((uint32_t) k[0]) << 24;
+ a += ((uint32_t) k[1]) << 16;
+ a += ((uint32_t) k[2]) << 8;
+ a += ((uint32_t) k[3]);
+ b += ((uint32_t) k[4]) << 24;
+ b += ((uint32_t) k[5]) << 16;
+ b += ((uint32_t) k[6]) << 8;
+ b += ((uint32_t) k[7]);
+ c += ((uint32_t) k[8]) << 24;
+ c += ((uint32_t) k[9]) << 16;
+ c += ((uint32_t) k[10]) << 8;
+ c += ((uint32_t) k[11]);
+ a -= c; a ^= rot(c, 4); c += b;
+ b -= a; b ^= rot(a, 6); a += c;
+ c -= b; c ^= rot(b, 8); b += a;
+ a -= c; a ^= rot(c, 16); c += b;
+ b -= a; b ^= rot(a, 19); a += c;
+ c -= b; c ^= rot(b, 4); b += a;
+ length -= 12;
+ k += 12;
+ }
+
+ switch(length) {
+ case 12: c += k[11];
+ case 11: c += ((uint32_t) k[10]) << 8;
+ case 10: c += ((uint32_t) k[9]) << 16;
+ case 9 : c += ((uint32_t) k[8]) << 24;
+ case 8 : b += k[7];
+ case 7 : b += ((uint32_t) k[6]) << 8;
+ case 6 : b += ((uint32_t) k[5]) << 16;
+ case 5 : b += ((uint32_t) k[4]) << 24;
+ case 4 : a += k[3];
+ case 3 : a += ((uint32_t) k[2]) << 8;
+ case 2 : a += ((uint32_t) k[1]) << 16;
+ case 1 : a += ((uint32_t) k[0]) << 24; break;
+ case 0 : goto end;
+ }
+ }
+
+ c ^= b; c -= rot(b, 14);
+ a ^= c; a -= rot(c, 11);
+ b ^= a; b -= rot(a, 25);
+ c ^= b; c -= rot(b, 16);
+ a ^= c; a -= rot(c, 4);
+ b ^= a; b -= rot(a, 14);
+ c ^= b; c -= rot(b, 24);
end:
- return ((((uint64_t) c) << 32) | b);
+ return ((((uint64_t) c) << 32) | b);
}