diff --git a/api.go b/api.go
index 9ba468ec23..133e5b33c3 100644
--- a/api.go
+++ b/api.go
@@ -17,724 +17,45 @@ package minio
 
 import (
-	"bytes"
 	"errors"
 	"io"
-	"io/ioutil"
-	"math"
 	"net/http"
-	"net/url"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
 	"time"
 )
 
-// s3 region map used by bucket location constraint if necessary.
-var regions = map[string]string{
-	"s3.amazonaws.com":                "us-east-1",
-	"s3-us-west-1.amazonaws.com":      "us-west-1",
-	"s3-us-west-2.amazonaws.com":      "us-west-2",
-	"s3-eu-west-1.amazonaws.com":      "eu-west-1",
-	"s3-eu-central-1.amazonaws.com":   "eu-central-1",
-	"s3-ap-southeast-1.amazonaws.com": "ap-southeast-1",
-	"s3-ap-southeast-2.amazonaws.com": "ap-southeast-2",
-	"s3-ap-northeast-1.amazonaws.com": "ap-northeast-1",
-	"s3-sa-east-1.amazonaws.com":      "sa-east-1",
-
-	// Add google cloud storage as one of the regions
-	"storage.googleapis.com": "google",
-}
-
-// getRegion returns a region based on its endpoint mapping.
-func getRegion(host string) (region string) {
-	if _, ok := regions[host]; ok {
-		return regions[host]
-	}
-	// Region cannot be empty according to Amazon S3 for AWS Signature Version 4.
-	return "us-east-1"
-}
-
-// getEndpoint returns a endpoint based on its region.
-func getEndpoint(region string) (endpoint string) {
-	for h, r := range regions {
-		if r == region {
-			return h
-		}
+// SetAppInfo - add application details to user agent.
+func (a *API) SetAppInfo(appName string, appVersion string) {
+	// If app name and version are not set, we do not set a new user agent.
+	if appName != "" && appVersion != "" {
+		appUserAgent := appName + "/" + appVersion
+		a.userAgent = libraryUserAgent + " " + appUserAgent
 	}
-	return "s3.amazonaws.com"
-}
-
-// SignatureType is type of Authorization requested for a given HTTP request.
-type SignatureType int
-
-// Different types of supported signatures - default is Latest i.e SignatureV4.
-const (
-	Latest SignatureType = iota
-	SignatureV4
-	SignatureV2
-)
-
-// isV2 - is signature SignatureV2?
-func (s SignatureType) isV2() bool {
-	return s == SignatureV2
-}
-
-// isV4 - is signature SignatureV4?
-func (s SignatureType) isV4() bool {
-	return s == SignatureV4
 }
 
-// isLatest - is signature Latest?
-func (s SignatureType) isLatest() bool {
-	return s == Latest
-}
-
-// Config - main configuration struct used to set endpoint, credentials, and other options for requests.
-type Config struct {
-	/// Standard options
-	AccessKeyID     string        // AccessKeyID required for authorized requests.
-	SecretAccessKey string        // SecretAccessKey required for authorized requests.
-	Endpoint        string        // host endpoint eg:- https://s3.amazonaws.com
-	Signature       SignatureType // choose a signature type if necessary.
-
-	/// Advanced options
-	// Optional field. If empty, region is determined automatically.
-	// Set to override default behavior.
-	Region string
-
-	/// Really Advanced options
-	//
-	// Set this to override default transport ``http.DefaultTransport``
+// SetCustomTransport - set new custom transport.
+func (a *API) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+	// Set this to override default transport ``http.DefaultTransport``.
 	//
 	// This transport is usually needed for debugging OR to add your own
 	// custom TLS certificates on the client transport, for custom CA's and
 	// certs which are not part of standard certificate authority follow this
-	// example:-
+	// example:
 	//
 	//  tr := &http.Transport{
 	//          TLSClientConfig:    &tls.Config{RootCAs: pool},
 	//          DisableCompression: true,
 	//  }
+	//  api.SetCustomTransport(tr)
 	//
-	Transport http.RoundTripper
-
-	/// Internal options
-	// use SetAppInfo add application details in user agent.
-	userAgent            string
-	isVirtualHostedStyle bool // set when virtual hostnames are on
-}
-
-// Global constants
-const (
-	libraryName    = "minio-go"
-	libraryVersion = "0.2.5"
-)
-
-// User Agent should always following the below style.
-// Please open an issue to discuss any new changes here.
-//
-//       Minio (OS; ARCH) LIB/VER APP/VER
-const (
-	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
-	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
-)
-
-// isAnonymous - True if config doesn't have access and secret keys.
-func (c *Config) isAnonymous() bool {
-	if c.AccessKeyID != "" && c.SecretAccessKey != "" {
-		return false
-	}
-	return true
-}
-
-// setBucketRegion fetches the region and updates config,
-// additionally it also constructs a proper endpoint based on that region.
-func (c *Config) setBucketRegion() error {
-	u, err := url.Parse(c.Endpoint)
-	if err != nil {
-		return err
-	}
-
-	if !c.isVirtualHostedStyle {
-		c.Region = getRegion(u.Host)
-		return nil
-	}
-
-	var bucketName, host string
-	hostIndex := strings.Index(u.Host, "s3")
-	if hostIndex == -1 {
-		hostIndex = strings.Index(u.Host, "storage.googleapis.com")
-	}
-	if hostIndex > 0 {
-		host = u.Host[hostIndex:]
-		bucketName = u.Host[:hostIndex-1]
-	}
-
-	genericGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host)
-	if genericGoogle {
-		// returning standard region for google for now, can be changed in future
-		// to query for region in case it is useful
-		c.Region = getRegion(host)
-		return nil
-	}
-	genericS3, _ := filepath.Match("*.s3.amazonaws.com", u.Host)
-	if !genericS3 {
-		c.Region = getRegion(host)
-		return nil
-	}
-
-	// query aws s3 for the region for case of bucketName.s3.amazonaws.com
-	u.Host = host
-	tempConfig := Config{}
-	tempConfig.AccessKeyID = c.AccessKeyID
-	tempConfig.SecretAccessKey = c.SecretAccessKey
-	tempConfig.Endpoint = u.String()
-	tempConfig.Region = getRegion(u.Host)
-	tempConfig.isVirtualHostedStyle = false
-	s3API := API{s3API{&tempConfig}}
-	region, err := s3API.getBucketLocation(bucketName)
-	if err != nil {
-		return err
-	}
-	// if region returned from getBucketLocation is null
-	// and if genericS3 is enabled - set back to 'us-east-1'.
-	if region == "" {
-		if genericS3 {
-			region = "us-east-1"
-		}
-	}
-	c.Region = region
-	c.setEndpoint(region, bucketName, u.Scheme)
-	return nil
-}
-
-// setEndpoint - construct final endpoint based on region, bucket and scheme
-func (c *Config) setEndpoint(region, bucketName, scheme string) {
-	var host string
-	for k, v := range regions {
-		if region == v {
-			host = k
-		}
-	}
-	// construct the new URL endpoint based on the region.
-	newURL := new(url.URL)
-	newURL.Host = bucketName + "." + host
-	newURL.Scheme = scheme
-	c.Endpoint = newURL.String()
-	return
-}
-
-// API is a container which delegates methods that comply with CloudStorageAPI interface.
-type API struct {
-	s3API
-}
-
-// isVirtualHostedStyle - verify if host is virtual hosted style.
-func isVirtualHostedStyle(host string) bool { - isS3VirtualHost, _ := filepath.Match("*.s3*.amazonaws.com", host) - isGoogleVirtualHost, _ := filepath.Match("*.storage.googleapis.com", host) - return isS3VirtualHost || isGoogleVirtualHost -} - -// New - instantiate minio client API with your input Config{}. -func New(config Config) (CloudStorageAPI, error) { - u, err := url.Parse(config.Endpoint) - if err != nil { - return API{}, err - } - config.isVirtualHostedStyle = isVirtualHostedStyle(u.Host) - // if not region is set, procure it from getBucketRegion if possible. - if config.Region == "" { - if err := config.setBucketRegion(); err != nil { - return API{}, err - } - } - /// Google cloud storage should be set to signature V2, force it if not. - if config.Region == "google" && config.Signature != SignatureV2 { - config.Signature = SignatureV2 - } - // Defaults to our library userAgent. - config.userAgent = libraryUserAgent - return API{s3API{&config}}, nil -} - -// SetAppInfo - add application details to user agent. -func (a API) SetAppInfo(appName string, appVersion string) { - // if app name and version is not set, we do not a new user agent. - if appName != "" && appVersion != "" { - appUserAgent := appName + "/" + appVersion - a.config.userAgent = libraryUserAgent + " " + appUserAgent - } -} - -// PresignedPostPolicy return POST form data that can be used for object upload. -func (a API) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { - if p.expiration.IsZero() { - return nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, errors.New("bucket name must be specified") - } - return a.presignedPostPolicy(p), nil -} - -/// Object operations. - -// PresignedPutObject get a presigned URL to upload an object. -// Expires maximum is 7days - ie. 604800 and minimum is 1. -func (a API) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) { - expireSeconds := int64(expires / time.Second) - if expireSeconds < 1 { - return "", ErrInvalidArgument("expires value cannot be lesser than 1 second") - } - if expireSeconds > 604800 { - return "", ErrInvalidArgument("expires value cannot be greater than 604800 second") - } - return a.presignedPutObject(bucketName, objectName, expireSeconds) -} - -// PresignedGetObject get a presigned URL to retrieve an object for third party apps. -func (a API) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) { - expireSeconds := int64(expires / time.Second) - if expireSeconds < 1 { - return "", ErrInvalidArgument("expires value cannot be lesser than 1 second") - } - if expireSeconds > 604800 { - return "", ErrInvalidArgument("expires value cannot be greater than 604800 second") - } - return a.presignedGetObject(bucketName, objectName, expireSeconds, 0, 0) -} - -// GetObject retrieve object. retrieves full object, if you need ranges use GetPartialObject. -func (a API) GetObject(bucketName, objectName string) (io.ReadSeeker, error) { - if !isValidBucketName(bucketName) { - return nil, ErrInvalidBucketName() - } - if !isValidObjectName(objectName) { - return nil, ErrInvalidObjectName() - } - // get object. - return newObjectReadSeeker(a, bucketName, objectName), nil -} - -// GetPartialObject retrieve partial object. -// -// Takes range arguments to download the specified range bytes of an object. 
-// Setting offset and length = 0 will download the full object. -// For more information about the HTTP Range header, -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 -func (a API) GetPartialObject(bucketName, objectName string, offset, length int64) (io.ReadSeeker, error) { - if !isValidBucketName(bucketName) { - return nil, ErrInvalidBucketName() - } - if !isValidObjectName(objectName) { - return nil, ErrInvalidObjectName() - } - // get partial object. - return newObjectReadSeeker(a, bucketName, objectName), nil -} - -// completedParts is a wrapper to make parts sortable by their part numbers. -// multi part completion requires list of multi parts to be sorted. -type completedParts []completePart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -// minimumPartSize minimum part size per object after which PutObject behaves internally as multipart. -var minimumPartSize int64 = 1024 * 1024 * 5 - -// maxParts - maximum parts for a single multipart session. -var maxParts = int64(10000) - -// maxPartSize - maximum part size for a single multipart upload operation. -var maxPartSize int64 = 1024 * 1024 * 1024 * 5 - -// maxConcurrentQueue - max concurrent upload queue, defaults to number of CPUs - 1. -var maxConcurrentQueue = int(math.Max(float64(runtime.NumCPU())-1, 1)) - -// calculatePartSize - calculate the optimal part size for the given objectSize. -// -// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible -// object storage it will have the following parameters as constants. -// -// maxParts - 10000 -// maximumPartSize - 5GB -// -// if the partSize after division with maxParts is greater than minimumPartSize -// then choose miniumPartSize as the new part size, if not return minimumPartSize. -// -// Special cases -// -// - if input object size is -1 then return maxPartSize. -// - if it happens to be that partSize is indeed bigger -// than the maximum part size just return maxPartSize. -// -func calculatePartSize(objectSize int64) int64 { - // if object size is -1 choose part size as 5GB. - if objectSize == -1 { - return maxPartSize - } - // make sure last part has enough buffer and handle this poperly. - partSize := (objectSize / (maxParts - 1)) - if partSize > minimumPartSize { - if partSize > maxPartSize { - return maxPartSize - } - return partSize - } - return minimumPartSize -} - -// Initiate a fresh multipart upload. -func (a API) newObjectUpload(bucketName, objectName, contentType string, size int64, data io.ReadSeeker) error { - // Initiate a new multipart upload request. - initMultipartUploadResult, err := a.initiateMultipartUpload(bucketName, objectName) - if err != nil { - return err - } - uploadID := initMultipartUploadResult.UploadID - complMultipartUpload := completeMultipartUpload{} - - // Calculate optimal part size for a given size. - partSize := calculatePartSize(size) - // Allocate bufferred error channel for maximum number of parts. - errCh := make(chan error, maxParts) - // Limit multi part queue size to max concurrent queue, defaults to NCPUs - 1. 
- mpQueueCh := make(chan struct{}, maxConcurrentQueue) - defer close(errCh) - defer close(mpQueueCh) - // Allocate a new wait group - wg := new(sync.WaitGroup) - - partNumber := 1 - var isEnableSha256Sum bool - if a.config.Signature.isV4() { - isEnableSha256Sum = true - } - for part := range partsManager(data, partSize, isEnableSha256Sum) { - // Limit to 4 parts a given time. - mpQueueCh <- struct{}{} - // Account for all parts uploaded simultaneousy. - wg.Add(1) - part.Number = partNumber - go func(errCh chan<- error, mpQueueCh <-chan struct{}, part partMetadata) { - defer wg.Done() - defer func() { - <-mpQueueCh - }() - if part.Err != nil { - errCh <- part.Err - return - } - var complPart completePart - complPart, err = a.uploadPart(bucketName, objectName, uploadID, part) - if err != nil { - errCh <- err - return - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart) - errCh <- nil - }(errCh, mpQueueCh, part) - partNumber++ - } - wg.Wait() - if err := <-errCh; err != nil { - return err - } - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = a.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return err - } - return nil -} - -func (a API) listObjectPartsRecursive(bucketName, objectName, uploadID string) <-chan objectPartMetadata { - objectPartCh := make(chan objectPartMetadata, 1000) - go a.listObjectPartsRecursiveInRoutine(bucketName, objectName, uploadID, objectPartCh) - return objectPartCh -} - -func (a API) listObjectPartsRecursiveInRoutine(bucketName, objectName, uploadID string, ch chan<- objectPartMetadata) { - defer close(ch) - listObjPartsResult, err := a.listObjectParts(bucketName, objectName, uploadID, 0, 1000) - if err != nil { - ch <- objectPartMetadata{ - Err: err, - } - return - } - for _, uploadedObjectPart := range listObjPartsResult.ObjectParts { - ch <- uploadedObjectPart - } - for { - if !listObjPartsResult.IsTruncated { - break - } - nextPartNumberMarker := listObjPartsResult.NextPartNumberMarker - listObjPartsResult, err = a.listObjectParts(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) - if err != nil { - ch <- objectPartMetadata{ - Err: err, - } - return - } - for _, uploadedObjectPart := range listObjPartsResult.ObjectParts { - ch <- uploadedObjectPart - } - } -} - -// getTotalMultipartSize - calculate total uploaded size for the a given multipart object. -func (a API) getTotalMultipartSize(bucketName, objectName, uploadID string) (int64, error) { - var size int64 - for part := range a.listObjectPartsRecursive(bucketName, objectName, uploadID) { - if part.Err != nil { - return 0, part.Err - } - size += part.Size - } - return size, nil -} - -// continue previously interrupted multipart upload object at `uploadID` -func (a API) continueObjectUpload(bucketName, objectName, uploadID string, size int64, data io.ReadSeeker) error { - var seekOffset int64 - partNumber := 1 - completeMultipartUpload := completeMultipartUpload{} - for objPart := range a.listObjectPartsRecursive(bucketName, objectName, uploadID) { - if objPart.Err != nil { - return objPart.Err - } - // partNumbers are sorted in listObjectParts. - if partNumber != objPart.PartNumber { - break - } - var completedPart completePart - completedPart.PartNumber = objPart.PartNumber - completedPart.ETag = objPart.ETag - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart) - seekOffset += objPart.Size // Add seek Offset for future Seek to skip entries. 
- partNumber++ // Update partNumber sequentially to verify and skip. - } - - // Calculate the optimal part size for a given size. - partSize := calculatePartSize(size) - // Allocate bufferred error channel for maximum parts. - errCh := make(chan error, maxParts) - // Limit multipart queue size to maxConcurrentQueue. - mpQueueCh := make(chan struct{}, maxConcurrentQueue) - defer close(errCh) - defer close(mpQueueCh) - // Allocate a new wait group. - wg := new(sync.WaitGroup) - - if _, err := data.Seek(seekOffset, 0); err != nil { - return err - } - var isEnableSha256Sum bool - if a.config.Signature.isV4() { - isEnableSha256Sum = true - } - for part := range partsManager(data, partSize, isEnableSha256Sum) { - // Limit to 4 parts a given time. - mpQueueCh <- struct{}{} - // Account for all parts uploaded simultaneousy. - wg.Add(1) - part.Number = partNumber - go func(errCh chan<- error, mpQueueCh <-chan struct{}, part partMetadata) { - defer wg.Done() - defer func() { - <-mpQueueCh - }() - if part.Err != nil { - errCh <- part.Err - return - } - complPart, err := a.uploadPart(bucketName, objectName, uploadID, part) - if err != nil { - errCh <- err - return - } - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) - errCh <- nil - }(errCh, mpQueueCh, part) - partNumber++ - } - wg.Wait() - if err := <-errCh; err != nil { - return err - } - sort.Sort(completedParts(completeMultipartUpload.Parts)) - _, err := a.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) - if err != nil { - return err - } - return nil -} - -// PutObject create an object in a bucket. -// -// You must have WRITE permissions on a bucket to create an object. -// -// - For size lesser than 5MB PutObject automatically does single Put operation. -// - For size equal to 0Bytes PutObject automatically does single Put operation. -// - For size larger than 5MB PutObject automatically does resumable multipart operation. -// - For size input as -1 PutObject treats it as a stream and does multipart operation until -// input stream reaches EOF. Maximum object size that can be uploaded through this operation -// will be 5TB. -// -// NOTE: if you are using Google Cloud Storage. Then there is no resumable multipart -// upload support yet. Currently PutObject will behave like a single PUT operation and would -// only upload for file sizes upto maximum 5GB. (maximum limit for single PUT operation). -// -// For un-authenticated requests S3 doesn't allow multipart upload, so we fall back to single -// PUT operation. -func (a API) PutObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) error { - if !isValidBucketName(bucketName) { - return ErrInvalidBucketName() - } - if !isValidObjectName(objectName) { - return ErrInvalidObjectName() - } - // NOTE: S3 doesn't allow anonymous multipart requests. 
- if strings.Contains(a.config.Endpoint, "amazonaws.com") || strings.Contains(a.config.Endpoint, "googleapis.com") { - if a.config.isAnonymous() { - if size == -1 { - return ErrorResponse{ - Code: "NotImplemented", - Message: "For Anonymous requests Content-Length cannot be '-1'.", - Resource: separator + bucketName + separator + objectName, - } - } - if size > maxPartSize { - return ErrorResponse{ - Code: "EntityTooLarge", - Message: "Your proposed upload exceeds the maximum allowed object size '5GB' for single PUT operation.", - Resource: separator + bucketName + separator + objectName, - } - } - // For anonymous requests, we will not calculate sha256 and md5sum. - putObjMetadata := putObjectMetadata{ - MD5Sum: nil, - Sha256Sum: nil, - ReadCloser: ioutil.NopCloser(data), - Size: size, - ContentType: contentType, - } - _, err := a.putObject(bucketName, objectName, putObjMetadata) - if err != nil { - return err - } - } - } - // Special handling just for Google Cloud Storage. - // TODO - we should remove this in future when we fully implement Resumable object upload. - if strings.Contains(a.config.Endpoint, "googleapis.com") { - if size > maxPartSize { - return ErrorResponse{ - Code: "EntityTooLarge", - Message: "Your proposed upload exceeds the maximum allowed object size '5GB' for single PUT operation.", - Resource: separator + bucketName + separator + objectName, - } - } - putObjMetadata := putObjectMetadata{ - MD5Sum: nil, - Sha256Sum: nil, - ReadCloser: ioutil.NopCloser(data), - Size: size, - ContentType: contentType, - } - // NOTE: with Google Cloud Storage, Content-MD5 is deliberately skipped. - if _, err := a.putObject(bucketName, objectName, putObjMetadata); err != nil { - return err - } - return nil - } - switch { - case size < minimumPartSize && size >= 0: - dataBytes, err := ioutil.ReadAll(data) - if err != nil { - return err - } - if int64(len(dataBytes)) != size { - return ErrorResponse{ - Code: "UnexpectedShortRead", - Message: "Data read ‘" + strconv.FormatInt(int64(len(dataBytes)), 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(size, 10) + "’", - Resource: separator + bucketName + separator + objectName, - } - } - putObjMetadata := putObjectMetadata{ - MD5Sum: sumMD5(dataBytes), - Sha256Sum: sum256(dataBytes), - ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)), - Size: size, - ContentType: contentType, - } - // Single Part use case, use PutObject directly. - _, err = a.putObject(bucketName, objectName, putObjMetadata) - if err != nil { - return err - } - return nil - case size >= minimumPartSize || size == -1: - var inProgress bool - var inProgressUploadID string - for mpUpload := range a.listMultipartUploadsRecursive(bucketName, objectName) { - if mpUpload.Err != nil { - return mpUpload.Err - } - if mpUpload.Key == objectName { - inProgress = true - inProgressUploadID = mpUpload.UploadID - break - } - } - if !inProgress { - return a.newObjectUpload(bucketName, objectName, contentType, size, data) - } - return a.continueObjectUpload(bucketName, objectName, inProgressUploadID, size, data) - } - return errors.New("Unexpected control flow, please report this error at https://github.com/minio/minio-go/issues") -} - -// StatObject verify if object exists and you have permission to access it. 
-func (a API) StatObject(bucketName, objectName string) (ObjectStat, error) {
-	if !isValidBucketName(bucketName) {
-		return ObjectStat{}, ErrInvalidBucketName()
-	}
-	if !isValidObjectName(objectName) {
-		return ObjectStat{}, ErrInvalidObjectName()
-	}
-	return a.headObject(bucketName, objectName)
-}
-
-// RemoveObject remove an object from a bucket.
-func (a API) RemoveObject(bucketName, objectName string) error {
-	if !isValidBucketName(bucketName) {
-		return ErrInvalidBucketName()
-	}
-	if !isValidObjectName(objectName) {
-		return ErrInvalidObjectName()
-	}
-	return a.deleteObject(bucketName, objectName)
+	a.httpTransport = customHTTPTransport
 }
 
 /// Bucket operations
 
 // MakeBucket makes a new bucket.
 //
-// Optional arguments are acl - by default all buckets are created
-// with ``private`` acl.
+// Optional arguments are acl and location - by default all buckets are created
+// with ``private`` acl and in US Standard region.
 //
 // ACL valid values
 //
@@ -743,21 +64,21 @@ func (a API) RemoveObject(bucketName, objectName string) error {
 //  public-read-write - owner gets full access, all others get full access too.
 //  authenticated-read - owner gets full access, authenticated users get read access.
 //
-func (a API) MakeBucket(bucketName string, acl BucketACL) error {
-	if !isValidBucketName(bucketName) {
-		return ErrInvalidBucketName()
+// Region valid values.
+// ------------------
+// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]
+// Defaults to US Standard (us-east-1).
+func (a API) MakeBucket(bucketName string, acl BucketACL, region string) error {
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
 	}
 	if !acl.isValidBucketACL() {
 		return ErrInvalidArgument("Unrecognized ACL " + acl.String())
 	}
-	location := a.config.Region
-	if location == "us-east-1" {
-		location = ""
-	}
-	if location == "google" {
-		location = ""
+	if region == "" {
+		region = "us-east-1"
 	}
-	return a.putBucket(bucketName, string(acl), location)
+	return a.putBucket(bucketName, string(acl), region)
 }
 
 // SetBucketACL set the permissions on an existing bucket using access control lists (ACL).
@@ -769,8 +90,8 @@ func (a API) MakeBucket(bucketName string, acl BucketACL) error {
 //  public-read-write - owner gets full access, all others get full access too.
 //  authenticated-read - owner gets full access, authenticated users get read access.
 func (a API) SetBucketACL(bucketName string, acl BucketACL) error {
-	if !isValidBucketName(bucketName) {
-		return ErrInvalidBucketName()
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
 	}
 	if !acl.isValidBucketACL() {
 		return ErrInvalidArgument("Unrecognized ACL " + acl.String())
@@ -787,47 +108,56 @@ func (a API) SetBucketACL(bucketName string, acl BucketACL) error {
 //  public-read-write - owner gets full access, others get full access too.
 //  authenticated-read - owner gets full access, authenticated users get read access.
 func (a API) GetBucketACL(bucketName string) (BucketACL, error) {
-	if !isValidBucketName(bucketName) {
-		return "", ErrInvalidBucketName()
+	if err := isValidBucketName(bucketName); err != nil {
+		return "", err
 	}
 	policy, err := a.getBucketACL(bucketName)
 	if err != nil {
 		return "", err
 	}
+	// boolean cues to identify the right canned acls.
+	var publicRead, publicWrite bool
+
+	// Handle grants.
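+	// Each grant below is checked against the well-known group URIs:
+	// AllUsers with READ maps to public-read, AllUsers with both READ
+	// and WRITE maps to public-read-write, AuthenticatedUsers with READ
+	// maps to authenticated-read, and owner-only FULL_CONTROL grants
+	// fall through as private.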
 	grants := policy.AccessControlList.Grant
-	switch {
-	case len(grants) == 1:
-		if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
-			return BucketACL("private"), nil
+	for _, g := range grants {
+		if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
+			continue
 		}
-	case len(grants) == 2:
-		for _, g := range grants {
-			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
-				return BucketACL("authenticated-read"), nil
-			}
-			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
-				return BucketACL("public-read"), nil
-			}
-		}
-	case len(grants) == 3:
-		for _, g := range grants {
-			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
-				return BucketACL("public-read-write"), nil
-			}
+		if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+			return BucketACL("authenticated-read"), nil
+		} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+			publicWrite = true
+		} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+			publicRead = true
 		}
 	}
+
+	// Neither public write nor public read is enabled, return private.
+	if !publicWrite && !publicRead {
+		return BucketACL("private"), nil
+	}
+	// Public write is not enabled but public read is, return public-read.
+	if !publicWrite && publicRead {
+		return BucketACL("public-read"), nil
+	}
+	// Both public read and public write are enabled, return public-read-write.
+	if publicRead && publicWrite {
+		return BucketACL("public-read-write"), nil
+	}
+
 	return "", ErrorResponse{
-		Code:      "NoSuchBucketPolicy",
-		Message:   "The specified bucket does not have a bucket policy.",
-		Resource:  separator + bucketName,
-		RequestID: "minio",
+		Code:       "NoSuchBucketPolicy",
+		Message:    "The specified bucket does not have a bucket policy.",
+		BucketName: bucketName,
+		RequestID:  "minio",
 	}
 }
 
 // BucketExists verify if bucket exists and you have permission to access it.
 func (a API) BucketExists(bucketName string) error {
-	if !isValidBucketName(bucketName) {
-		return ErrInvalidBucketName()
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
 	}
 	return a.headBucket(bucketName)
 }
@@ -837,196 +167,26 @@ func (a API) BucketExists(bucketName string) error {
 // All objects (including all object versions and delete markers).
 // in the bucket must be deleted before successfully attempting this request.
func (a API) RemoveBucket(bucketName string) error { - if !isValidBucketName(bucketName) { - return ErrInvalidBucketName() + if err := isValidBucketName(bucketName); err != nil { + return err } return a.deleteBucket(bucketName) } -func (a API) listMultipartUploadsRecursive(bucketName, objectName string) <-chan ObjectMultipartStat { - ch := make(chan ObjectMultipartStat, 1000) - go a.listMultipartUploadsRecursiveInRoutine(bucketName, objectName, ch) - return ch -} - -func (a API) listMultipartUploadsRecursiveInRoutine(bucketName, objectName string, ch chan<- ObjectMultipartStat) { - defer close(ch) - listMultipartUplResult, err := a.listMultipartUploads(bucketName, "", "", objectName, "", 1000) - if err != nil { - ch <- ObjectMultipartStat{ - Err: err, - } - return - } - for _, multiPartUpload := range listMultipartUplResult.Uploads { - ch <- multiPartUpload - } - for { - if !listMultipartUplResult.IsTruncated { - break - } - listMultipartUplResult, err = a.listMultipartUploads(bucketName, - listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, objectName, "", 1000) - if err != nil { - ch <- ObjectMultipartStat{ - Err: err, - } - return - } - for _, multiPartUpload := range listMultipartUplResult.Uploads { - ch <- multiPartUpload - } - } -} - -// listIncompleteUploadsInRoutine is an internal goroutine function called for listing objects. -func (a API) listIncompleteUploadsInRoutine(bucketName, prefix string, recursive bool, ch chan<- ObjectMultipartStat) { - defer close(ch) - if !isValidBucketName(bucketName) { - ch <- ObjectMultipartStat{ - Err: ErrInvalidBucketName(), - } - return - } - switch { - case recursive == true: - var multipartMarker string - var uploadIDMarker string - for { - result, err := a.listMultipartUploads(bucketName, multipartMarker, uploadIDMarker, prefix, "", 1000) - if err != nil { - ch <- ObjectMultipartStat{ - Err: err, - } - return - } - for _, objectSt := range result.Uploads { - // NOTE: getTotalMultipartSize can make listing incomplete uploads slower. - objectSt.Size, err = a.getTotalMultipartSize(bucketName, objectSt.Key, objectSt.UploadID) - if err != nil { - ch <- ObjectMultipartStat{ - Err: err, - } - } - ch <- objectSt - multipartMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - } - if !result.IsTruncated { - break - } - } - default: - var multipartMarker string - var uploadIDMarker string - for { - result, err := a.listMultipartUploads(bucketName, multipartMarker, uploadIDMarker, prefix, separator, 1000) - if err != nil { - ch <- ObjectMultipartStat{ - Err: err, - } - return - } - multipartMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - for _, objectSt := range result.Uploads { - objectSt.Size, err = a.getTotalMultipartSize(bucketName, objectSt.Key, objectSt.UploadID) - if err != nil { - ch <- ObjectMultipartStat{ - Err: err, - } - } - ch <- objectSt - } - for _, prefix := range result.CommonPrefixes { - object := ObjectMultipartStat{} - object.Key = prefix.Prefix - object.Size = 0 - ch <- object - } - if !result.IsTruncated { - break - } - } - } -} - -// ListIncompleteUploads - List incompletely uploaded multipart objects. -// -// ListIncompleteUploads is a channel based API implemented to facilitate -// ease of usage of S3 API ListMultipartUploads() by automatically -// recursively traversing all multipart objects on a given bucket if specified. +// ListBuckets list of all buckets owned by the authenticated sender of the request. 
 //
-// Your input paramters are just bucketName, prefix and recursive.
-// If you enable recursive as 'true' this function will return back all
-// the multipart objects in a given bucket name.
+// This call requires explicit authentication, no anonymous requests are
+// allowed for listing buckets.
 //
 //   api := client.New(....)
-//   recursive := true
-//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) {
 //       fmt.Println(message)
 //   }
 //
-func (a API) ListIncompleteUploads(bucketName, prefix string, recursive bool) <-chan ObjectMultipartStat {
-	objectMultipartStatCh := make(chan ObjectMultipartStat, 1000)
-	go a.listIncompleteUploadsInRoutine(bucketName, prefix, recursive, objectMultipartStatCh)
-	return objectMultipartStatCh
-}
-
-// listObjectsInRoutine is an internal goroutine function called for listing objects.
-// This function feeds data into channel.
-func (a API) listObjectsInRoutine(bucketName, prefix string, recursive bool, ch chan<- ObjectStat) {
-	defer close(ch)
-	if !isValidBucketName(bucketName) {
-		ch <- ObjectStat{
-			Err: ErrInvalidBucketName(),
-		}
-		return
-	}
-	switch {
-	case recursive == true:
-		var marker string
-		for {
-			result, err := a.listObjects(bucketName, marker, prefix, "", 1000)
-			if err != nil {
-				ch <- ObjectStat{
-					Err: err,
-				}
-				return
-			}
-			for _, object := range result.Contents {
-				ch <- object
-				marker = object.Key
-			}
-			if !result.IsTruncated {
-				break
-			}
-		}
-	default:
-		var marker string
-		for {
-			result, err := a.listObjects(bucketName, marker, prefix, separator, 1000)
-			if err != nil {
-				ch <- ObjectStat{
-					Err: err,
-				}
-				return
-			}
-			marker = result.NextMarker
-			for _, object := range result.Contents {
-				ch <- object
-			}
-			for _, prefix := range result.CommonPrefixes {
-				object := ObjectStat{}
-				object.Key = prefix.Prefix
-				object.Size = 0
-				ch <- object
-			}
-			if !result.IsTruncated {
-				break
-			}
-		}
-	}
+func (a API) ListBuckets() <-chan BucketStat {
+	ch := make(chan BucketStat, 100)
+	go a.listBucketsInRoutine(ch)
+	return ch
 }
 
 // ListObjects - (List Objects) - List some objects or all recursively.
@@ -1051,87 +211,133 @@ func (a API) ListObjects(bucketName string, prefix string, recursive bool) <-cha
 	return ch
 }
 
-// listBucketsInRoutine is an internal go routine function called for listing buckets
-// This function feeds data into channel
-func (a API) listBucketsInRoutine(ch chan<- BucketStat) {
-	defer close(ch)
-	listAllMyBucketListResults, err := a.listBuckets()
-	if err != nil {
-		ch <- BucketStat{
-			Err: err,
-		}
-		return
-	}
-	for _, bucket := range listAllMyBucketListResults.Buckets.Bucket {
-		ch <- bucket
-	}
-}
-
-// ListBuckets list of all buckets owned by the authenticated sender of the request.
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
 //
-// This call requires explicit authentication, no anonymous requests are
-// allowed for listing buckets.
+// ListIncompleteUploads is a channel based API implemented to facilitate
+// ease of usage of S3 API ListMultipartUploads() by automatically
+// recursively traversing all multipart objects on a given bucket if specified.
+//
+// Your input parameters are just bucketName, prefix and recursive.
+// If you enable recursive as 'true' this function will return all
+// the multipart objects in a given bucket name.
 //
 //   api := client.New(....)
-//   for message := range api.ListBuckets() {
+//   recursive := true
+//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) {
 //       fmt.Println(message)
 //   }
 //
-func (a API) ListBuckets() <-chan BucketStat {
-	ch := make(chan BucketStat, 100)
-	go a.listBucketsInRoutine(ch)
-	return ch
+func (a API) ListIncompleteUploads(bucketName, prefix string, recursive bool) <-chan ObjectMultipartStat {
+	return a.listIncompleteUploads(bucketName, prefix, recursive)
 }
 
-func (a API) removeIncompleteUploadInRoutine(bucketName, objectName string, errorCh chan<- error) {
-	defer close(errorCh)
-	if !isValidBucketName(bucketName) {
-		errorCh <- ErrInvalidBucketName()
-		return
+// GetObject retrieves an object. It retrieves the full object; if you need ranges, use GetPartialObject.
+func (a API) GetObject(bucketName, objectName string) (io.ReadSeeker, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return nil, err
 	}
-	if !isValidObjectName(objectName) {
-		errorCh <- ErrInvalidObjectName()
-		return
+	if err := isValidObjectName(objectName); err != nil {
+		return nil, err
 	}
-	listMultipartUplResult, err := a.listMultipartUploads(bucketName, "", "", objectName, "", 1000)
-	if err != nil {
-		errorCh <- err
-		return
+	// get object.
+	return newObjectReadSeeker(a, bucketName, objectName), nil
+}
+
+// GetPartialObject retrieve partial object.
+//
+// Takes range arguments to download the specified range bytes of an object.
+// Setting offset and length = 0 will download the full object.
+// For more information about the HTTP Range header,
+// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+func (a API) GetPartialObject(bucketName, objectName string, offset, length int64) (io.ReadSeeker, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	// get partial object.
+	return newObjectReadSeeker(a, bucketName, objectName), nil
+}
+
+// PutObject create an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+//  - For size less than 5MB PutObject automatically does a single Put operation.
+//  - For size equal to 0 bytes PutObject automatically does a single Put operation.
+//  - For size larger than 5MB PutObject automatically does a resumable multipart operation.
+//  - For size input as -1 PutObject treats it as a stream and does multipart operation until
+//    input stream reaches EOF. Maximum object size that can be uploaded through this operation
+//    will be 5TB.
+//
+// NOTE: if you are using Google Cloud Storage, there is no resumable multipart
+// upload support yet. Currently PutObject behaves like a single PUT operation and
+// only uploads file sizes up to a maximum of 5GB (the limit for a single PUT operation).
+//
+// For unauthenticated requests S3 doesn't allow multipart uploads, so we fall back to a single PUT operation.
+func (a API) PutObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) error {
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
 	}
-	for _, multiPartUpload := range listMultipartUplResult.Uploads {
-		if objectName == multiPartUpload.Key {
-			err := a.abortMultipartUpload(bucketName, multiPartUpload.Key, multiPartUpload.UploadID)
-			if err != nil {
-				errorCh <- err
-				return
+	if err := isValidObjectName(objectName); err != nil {
+		return err
+	}
+	// NOTE: S3 doesn't allow anonymous multipart requests.
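+	// The checks below pick an upload strategy in the following order:
+	// anonymous requests to Amazon S3 and all requests to Google Cloud
+	// Storage take a single PUT without checksums, sizes of at least
+	// 5MB (or an unknown size of -1) take the multipart path, and
+	// everything else is uploaded with a single checksummed PUT.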
+	if isAmazonEndpoint(a.endpointURL) && isAnonymousCredentials(*a.credentials) {
+		if size <= -1 {
+			return ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    "For anonymous requests Content-Length cannot be negative.",
+				Key:        objectName,
+				BucketName: bucketName,
 			}
-		}
+		}
+		// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GB in size.
+		return a.putNoChecksum(bucketName, objectName, data, size, contentType)
 	}
-	for {
-		if !listMultipartUplResult.IsTruncated {
-			break
-		}
-		listMultipartUplResult, err = a.listMultipartUploads(bucketName,
-			listMultipartUplResult.NextKeyMarker,
-			listMultipartUplResult.NextUploadIDMarker,
-			objectName, "", 1000)
-		if err != nil {
-			errorCh <- err
-			return
-		}
-		for _, multiPartUpload := range listMultipartUplResult.Uploads {
-			if objectName == multiPartUpload.Key {
-				err := a.abortMultipartUpload(bucketName, multiPartUpload.Key, multiPartUpload.UploadID)
-				if err != nil {
-					errorCh <- err
-					return
-				}
-				return
+	// FIXME: we should remove this in future when we fully implement
+	// resumable object upload for Google Cloud Storage.
+	if isGoogleEndpoint(a.endpointURL) {
+		if size <= -1 {
+			return ErrorResponse{
+				Code:       "NotImplemented",
+				Message:    "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
+				Key:        objectName,
+				BucketName: bucketName,
 			}
 		}
 	}
+		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GB in size.
+		return a.putNoChecksum(bucketName, objectName, data, size, contentType)
+	}
+	// A large-file upload is initiated when the input data size is
+	// greater than or equal to 5MB, or when the data size is negative.
+	if size >= minimumPartSize || size < 0 {
+		return a.putLargeObject(bucketName, objectName, data, size, contentType)
+	}
+	return a.putSmallObject(bucketName, objectName, data, size, contentType)
+}
+
+// StatObject verify if object exists and you have permission to access it.
+func (a API) StatObject(bucketName, objectName string) (ObjectStat, error) {
+	if err := isValidBucketName(bucketName); err != nil {
+		return ObjectStat{}, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return ObjectStat{}, err
+	}
+	return a.headObject(bucketName, objectName)
+}
+
+// RemoveObject remove an object from a bucket.
+func (a API) RemoveObject(bucketName, objectName string) error {
+	if err := isValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return err
 	}
+	return a.deleteObject(bucketName, objectName)
 }
 
 // RemoveIncompleteUpload - abort a specific in progress active multipart upload.
@@ -1141,3 +347,36 @@ func (a API) RemoveIncompleteUpload(bucketName, objectName string) <-chan error
 	go a.removeIncompleteUploadInRoutine(bucketName, objectName, errorCh)
 	return errorCh
 }
+
+// PresignedGetObject get a presigned URL to get an object for third party apps.
+func (a API) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) {
+	if err := isValidExpiry(expires); err != nil {
+		return "", err
+	}
+	expireSeconds := int64(expires / time.Second)
+	return a.presignedGetObject(bucketName, objectName, expireSeconds, 0, 0)
+}
+
+// PresignedPutObject get a presigned URL to upload an object.
+// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
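+//
+// A short usage sketch, following the doc-example convention used elsewhere
+// in this file (the bucket and object names below are placeholders):
+//
+//   api := client.New(....)
+//   presignedURL, err := api.PresignedPutObject("mybucket", "myobject", time.Duration(1000)*time.Second)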
+func (a API) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) {
+	if err := isValidExpiry(expires); err != nil {
+		return "", err
+	}
+	expireSeconds := int64(expires / time.Second)
+	return a.presignedPutObject(bucketName, objectName, expireSeconds)
+}
+
+// PresignedPostPolicy return POST form data that can be used for object upload.
+func (a API) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
+	if p.expiration.IsZero() {
+		return nil, errors.New("Expiration time must be specified")
+	}
+	if _, ok := p.formData["key"]; !ok {
+		return nil, errors.New("object key must be specified")
+	}
+	if _, ok := p.formData["bucket"]; !ok {
+		return nil, errors.New("bucket name must be specified")
+	}
+	return a.presignedPostPolicy(p)
+}
diff --git a/api_functional_test.go b/api_functional_test.go
new file mode 100644
index 0000000000..c2e19e73ac
--- /dev/null
+++ b/api_functional_test.go
@@ -0,0 +1,87 @@
+package minio_test
+
+import (
+	"log"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/minio/minio-go"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569-"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+func randString(n int, src rand.Source) string {
+	b := make([]byte, n)
+	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return string(b[0:30])
+}
+
+func TestFunctional(t *testing.T) {
+	a, err := minio.New("play.minio.io:9002",
+		"Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+	err = a.MakeBucket(bucketName, "private", "us-east-1")
+	if err != nil {
+		t.Fatal("Error", err)
+	}
+
+	err = a.BucketExists(bucketName)
+	if err != nil {
+		t.Fatal("Error", err)
+	}
+
+	err = a.SetBucketACL(bucketName, "public-read-write")
+	if err != nil {
+		t.Fatal("Error", err)
+	}
+
+	acl, err := a.GetBucketACL(bucketName)
+	if err != nil {
+		t.Fatal("Error", err)
+	}
+	if acl != minio.BucketACL("public-read-write") {
+		t.Fatal("Error", acl)
+	}
+
+	for b := range a.ListBuckets() {
+		if b.Err != nil {
+			t.Fatal("Error", b.Err)
+		}
+	}
+
+	err = a.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error", err)
+	}
+
+	err = a.RemoveBucket("bucket1")
+	if err == nil {
+		t.Fatal("Error")
+	}
+
+	if err.Error() != "The specified bucket does not exist."
{ + t.Fatal("Error", err) + } + +} diff --git a/api_handlers_test.go b/api_handlers_test.go index f214f981f8..914739959e 100644 --- a/api_handlers_test.go +++ b/api_handlers_test.go @@ -43,14 +43,15 @@ type bucketHandler struct { } func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path switch { case r.Method == "GET": switch { - case r.URL.Path == "/": + case path == "/": response := []byte("bucket2015-05-20T23:05:09.230Zminiominio") w.Header().Set("Content-Length", strconv.Itoa(len(response))) w.Write(response) - case r.URL.Path == "/bucket": + case path == h.resource: _, ok := r.URL.Query()["acl"] if ok { response := []byte("75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.com75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.comFULL_CONTROL") @@ -59,14 +60,14 @@ func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } fallthrough - case r.URL.Path == "/bucket": + case path == h.resource: response := []byte("\"259d04a13802ae09c7e41be50ccc6baa\"object2015-05-21T18:24:21.097Z22061miniominioSTANDARDfalse1000testbucket") w.Header().Set("Content-Length", strconv.Itoa(len(response))) w.Write(response) } case r.Method == "PUT": switch { - case r.URL.Path == h.resource: + case path == h.resource: _, ok := r.URL.Query()["acl"] if ok { switch r.Header.Get("x-amz-acl") { @@ -90,14 +91,14 @@ func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } case r.Method == "HEAD": switch { - case r.URL.Path == h.resource: + case path == h.resource: w.WriteHeader(http.StatusOK) default: w.WriteHeader(http.StatusForbidden) } case r.Method == "DELETE": switch { - case r.URL.Path != h.resource: + case path != h.resource: w.WriteHeader(http.StatusNotFound) default: h.resource = "" diff --git a/api_private_test.go b/api_private_test.go index f23c58f29e..f27a717e23 100644 --- a/api_private_test.go +++ b/api_private_test.go @@ -16,22 +16,25 @@ package minio -import "testing" +import ( + "net/url" + "testing" +) func TestSignature(t *testing.T) { - conf := new(Config) - if !conf.Signature.isLatest() { + credentials := clientCredentials{} + if !credentials.Signature.isLatest() { t.Fatalf("Error") } - conf.Signature = SignatureV2 - if !conf.Signature.isV2() { + credentials.Signature = SignatureV2 + if !credentials.Signature.isV2() { t.Fatalf("Error") } - if conf.Signature.isV4() { + if credentials.Signature.isV4() { t.Fatalf("Error") } - conf.Signature = SignatureV4 - if !conf.Signature.isV4() { + credentials.Signature = SignatureV4 + if !credentials.Signature.isV4() { t.Fatalf("Error") } } @@ -51,17 +54,6 @@ func TestACLTypes(t *testing.T) { } } -func TestGetRegion(t *testing.T) { - region := getRegion("s3.amazonaws.com") - if region != "us-east-1" { - t.Fatalf("Error") - } - region = getRegion("localhost:9000") - if region != "us-east-1" { - t.Fatalf("Error") - } -} - func TestPartSize(t *testing.T) { var maxPartSize int64 = 1024 * 1024 * 1024 * 5 partSize := calculatePartSize(5000000000000000000) @@ -110,8 +102,128 @@ func TestURLEncoding(t *testing.T) { } for _, u := range want { - if u.encodedName != getURLEncodedPath(u.name) { + if u.encodedName != urlEncodePath(u.name) { t.Errorf("Error") } } } + +func TestGetEndpointURL(t *testing.T) { + if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil { + t.Fatal(err) + } + if _, err := getEndpointURL("192.168.1.1", false); err != nil { + t.Fatal(err) + } + if _, err := getEndpointURL("13333.123123.", 
false); err == nil { + t.Fatal("Error") + } + if _, err := getEndpointURL("s3.aamzza.", false); err == nil { + t.Fatal("Error") + } + if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil { + t.Fatal("Error") + } +} + +func TestValidIP(t *testing.T) { + type validIP struct { + ip string + valid bool + } + + want := []validIP{ + { + ip: "192.168.1.1", + valid: true, + }, + { + ip: "192.1.8", + valid: false, + }, + { + ip: "..192.", + valid: false, + }, + { + ip: "192.168.1.1.1", + valid: false, + }, + } + for _, w := range want { + valid := validIPAddress.MatchString(w.ip) + if valid != w.valid { + t.Fatal("Error") + } + } +} + +func TestValidEndpointDomain(t *testing.T) { + type validEndpoint struct { + endpointDomain string + valid bool + } + + want := []validEndpoint{ + { + endpointDomain: "s3.amazonaws.com", + valid: true, + }, + { + endpointDomain: "s3.amazonaws.com_", + valid: false, + }, + { + endpointDomain: "%$$$", + valid: false, + }, + { + endpointDomain: "s3.amz.test.com", + valid: false, + }, + { + endpointDomain: "s3.", + valid: false, + }, + } + for _, w := range want { + valid := validEndpointDomain.MatchString(w.endpointDomain) + if valid != w.valid { + t.Fatal("Error") + } + } +} + +func TestValidEndpointURL(t *testing.T) { + type validURL struct { + url string + valid bool + } + want := []validURL{ + { + url: "https://s3.amazonaws.com", + valid: true, + }, + { + url: "https://s3.amazonaws.com/bucket/object", + valid: false, + }, + { + url: "192.168.1.1", + valid: false, + }, + } + for _, w := range want { + u, err := url.Parse(w.url) + if err != nil { + t.Fatal("Error") + } + valid := false + if err := isValidEndpointURL(u); err == nil { + valid = true + } + if valid != w.valid { + t.Fatal("Error") + } + } +} diff --git a/api_public_test.go b/api_public_test.go index 1f273f0ceb..c88c7ac4d1 100644 --- a/api_public_test.go +++ b/api_public_test.go @@ -20,6 +20,7 @@ import ( "bytes" "io" "net/http/httptest" + "net/url" "testing" "time" @@ -31,7 +32,11 @@ func TestUserAgent(t *testing.T) { server := httptest.NewServer(userAgent) defer server.Close() - a, err := minio.New(minio.Config{Endpoint: server.URL}) + u, err := url.Parse(server.URL) + if err != nil { + t.Fatal("Error") + } + a, err := minio.New(u.Host, "", "", true) if err != nil { t.Fatal("Error") } @@ -39,7 +44,7 @@ func TestUserAgent(t *testing.T) { a.SetAppInfo("hello-app", "1.0") // Initiate a request. - a.MakeBucket("bucket", "private") + a.MakeBucket("bucket", "private", "") // Set app info again, this should not have set. 
a.SetAppInfo("new-hello-app", "2.0") @@ -59,11 +64,15 @@ func TestBucketOperations(t *testing.T) { server := httptest.NewServer(bucket) defer server.Close() - a, err := minio.New(minio.Config{Endpoint: server.URL}) + u, err := url.Parse(server.URL) + if err != nil { + t.Fatal("Error") + } + a, err := minio.New(u.Host, "", "", true) if err != nil { t.Fatal("Error") } - err = a.MakeBucket("bucket", "private") + err = a.MakeBucket("bucket", "private", "") if err != nil { t.Fatal("Error") } @@ -133,11 +142,15 @@ func TestBucketOperationsFail(t *testing.T) { server := httptest.NewServer(bucket) defer server.Close() - a, err := minio.New(minio.Config{Endpoint: server.URL}) + u, err := url.Parse(server.URL) if err != nil { t.Fatal("Error") } - err = a.MakeBucket("bucket$$$", "private") + a, err := minio.New(u.Host, "", "", true) + if err != nil { + t.Fatal("Error") + } + err = a.MakeBucket("bucket$$$", "private", "") if err == nil { t.Fatal("Error") } @@ -168,7 +181,7 @@ func TestBucketOperationsFail(t *testing.T) { t.Fatal("Error") } - if err.Error() != "The specified bucket is not valid." { + if err.Error() != "Bucket name contains invalid characters." { t.Fatal("Error") } } @@ -181,7 +194,11 @@ func TestObjectOperations(t *testing.T) { server := httptest.NewServer(object) defer server.Close() - a, err := minio.New(minio.Config{Endpoint: server.URL}) + u, err := url.Parse(server.URL) + if err != nil { + t.Fatal("Error") + } + a, err := minio.New(u.Host, "", "", true) if err != nil { t.Fatal("Error") } @@ -226,7 +243,11 @@ func TestPresignedURL(t *testing.T) { server := httptest.NewServer(object) defer server.Close() - a, err := minio.New(minio.Config{Endpoint: server.URL}) + u, err := url.Parse(server.URL) + if err != nil { + t.Fatal("Error") + } + a, err := minio.New(u.Host, "", "", true) if err != nil { t.Fatal("Error") } @@ -236,11 +257,7 @@ func TestPresignedURL(t *testing.T) { t.Fatal("Error") } - a, err = minio.New(minio.Config{ - Endpoint: server.URL, - AccessKeyID: "accessKey", - SecretAccessKey: "secretKey", - }) + a, err = minio.New(u.Host, "accessKey", "secretKey", true) if err != nil { t.Fatal("Error") } @@ -262,7 +279,7 @@ func TestPresignedURL(t *testing.T) { } func TestErrorResponse(t *testing.T) { - errorResponse := []byte("AccessDeniedAccess Denied/mybucket/myphoto.jpgF19772218238A85AGuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD") + errorResponse := []byte("AccessDeniedAccess Deniedmybucketmyphoto.jpgF19772218238A85AGuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD") errorReader := bytes.NewReader(errorResponse) err := minio.BodyToErrorResponse(errorReader) if err == nil { @@ -272,10 +289,6 @@ func TestErrorResponse(t *testing.T) { t.Fatal("Error") } resp := minio.ToErrorResponse(err) - // valid all fields. 
-	if resp == nil {
-		t.Fatal("Error")
-	}
 	if resp.Code != "AccessDenied" {
 		t.Fatal("Error")
 	}
@@ -285,7 +298,10 @@
 	if resp.Message != "Access Denied" {
 		t.Fatal("Error")
 	}
-	if resp.Resource != "/mybucket/myphoto.jpg" {
+	if resp.BucketName != "mybucket" {
+		t.Fatal("Error")
+	}
+	if resp.Key != "myphoto.jpg" {
 		t.Fatal("Error")
 	}
 	if resp.HostID != "GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD" {
diff --git a/error-response.go b/error-response.go
index d820d7c3a1..294430faac 100644
--- a/error-response.go
+++ b/error-response.go
@@ -27,7 +27,8 @@ import (
    <Error>
       <Code>AccessDenied</Code>
       <Message>Access Denied</Message>
-      <Resource>/mybucket/myphoto.jpg</Resource>
+      <BucketName>bucketName</BucketName>
+      <Key>objectName</Key>
       <RequestId>F19772218238A85A</RequestId>
       <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
    </Error>
 
 // ErrorResponse is the type error returned by some API operations.
 type ErrorResponse struct {
-	XMLName   xml.Name `xml:"Error" json:"-"`
-	Code      string
-	Message   string
-	Resource  string
-	RequestID string `xml:"RequestId"`
-	HostID    string `xml:"HostId"`
+	XMLName    xml.Name `xml:"Error" json:"-"`
+	Code       string
+	Message    string
+	BucketName string
+	Key        string
+	RequestID  string `xml:"RequestId"`
+	HostID     string `xml:"HostId"`
 
 	// This is a new undocumented field, set only if available.
 	AmzBucketRegion string
@@ -60,12 +62,12 @@ type ErrorResponse struct {
 //      fmt.Println(resp.ToXML())
 //   }
 //   ...
-func ToErrorResponse(err error) *ErrorResponse {
+func ToErrorResponse(err error) ErrorResponse {
 	switch err := err.(type) {
 	case ErrorResponse:
-		return &err
+		return err
 	default:
-		return nil
+		return ErrorResponse{}
 	}
 }
@@ -103,23 +105,27 @@ func BodyToErrorResponse(errBody io.Reader) error {
 }
 
 // ErrInvalidBucketName - invalid bucket name response.
-var ErrInvalidBucketName = func() error {
+var ErrInvalidBucketName = func(message string) error {
 	return ErrorResponse{
 		Code:      "InvalidBucketName",
-		Message:   "The specified bucket is not valid.",
+		Message:   message,
 		RequestID: "minio",
 	}
 }
 
 // ErrInvalidObjectName - invalid object name response.
-var ErrInvalidObjectName = func() error {
+var ErrInvalidObjectName = func(message string) error {
 	return ErrorResponse{
 		Code:      "NoSuchKey",
-		Message:   "The specified key does not exist.",
+		Message:   message,
 		RequestID: "minio",
 	}
 }
 
+// ErrInvalidObjectPrefix - invalid object prefix response is
+// similar to object name response.
+var ErrInvalidObjectPrefix = ErrInvalidObjectName
+
 // ErrInvalidArgument - invalid argument response.
 var ErrInvalidArgument = func(message string) error {
 	return ErrorResponse{
diff --git a/examples/play/bucketexists.go b/examples/play/bucketexists.go
index b417b6278e..2a801de34f 100644
--- a/examples/play/bucketexists.go
+++ b/examples/play/bucketexists.go
@@ -25,16 +25,20 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
+
+	// New provides a client object backed by an automatically detected signature type
+	// based on the provider.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.BucketExists("bucketName") + + err = s3Client.BucketExists("bucket-name") if err != nil { log.Fatalln(err) } + log.Println("Success") } diff --git a/examples/play/getbucketacl.go b/examples/play/getbucketacl.go index adfdfce165..e02ad557ca 100644 --- a/examples/play/getbucketacl.go +++ b/examples/play/getbucketacl.go @@ -25,14 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - acl, err := s3Client.GetBucketACL("bucketName") + + acl, err := s3Client.GetBucketACL("bucket-name") if err != nil { log.Fatalln(err) } diff --git a/examples/play/getobject.go b/examples/play/getobject.go index 80071cc8d3..e5f5b99226 100644 --- a/examples/play/getobject.go +++ b/examples/play/getobject.go @@ -27,14 +27,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - reader, err := s3Client.GetObject("bucketName", "objectName") + reader, err := s3Client.GetObject("bucke-name", "objectName") if err != nil { log.Fatalln(err) } diff --git a/examples/play/getpartialobject.go b/examples/play/getpartialobject.go index 1e62ec32a9..34da70d870 100644 --- a/examples/play/getpartialobject.go +++ b/examples/play/getpartialobject.go @@ -27,14 +27,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - reader, stat, err := s3Client.GetPartialObject("bucketName", "objectName", 0, 10) + + reader, stat, err := s3Client.GetPartialObject("bucket-name", "objectName", 0, 10) if err != nil { log.Fatalln(err) } diff --git a/examples/play/listbuckets.go b/examples/play/listbuckets.go index 171a8f0411..f299df34b1 100644 --- a/examples/play/listbuckets.go +++ b/examples/play/listbuckets.go @@ -25,13 +25,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. 
+	// inSecure boolean is the last argument for New().
+
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
+
 	for bucket := range s3Client.ListBuckets() {
 		if bucket.Err != nil {
 			log.Fatalln(bucket.Err)
diff --git a/examples/play/listincompleteuploads.go b/examples/play/listincompleteuploads.go
index 7e32310542..d68500f9b9 100644
--- a/examples/play/listincompleteuploads.go
+++ b/examples/play/listincompleteuploads.go
@@ -25,17 +25,17 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-		Endpoint:        "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
+
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	// Recursive
-	for multipartObject := range s3Client.ListIncompleteUploads("bucketName", "objectName", true) {
+
+	for multipartObject := range s3Client.ListIncompleteUploads("bucket-name", "objectName", true) {
 		if multipartObject.Err != nil {
 			log.Fatalln(multipartObject.Err)
 		}
diff --git a/examples/play/listobjects.go b/examples/play/listobjects.go
index d22ee72839..2978e158ba 100644
--- a/examples/play/listobjects.go
+++ b/examples/play/listobjects.go
@@ -25,14 +25,17 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
+
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	for object := range s3Client.ListObjects("bucketName", "", true) {
+
+	for object := range s3Client.ListObjects("bucket-name", "prefix", true) {
 		if object.Err != nil {
 			log.Fatalln(object.Err)
 		}
diff --git a/examples/play/makebucket.go b/examples/play/makebucket.go
index cc32feb3c8..5b0b0a962c 100644
--- a/examples/play/makebucket.go
+++ b/examples/play/makebucket.go
@@ -25,14 +25,17 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint: "https://play.minio.io:9000",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
+
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
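+	// NOTE: the access and secret keys used in these play examples are the
+	// publicly shared credentials of the play.minio.io demo server; replace
+	// them with your own for any other deployment.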
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.MakeBucket("bucketName", "us-east-1") + + err = s3Client.MakeBucket("bucket-name", minio.BucketACL("private"), "us-east-1") if err != nil { log.Fatalln(err) } diff --git a/examples/play/presignedgetobject.go b/examples/play/presignedgetobject.go new file mode 100644 index 0000000000..ae97872414 --- /dev/null +++ b/examples/play/presignedgetobject.go @@ -0,0 +1,44 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) + if err != nil { + log.Fatalln(err) + } + + presignedURL, err := s3Client.PresignedGetObject("bucket-name", "objectName", time.Duration(1000)*time.Second) + if err != nil { + log.Fatalln(err) + } + log.Println(presignedURL) +} diff --git a/examples/play/presignedpostpolicy.go b/examples/play/presignedpostpolicy.go new file mode 100644 index 0000000000..5e67f27fd4 --- /dev/null +++ b/examples/play/presignedpostpolicy.go @@ -0,0 +1,54 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) + if err != nil { + log.Fatalln(err) + } + + policy := minio.NewPostPolicy() + policy.SetBucket("bucket-name") + policy.SetKey("objectName") + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + m, err := s3Client.PresignedPostPolicy(policy) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("curl ") + for k, v := range m { + fmt.Printf("-F %s=%s ", k, v) + } + fmt.Printf("-F file=@/etc/bashrc ") + fmt.Printf(config.Endpoint + "/bucket-name\n") +} diff --git a/examples/play/presignedputobject.go b/examples/play/presignedputobject.go new file mode 100644 index 0000000000..3c4e0aea9e --- /dev/null +++ b/examples/play/presignedputobject.go @@ -0,0 +1,44 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "time" + + "github.com/minio/minio-go" +) + +func main() { + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) + if err != nil { + log.Fatalln(err) + } + + presignedURL, err := s3Client.PresignedPutObject("bucket-name", "objectName", time.Duration(1000)*time.Second) + if err != nil { + log.Fatalln(err) + } + log.Println(presignedURL) +} diff --git a/examples/play/putobject.go b/examples/play/putobject.go index 80ca9ea82e..f165254430 100644 --- a/examples/play/putobject.go +++ b/examples/play/putobject.go @@ -26,13 +26,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } + object, err := os.Open("testfile") if err != nil { log.Fatalln(err) @@ -40,7 +43,7 @@ func main() { defer object.Close() st, _ := object.Stat() - err = s3Client.PutObject("bucketName", "objectName", object, st.size, "application/octet-stream") + err = s3Client.PutObject("bucket-name", "objectName", object, st.Size(), "application/octet-stream") if err != nil { log.Fatalln(err) } diff --git a/examples/play/removebucket.go b/examples/play/removebucket.go index a33a1474d0..1b211a5e67 100644 --- a/examples/play/removebucket.go +++ b/examples/play/removebucket.go @@ -25,16 +25,17 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveBucket("bucketName") + + err = s3Client.RemoveBucket("bucket-name") if err != nil { log.Fatalln(err) } diff --git a/examples/play/removeincompleteupload.go b/examples/play/removeincompleteupload.go index 17bb485aee..9f07a23b54 100644 --- a/examples/play/removeincompleteupload.go +++ b/examples/play/removeincompleteupload.go @@ -25,14 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - for err := range s3Client.RemoveIncompleteUpload("bucketName", "objectName") { + + for err := range s3Client.RemoveIncompleteUpload("bucket-name", "objectName") { if err != nil { log.Fatalln(err) } diff --git a/examples/play/removeobject.go b/examples/play/removeobject.go index 9a197705bf..f2bcd52a8a 100644 --- a/examples/play/removeobject.go +++ b/examples/play/removeobject.go @@ -25,16 +25,16 @@ import ( ) func main() { - config := minio.Config{ - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveObject("bucketName", "objectName") + err = s3Client.RemoveObject("bucket-name", "objectName") if err != nil { log.Fatalln(err) } diff --git a/examples/play/setbucketacl.go b/examples/play/setbucketacl.go index 60c91c7bf8..f272eb658a 100644 --- a/examples/play/setbucketacl.go +++ b/examples/play/setbucketacl.go @@ -25,14 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - err = s3Client.SetBucketACL("bucketName", minio.BucketACL("public-read-write")) + + err = s3Client.SetBucketACL("bucket-name", minio.BucketACL("public-read-write")) if err != nil { log.Fatalln(err) } diff --git a/examples/play/statobject.go b/examples/play/statobject.go index 5edf5c47b4..ca777e3912 100644 --- a/examples/play/statobject.go +++ b/examples/play/statobject.go @@ -25,14 +25,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://play.minio.io:9000", - } - s3Client, err := minio.New(config) + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). + + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) if err != nil { log.Fatalln(err) } - stat, err := s3Client.StatObject("bucketName", "objectName") + stat, err := s3Client.StatObject("bucket-name", "objectName") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/bucketexists.go b/examples/s3/bucketexists.go index e7bad6df85..e60fadd909 100644 --- a/examples/s3/bucketexists.go +++ b/examples/s3/bucketexists.go @@ -25,21 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - err = s3Client.BucketExists("bucketName") + err = s3Client.BucketExists("bucket-name") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getbucketacl.go b/examples/s3/getbucketacl.go index 8613791a5c..b5e7c53a4e 100644 --- a/examples/s3/getbucketacl.go +++ b/examples/s3/getbucketacl.go @@ -25,20 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - acl, err := s3Client.GetBucketACL("bucketName") + + acl, err := s3Client.GetBucketACL("bucket-name") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getobject.go b/examples/s3/getobject.go index 9b200b1662..b73ab19150 100644 --- a/examples/s3/getobject.go +++ b/examples/s3/getobject.go @@ -27,20 +27,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - reader, err := s3Client.GetObject("bucketName", "objectName") + reader, err := s3Client.GetObject("bucke-name", "objectName") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getpartialobject.go b/examples/s3/getpartialobject.go index c5cd3ade2c..b05bf5c10c 100644 --- a/examples/s3/getpartialobject.go +++ b/examples/s3/getpartialobject.go @@ -27,20 +27,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - reader, stat, err := s3Client.GetPartialObject("bucketName", "objectName", 0, 10) + + reader, stat, err := s3Client.GetPartialObject("bucket-name", "objectName", 0, 10) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/listbuckets.go b/examples/s3/listbuckets.go index b69376cef9..ae25e4d217 100644 --- a/examples/s3/listbuckets.go +++ b/examples/s3/listbuckets.go @@ -25,19 +25,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } + for bucket := range s3Client.ListBuckets() { if bucket.Err != nil { log.Fatalln(bucket.Err) diff --git a/examples/s3/listincompleteuploads.go b/examples/s3/listincompleteuploads.go index bb4cd06334..1deb80766a 100644 --- a/examples/s3/listincompleteuploads.go +++ b/examples/s3/listincompleteuploads.go @@ -25,20 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - for multipartObject := range s3Client.ListIncompleteUploads("bucketName", "objectName", true) { + + for multipartObject := range s3Client.ListIncompleteUploads("bucket-name", "objectName", true) { if multipartObject.Err != nil { log.Fatalln(multipartObject.Err) } diff --git a/examples/s3/listobjects.go b/examples/s3/listobjects.go index de8ae714e1..3162c7b448 100644 --- a/examples/s3/listobjects.go +++ b/examples/s3/listobjects.go @@ -25,20 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - for object := range s3Client.ListObjects("bucketName", "prefix", true) { + + for object := range s3Client.ListObjects("bucket-name", "prefix", true) { if object.Err != nil { log.Fatalln(object.Err) } diff --git a/examples/s3/makebucket.go b/examples/s3/makebucket.go index 2ea145bc68..27d90a826d 100644 --- a/examples/s3/makebucket.go +++ b/examples/s3/makebucket.go @@ -25,20 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - err = s3Client.MakeBucket("bucketName", "us-east-1") + + err = s3Client.MakeBucket("bucket-name", minio.BucketACL("private"), "us-east-1") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/presignedgetobject.go b/examples/s3/presignedgetobject.go index 4cd0f9289f..0d7e253381 100644 --- a/examples/s3/presignedgetobject.go +++ b/examples/s3/presignedgetobject.go @@ -26,20 +26,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - presignedURL, err := s3Client.PresignedGetObject("bucketName", "objectName", time.Duration(1000)*time.Second) + + presignedURL, err := s3Client.PresignedGetObject("bucket-name", "objectName", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/presignedpostpolicy.go b/examples/s3/presignedpostpolicy.go index c75ee1e017..a13826486c 100644 --- a/examples/s3/presignedpostpolicy.go +++ b/examples/s3/presignedpostpolicy.go @@ -27,22 +27,19 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } + policy := minio.NewPostPolicy() + policy.SetBucket("bucket-name") policy.SetKey("objectName") - policy.SetBucket("bucketName") policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days m, err := s3Client.PresignedPostPolicy(policy) if err != nil { @@ -53,5 +50,5 @@ func main() { fmt.Printf("-F %s=%s ", k, v) } fmt.Printf("-F file=@/etc/bashrc ") - fmt.Printf(config.Endpoint + "/bucketName\n") + fmt.Printf(config.Endpoint + "/bucket-name\n") } diff --git a/examples/s3/presignedputobject.go b/examples/s3/presignedputobject.go index ee89c1a25d..9eb853b780 100644 --- a/examples/s3/presignedputobject.go +++ b/examples/s3/presignedputobject.go @@ -26,20 +26,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } - presignedURL, err := s3Client.PresignedPutObject("bucketName", "objectName", time.Duration(1000)*time.Second) + + presignedURL, err := s3Client.PresignedPutObject("bucket-name", "objectName", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject.go b/examples/s3/putobject.go index 9dddffead4..c4312b90a0 100644 --- a/examples/s3/putobject.go +++ b/examples/s3/putobject.go @@ -26,19 +26,16 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. To enable Signature Version 2 do the following. - // config.Signature = minio.SignatureV2 + // Requests are always secure by default. set inSecure=true to enable insecure access. + // inSecure boolean is the last argument for New(). - s3Client, err := minio.New(config) + // New provides a client object backend by automatically detected signature type based + // on the provider. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) if err != nil { log.Fatalln(err) } + object, err := os.Open("testfile") if err != nil { log.Fatalln(err) @@ -46,7 +43,7 @@ func main() { defer object.Close() st, _ := object.Stat() - err = s3Client.PutObject("bucketName", "objectName", object, st.size, "application/octet-stream") + err = s3Client.PutObject("bucket-name", "objectName", object, st.Size(), "application/octet-stream") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/removebucket.go b/examples/s3/removebucket.go index 9dc257b931..288b771cf5 100644 --- a/examples/s3/removebucket.go +++ b/examples/s3/removebucket.go @@ -25,20 +25,17 @@ import ( ) func main() { - config := minio.Config{ - Endpoint: "https://s3.amazonaws.com", - AccessKeyID: "YOUR-ACCESS-KEY-HERE", - SecretAccessKey: "YOUR-PASSWORD-HERE", - } - - // Default is Signature Version 4. 
-	// config.Signature = minio.SignatureV2
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
 
-	s3Client, err := minio.New(config)
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	err = s3Client.RemoveBucket("bucketName")
+
+	err = s3Client.RemoveBucket("bucket-name")
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/examples/s3/removeincompleteupload.go b/examples/s3/removeincompleteupload.go
index 96eec42eb7..0557b69e2f 100644
--- a/examples/s3/removeincompleteupload.go
+++ b/examples/s3/removeincompleteupload.go
@@ -25,20 +25,17 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint:        "https://s3.amazonaws.com",
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-	}
-
-	// Default is Signature Version 4. To enable Signature Version 2 do the following.
-	// config.Signature = minio.SignatureV2
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
 
-	s3Client, err := minio.New(config)
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	for err := range s3Client.RemoveIncompleteUpload("bucketName", "objectName") {
+
+	for err := range s3Client.RemoveIncompleteUpload("bucket-name", "objectName") {
 		if err != nil {
 			log.Fatalln(err)
 		}
diff --git a/examples/s3/removeobject.go b/examples/s3/removeobject.go
index f4bf640cc9..18d4354286 100644
--- a/examples/s3/removeobject.go
+++ b/examples/s3/removeobject.go
@@ -25,16 +25,16 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint:        "https://s3.amazonaws.com",
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-	}
-	s3Client, err := minio.New(config)
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
+
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	err = s3Client.RemoveObject("bucketName", "objectName")
+	err = s3Client.RemoveObject("bucket-name", "objectName")
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/examples/s3/setbucketacl.go b/examples/s3/setbucketacl.go
index a729078773..87f193cf2f 100644
--- a/examples/s3/setbucketacl.go
+++ b/examples/s3/setbucketacl.go
@@ -25,20 +25,17 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint:        "https://s3.amazonaws.com",
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-	}
-
-	// Default is Signature Version 4. To enable Signature Version 2 do the following.
-	// config.Signature = minio.SignatureV2
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
 
-	s3Client, err := minio.New(config)
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	err = s3Client.SetBucketACL("bucketName", minio.BucketACL("public-read-write"))
+
+	err = s3Client.SetBucketACL("bucket-name", minio.BucketACL("public-read-write"))
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/examples/s3/statobject.go b/examples/s3/statobject.go
index b2446c792d..3d5e787ea9 100644
--- a/examples/s3/statobject.go
+++ b/examples/s3/statobject.go
@@ -25,20 +25,16 @@ import (
 )
 
 func main() {
-	config := minio.Config{
-		Endpoint:        "https://s3.amazonaws.com",
-		AccessKeyID:     "YOUR-ACCESS-KEY-HERE",
-		SecretAccessKey: "YOUR-PASSWORD-HERE",
-	}
-
-	// Default is Signature Version 4. To enable Signature Version 2 do the following.
-	// config.Signature = minio.SignatureV2
+	// Requests are always secure by default. Set inSecure=true to enable insecure access.
+	// inSecure boolean is the last argument for New().
 
-	s3Client, err := minio.New(config)
+	// New provides a client object backed by an automatically detected signature type based
+	// on the provider.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	stat, err := s3Client.StatObject("bucketName", "objectName")
+	stat, err := s3Client.StatObject("bucket-name", "objectName")
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/helpers.go b/helpers.go
new file mode 100644
index 0000000000..10a07ad654
--- /dev/null
+++ b/helpers.go
@@ -0,0 +1,232 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/hex"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// minimumPartSize - minimum part size per object; above this size PutObject behaves internally as multipart.
+var minimumPartSize int64 = 1024 * 1024 * 5
+
+// maxParts - maximum parts for a single multipart session.
+var maxParts = int64(10000)
+
+// maxPartSize - maximum part size for a single multipart upload operation.
+var maxPartSize int64 = 1024 * 1024 * 1024 * 5
+
+// isAnonymousCredentials - True if config doesn't have access and secret keys.
+func isAnonymousCredentials(c clientCredentials) bool {
+	if c.AccessKeyID != "" && c.SecretAccessKey != "" {
+		return false
+	}
+	return true
+}
+
+// isVirtualHostSupported - verify if host supports virtual hosted style.
+// Currently only Amazon S3 and Google Cloud Storage support this.
+func isVirtualHostSupported(endpointURL *url.URL) bool {
+	return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
+}
+
+// Match if it is exactly the Amazon S3 endpoint.
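+// Virtual-host-style addressing is attempted only for this exact host
+// (see isVirtualHostSupported above).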
+func isAmazonEndpoint(endpointURL *url.URL) bool {
+	if endpointURL == nil {
+		return false
+	}
+	if endpointURL.Host == "s3.amazonaws.com" {
+		return true
+	}
+	return false
+}
+
+// Match if it is exactly the Google Cloud Storage endpoint.
+func isGoogleEndpoint(endpointURL *url.URL) bool {
+	if endpointURL == nil {
+		return false
+	}
+	if endpointURL.Host == "storage.googleapis.com" {
+		return true
+	}
+	return false
+}
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL *url.URL) error {
+	if endpointURL == nil {
+		return ErrInvalidArgument("Endpoint url cannot be empty.")
+	}
+	if endpointURL.Path != "/" && endpointURL.Path != "" {
+		return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+	}
+	if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+		if !isAmazonEndpoint(endpointURL) {
+			return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+		}
+	}
+	if strings.Contains(endpointURL.Host, ".googleapis.com") {
+		if !isGoogleEndpoint(endpointURL) {
+			return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+		}
+	}
+	return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 {
+		return ErrInvalidArgument("Expires cannot be less than 1 second.")
+	}
+	if expireSeconds > 604800 {
+		return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+	}
+	return nil
+}
+
+/// Excerpts from - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
+/// When using virtual hosted–style buckets with SSL, the SSL wild card
+/// certificate only matches buckets that do not contain periods.
+/// To work around this, use HTTP or write your own certificate verification logic.
+
+// We decided to not support bucketNames with '.' in them.
+var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$`)
+
+// isValidBucketName - verify bucket name in accordance with
+//  - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func isValidBucketName(bucketName string) error {
+	if strings.TrimSpace(bucketName) == "" {
+		return ErrInvalidBucketName("Bucket name cannot be empty.")
+	}
+	if len(bucketName) < 3 {
+		return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
+	}
+	if len(bucketName) > 63 {
+		return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
+	}
+	if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
+		return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
+	}
+	if !validBucketName.MatchString(bucketName) {
+		return ErrInvalidBucketName("Bucket name contains invalid characters.")
+	}
+	return nil
+}
+
+// isValidObjectName - verify object name in accordance with
+//  - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func isValidObjectName(objectName string) error {
+	if strings.TrimSpace(objectName) == "" {
+		return ErrInvalidObjectName("Object name cannot be empty.")
+	}
+	if len(objectName) > 1024 {
+		return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
+	}
+	if !utf8.ValidString(objectName) {
+		return ErrInvalidObjectName("Object names with non UTF-8 strings are not supported.")
+	}
+	return nil
+}
+
+// isValidObjectPrefix - verify if object prefix is valid.
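+// An empty prefix is deliberately allowed here since it matches every
+// object, so unlike isValidObjectName there is no emptiness check.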
+func isValidObjectPrefix(objectPrefix string) error {
+	if len(objectPrefix) > 1024 {
+		return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
+	}
+	if !utf8.ValidString(objectPrefix) {
+		return ErrInvalidObjectPrefix("Object prefixes with non UTF-8 strings are not supported.")
+	}
+	return nil
+}
+
+// calculatePartSize - calculate the optimal part size for the given objectSize.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+//  maxParts    - 10000
+//  maxPartSize - 5GB
+//
+// If the partSize computed by dividing objectSize across maxParts is greater
+// than minimumPartSize, use it (capped at maxPartSize); otherwise fall back
+// to minimumPartSize.
+//
+// Special cases
+//
+// - if input object size is -1 then return maxPartSize.
+// - if it happens to be that partSize is indeed bigger
+//   than the maximum part size just return maxPartSize.
+//
+func calculatePartSize(objectSize int64) int64 {
+	// if object size is -1 choose part size as 5GB.
+	if objectSize == -1 {
+		return maxPartSize
+	}
+	// make sure the last part has enough buffer and handle this properly.
+	partSize := (objectSize / (maxParts - 1))
+	if partSize > minimumPartSize {
+		if partSize > maxPartSize {
+			return maxPartSize
+		}
+		return partSize
+	}
+	return minimumPartSize
+}
+
+// urlEncodePath encodes strings from their UTF-8 byte representations to HTML hex escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func urlEncodePath(pathName string) string {
+	// if the path only contains unreserved characters, no encoding is needed.
+	reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+	if reservedNames.MatchString(pathName) {
+		return pathName
+	}
+	var encodedPathname string
+	for _, s := range pathName {
+		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		}
+		switch s {
+		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		default:
+			runeLen := utf8.RuneLen(s) // renamed from 'len' to avoid shadowing the builtin
+			if runeLen < 0 {
+				// if utf8 cannot convert, return the string as is.
+				return pathName
+			}
+			u := make([]byte, runeLen)
+			utf8.EncodeRune(u, s)
+			for _, r := range u {
+				hex := hex.EncodeToString([]byte{r})
+				encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+			}
+		}
+	}
+	return encodedPathname
+}
diff --git a/init.go b/init.go
new file mode 100644
index 0000000000..ae6c0b7ee9
--- /dev/null
+++ b/init.go
@@ -0,0 +1,207 @@
+package minio
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+// SignatureType is type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is Latest i.e SignatureV4.
+const (
+	Latest SignatureType = iota
+	SignatureV4
+	SignatureV2
+)
+
+// isV2 - is signature SignatureV2?
+func (s SignatureType) isV2() bool {
+	return s == SignatureV2
+}
+
+// isV4 - is signature SignatureV4?
+func (s SignatureType) isV4() bool {
+	return s == SignatureV4
+}
+
+// isLatest - is signature Latest?
+func (s SignatureType) isLatest() bool {
+	return s == Latest
+}
+
+// clientCredentials - main configuration struct used for credentials.
+type clientCredentials struct {
+	/// Standard options.
+	AccessKeyID     string        // AccessKeyID required for authorized requests.
+	SecretAccessKey string        // SecretAccessKey required for authorized requests.
+	Signature       SignatureType // choose a signature type if necessary.
+}
+
+// Global constants.
+const (
+	libraryName    = "minio-go"
+	libraryVersion = "0.2.5"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+//       Minio (OS; ARCH) LIB/VER APP/VER
+const (
+	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// API is a container which delegates methods that comply with CloudStorageAPI interface.
+type API struct {
+	// Needs allocation.
+	mutex     *sync.Mutex
+	regionMap map[string]string
+
+	// User supplied.
+	userAgent   string
+	credentials *clientCredentials
+	endpointURL *url.URL
+
+	// This http transport is usually needed for debugging OR to add your own
+	// custom TLS certificates on the client transport, for custom CA's and
+	// certs which are not part of standard certificate authority.
+	httpTransport http.RoundTripper
+}
+
+// validEndpointDomain - regex for validating domain names.
+var validEndpointDomain = regexp.MustCompile(`^(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|([a-zA-Z0-9][a-zA-Z0-9-_]{1,61}[a-zA-Z0-9]))\.([a-zA-Z]{2,6}|[a-zA-Z0-9-]{2,30}\.[a-zA-Z]{2,3})$`)
+
+// validIPAddress - regex for validating ip address.
+var validIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
+	if strings.Contains(endpoint, ":") {
+		host, _, err := net.SplitHostPort(endpoint)
+		if err != nil {
+			return nil, err
+		}
+		if !validIPAddress.MatchString(host) && !validEndpointDomain.MatchString(host) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, ErrInvalidArgument(msg)
+		}
+	} else {
+		if !validIPAddress.MatchString(endpoint) && !validEndpointDomain.MatchString(endpoint) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, ErrInvalidArgument(msg)
+		}
+	}
+	// if inSecure is true, use 'http' scheme.
+	scheme := "https"
+	if inSecure {
+		scheme = "http"
+	}
+
+	// Construct the endpoint URL.
+	endpointURL := new(url.URL)
+	endpointURL.Host = endpoint
+	endpointURL.Scheme = scheme
+
+	// Validate the constructed endpoint URL.
+	if err := isValidEndpointURL(endpointURL); err != nil {
+		return nil, err
+	}
+	return endpointURL, nil
+}
+
+// NewV2 - instantiate minio client API with signature version '2'.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, inSecure bool) (API, error) {
+	// construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, inSecure)
+	if err != nil {
+		return API{}, err
+	}
+
+	// create new client credentials.
+	credentials := &clientCredentials{}
+	credentials.AccessKeyID = accessKeyID
+	credentials.SecretAccessKey = secretAccessKey
+	credentials.Signature = SignatureV2
+
+	// instantiate new API.
+	api := API{
+		// Allocate.
+		mutex:     &sync.Mutex{},
+		regionMap: make(map[string]string),
+		// Save for lower level calls.
+		userAgent:   libraryUserAgent,
+		credentials: credentials,
+		endpointURL: endpointURL,
+	}
+	return api, nil
+}
+
+// NewV4 - instantiate minio client API with signature version '4'.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, inSecure bool) (API, error) {
+	// construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, inSecure)
+	if err != nil {
+		return API{}, err
+	}
+
+	// create new client credentials.
+	credentials := &clientCredentials{}
+	credentials.AccessKeyID = accessKeyID
+	credentials.SecretAccessKey = secretAccessKey
+	credentials.Signature = SignatureV4
+
+	// instantiate new API.
+	api := API{
+		// Allocate.
+		mutex:     &sync.Mutex{},
+		regionMap: make(map[string]string),
+		// Save for lower level calls.
+		userAgent:   libraryUserAgent,
+		credentials: credentials,
+		endpointURL: endpointURL,
+	}
+	return api, nil
+}
+
+// New - instantiate minio client API.
+/// TODO - add automatic verification of signature.
+func New(endpoint string, accessKeyID, secretAccessKey string, inSecure bool) (API, error) {
+	// construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, inSecure)
+	if err != nil {
+		return API{}, err
+	}
+
+	// create new client credentials.
+	credentials := &clientCredentials{}
+	credentials.AccessKeyID = accessKeyID
+	credentials.SecretAccessKey = secretAccessKey
+
+	// Google Cloud Storage supports only signature V2, force it.
+	if isGoogleEndpoint(endpointURL) {
+		credentials.Signature = SignatureV2
+	}
+	// If Amazon S3, set to signature V4.
+	if isAmazonEndpoint(endpointURL) {
+		credentials.Signature = SignatureV4
+	}
+
+	// instantiate new API.
+	api := API{
+		// Allocate.
+		mutex:     &sync.Mutex{},
+		regionMap: make(map[string]string),
+		// Save for lower level calls.
+		userAgent:   libraryUserAgent,
+		credentials: credentials,
+		endpointURL: endpointURL,
+	}
+	return api, nil
+}
diff --git a/interface.go b/interface.go
index fcd59b0c82..e4ed4378b7 100644
--- a/interface.go
+++ b/interface.go
@@ -18,13 +18,14 @@ package minio
 
 import (
 	"io"
+	"net/http"
 	"time"
 )
 
 // CloudStorageAPI - Cloud Storage API interface.
 type CloudStorageAPI interface {
 	// Bucket Read/Write/Stat operations.
-	MakeBucket(bucket string, cannedACL BucketACL) error
+	MakeBucket(bucket string, cannedACL BucketACL, location string) error
 	BucketExists(bucket string) error
 	RemoveBucket(bucket string) error
 	SetBucketACL(bucket string, cannedACL BucketACL) error
@@ -49,4 +50,7 @@ type CloudStorageAPI interface {
 
 	// Application info.
 	SetAppInfo(appName, appVersion string)
+
+	// Set custom transport.
+	SetCustomTransport(customTransport http.RoundTripper)
 }
diff --git a/io.go b/io.go
index f575be62a3..d8daa18596 100644
--- a/io.go
+++ b/io.go
@@ -20,6 +20,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"sync"
 )
 
@@ -28,7 +29,7 @@ type objectReadSeeker struct {
 	// mutex.
 	mutex *sync.Mutex
 
-	s3API      API
+	api        API
 	reader     io.ReadCloser
 	isRead     bool
 	stat       ObjectStat
@@ -43,7 +44,7 @@ func newObjectReadSeeker(api API, bucket, object string) *objectReadSeeker {
 		mutex:      new(sync.Mutex),
 		reader:     nil,
 		isRead:     false,
-		s3API:      api,
+		api:        api,
 		offset:     0,
 		bucketName: bucket,
 		objectName: object,
@@ -69,7 +70,7 @@ func (r *objectReadSeeker) Read(p []byte) (int, error) {
 	defer r.mutex.Unlock()
 
 	if !r.isRead {
-		reader, _, err := r.s3API.getObject(r.bucketName, r.objectName, r.offset, 0)
+		reader, _, err := r.api.getObject(r.bucketName, r.objectName, r.offset, 0)
 		if err != nil {
 			return 0, err
 		}
@@ -110,7 +111,7 @@ func (r *objectReadSeeker) Seek(offset int64, whence int) (int64, error) {
 // Size returns the size of the object. If there is any error
 // it will be of type ErrorResponse.
 func (r *objectReadSeeker) Size() (int64, error) {
-	objectSt, err := r.s3API.headObject(r.bucketName, r.objectName)
+	objectSt, err := r.api.headObject(r.bucketName, r.objectName)
 	r.stat = objectSt
 	return r.stat.Size, err
 }
@@ -134,6 +135,21 @@ func newTempFile(prefix string) (*tempFile, error) {
 	}, nil
 }
 
+// cleanupStaleTempfiles - cleanup any stale files present in the temp directory at a given prefix.
+func cleanupStaleTempfiles(prefix string) error {
+	globPath := filepath.Join(os.TempDir(), prefix) + "*"
+	staleFiles, err := filepath.Glob(globPath)
+	if err != nil {
+		return err
+	}
+	for _, staleFile := range staleFiles {
+		if err := os.Remove(staleFile); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // Close - closer wrapper to close and remove temporary file.
 func (t *tempFile) Close() error {
 	t.mutex.Lock()
diff --git a/iterators.go b/iterators.go
new file mode 100644
index 0000000000..c794dfddb3
--- /dev/null
+++ b/iterators.go
@@ -0,0 +1,263 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// listBucketsInRoutine goroutine based iterator for listBuckets.
+func (a API) listBucketsInRoutine(ch chan<- BucketStat) {
+	defer close(ch)
+	listAllMyBucketListResults, err := a.listBuckets()
+	if err != nil {
+		ch <- BucketStat{
+			Err: err,
+		}
+		return
+	}
+	for _, bucket := range listAllMyBucketListResults.Buckets.Bucket {
+		ch <- bucket
+	}
+}
+
+// listObjectsInRoutine goroutine based iterator for listObjects.
+func (a API) listObjectsInRoutine(bucketName, objectPrefix string, recursive bool, ch chan<- ObjectStat) {
+	defer close(ch)
+	// Validate bucket name.
+	if err := isValidBucketName(bucketName); err != nil {
+		ch <- ObjectStat{
+			Err: err,
+		}
+		return
+	}
+	// Validate incoming object prefix.
+	if err := isValidObjectPrefix(objectPrefix); err != nil {
+		ch <- ObjectStat{
+			Err: err,
+		}
+		return
+	}
+	// Recursive listing, do not delimit.
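+	// For example, recursive=true streams every key under objectPrefix as
+	// one flat listing, while recursive=false folds keys at "/" into
+	// common prefixes (see the delimited branch below).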
+	if recursive {
+		var objectMarker string
+		for {
+			result, err := a.listObjects(bucketName, objectPrefix, objectMarker, "", 1000)
+			if err != nil {
+				ch <- ObjectStat{
+					Err: err,
+				}
+				return
+			}
+			for _, object := range result.Contents {
+				ch <- object
+				objectMarker = object.Key
+			}
+			if !result.IsTruncated {
+				break
+			}
+		}
+		return
+	}
+	// Non recursive, delimit with "/".
+	var objectMarker string
+	for {
+		result, err := a.listObjects(bucketName, objectPrefix, objectMarker, "/", 1000)
+		if err != nil {
+			ch <- ObjectStat{
+				Err: err,
+			}
+			return
+		}
+		objectMarker = result.NextMarker
+		for _, object := range result.Contents {
+			ch <- object
+		}
+		for _, obj := range result.CommonPrefixes {
+			object := ObjectStat{}
+			object.Key = obj.Prefix
+			object.Size = 0
+			ch <- object
+		}
+		if !result.IsTruncated {
+			break
+		}
+	}
+}
+
+// listObjectPartsRecursive list all object parts recursively.
+func (a API) listObjectPartsRecursive(bucketName, objectName, uploadID string) <-chan objectPartMetadata {
+	objectPartCh := make(chan objectPartMetadata, 1000)
+	go a.listObjectPartsRecursiveInRoutine(bucketName, objectName, uploadID, objectPartCh)
+	return objectPartCh
+}
+
+// listObjectPartsRecursiveInRoutine goroutine based iterator for listing all object parts.
+func (a API) listObjectPartsRecursiveInRoutine(bucketName, objectName, uploadID string, ch chan<- objectPartMetadata) {
+	defer close(ch)
+	listObjPartsResult, err := a.listObjectParts(bucketName, objectName, uploadID, 0, 1000)
+	if err != nil {
+		ch <- objectPartMetadata{
+			Err: err,
+		}
+		return
+	}
+	for _, uploadedObjectPart := range listObjPartsResult.ObjectParts {
+		ch <- uploadedObjectPart
+	}
+	// Keep listing parts while the results are truncated.
+	for {
+		if !listObjPartsResult.IsTruncated {
+			break
+		}
+		nextPartNumberMarker := listObjPartsResult.NextPartNumberMarker
+		listObjPartsResult, err = a.listObjectParts(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+		if err != nil {
+			ch <- objectPartMetadata{
+				Err: err,
+			}
+			return
+		}
+		for _, uploadedObjectPart := range listObjPartsResult.ObjectParts {
+			ch <- uploadedObjectPart
+		}
+	}
+}
+
+// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
+func (a API) getTotalMultipartSize(bucketName, objectName, uploadID string) (int64, error) {
+	var size int64
+	// Iterate over all parts and aggregate the size.
+	for part := range a.listObjectPartsRecursive(bucketName, objectName, uploadID) {
+		if part.Err != nil {
+			return 0, part.Err
+		}
+		size += part.Size
+	}
+	return size, nil
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (a API) listIncompleteUploads(bucketName, objectName string, recursive bool) <-chan ObjectMultipartStat {
+	ch := make(chan ObjectMultipartStat, 1000)
+	go a.listIncompleteUploadsInRoutine(bucketName, objectName, recursive, ch)
+	return ch
+}
+
+// listIncompleteUploadsInRoutine goroutine based iterator for listing all incomplete uploads.
+func (a API) listIncompleteUploadsInRoutine(bucketName, objectPrefix string, recursive bool, ch chan<- ObjectMultipartStat) {
+	defer close(ch)
+	// Validate incoming bucket name.
+	if err := isValidBucketName(bucketName); err != nil {
+		ch <- ObjectMultipartStat{
+			Err: err,
+		}
+		return
+	}
+	// Validate incoming object prefix.
+	if err := isValidObjectPrefix(objectPrefix); err != nil {
+		ch <- ObjectMultipartStat{
+			Err: err,
+		}
+		return
+	}
+	// Recursive listing, no delimiter.
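+	// NOTE: upload sizes below are aggregated with getTotalMultipartSize,
+	// which costs one or more extra listObjectParts calls per upload.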
+	if recursive {
+		var objectMarker string
+		var uploadIDMarker string
+		for {
+			result, err := a.listMultipartUploads(bucketName, objectMarker, uploadIDMarker, objectPrefix, "", 1000)
+			if err != nil {
+				ch <- ObjectMultipartStat{
+					Err: err,
+				}
+				return
+			}
+			for _, objectSt := range result.Uploads {
+				// NOTE: getTotalMultipartSize can make listing incomplete uploads slower.
+				objectSt.Size, err = a.getTotalMultipartSize(bucketName, objectSt.Key, objectSt.UploadID)
+				if err != nil {
+					ch <- ObjectMultipartStat{
+						Err: err,
+					}
+				}
+				ch <- objectSt
+				objectMarker = result.NextKeyMarker
+				uploadIDMarker = result.NextUploadIDMarker
+			}
+			if !result.IsTruncated {
+				break
+			}
+		}
+		return
+	}
+	// Non-recursive listing: delimit with "/".
+	var objectMarker string
+	var uploadIDMarker string
+	for {
+		result, err := a.listMultipartUploads(bucketName, objectMarker, uploadIDMarker, objectPrefix, "/", 1000)
+		if err != nil {
+			ch <- ObjectMultipartStat{
+				Err: err,
+			}
+			return
+		}
+		objectMarker = result.NextKeyMarker
+		uploadIDMarker = result.NextUploadIDMarker
+		for _, objectSt := range result.Uploads {
+			objectSt.Size, err = a.getTotalMultipartSize(bucketName, objectSt.Key, objectSt.UploadID)
+			if err != nil {
+				ch <- ObjectMultipartStat{
+					Err: err,
+				}
+			}
+			ch <- objectSt
+		}
+		for _, obj := range result.CommonPrefixes {
+			object := ObjectMultipartStat{}
+			object.Key = obj.Prefix
+			object.Size = 0
+			ch <- object
+		}
+		if !result.IsTruncated {
+			break
+		}
+	}
+}
+
+// removeIncompleteUploadInRoutine iterates over all incomplete uploads
+// and removes only the input object name.
+func (a API) removeIncompleteUploadInRoutine(bucketName, objectName string, errorCh chan<- error) {
+	defer close(errorCh)
+	// Validate incoming bucket name.
+	if err := isValidBucketName(bucketName); err != nil {
+		errorCh <- err
+		return
+	}
+	// Validate incoming object name.
+	if err := isValidObjectName(objectName); err != nil {
+		errorCh <- err
+		return
+	}
+	// List all incomplete uploads recursively.
+	for mpUpload := range a.listIncompleteUploads(bucketName, objectName, true) {
+		if objectName == mpUpload.Key {
+			err := a.abortMultipartUpload(bucketName, mpUpload.Key, mpUpload.UploadID)
+			if err != nil {
+				errorCh <- err
+				return
+			}
+			return
+		}
+	}
+}
diff --git a/parts-manager.go b/parts-manager.go
index 1e78c4e543..ba2cfcd848 100644
--- a/parts-manager.go
+++ b/parts-manager.go
@@ -27,104 +27,76 @@ import (
// backed by a temporary file which purges itself upon Close().
//
// This method runs until an EOF or an error occurs. Before returning, the channel is always closed.
-func partsManager(reader io.Reader, partSize int64, isEnableSha256Sum bool) <-chan partMetadata {
+func partsManager(reader io.Reader, partSize int64, enableSha256Sum bool) <-chan partMetadata {
	ch := make(chan partMetadata, 3)
-	go partsManagerInRoutine(reader, partSize, isEnableSha256Sum, ch)
+	go partsManagerInRoutine(reader, partSize, enableSha256Sum, ch)
	return ch
}

-func partsManagerInRoutine(reader io.Reader, partSize int64, isEnableSha256Sum bool, ch chan<- partMetadata) {
+func partsManagerInRoutine(reader io.Reader, partSize int64, enableSha256Sum bool, ch chan<- partMetadata) {
	defer close(ch)
-	tmpFile, err := newTempFile("multiparts$")
-	if err != nil {
-		ch <- partMetadata{
-			Err: err,
-		}
-		return
-	}
-	var hashMD5 hash.Hash
-	var hashSha256 hash.Hash
+	// Any error generated when creating parts.
+	var err error
+	// Size of each part read; may be shorter than partSize.
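+	// A part shorter than partSize means the reader reached EOF and
+	// this is the final part.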
+ var size int64 + // Tempfile structure backed by Closer to clean itself up. + var tmpFile *tempFile + // MD5 and Sha256 hasher. + var hashMD5, hashSha256 hash.Hash + // Collective multi writer. var writer io.Writer - hashMD5 = md5.New() - mwwriter := io.MultiWriter(hashMD5) - if isEnableSha256Sum { - hashSha256 = sha256.New() - mwwriter = io.MultiWriter(hashMD5, hashSha256) - } - writer = io.MultiWriter(tmpFile, mwwriter) - n, err := io.CopyN(writer, reader, partSize) - if err == io.EOF || err == io.ErrUnexpectedEOF { + for { + tmpFile, err = newTempFile("multiparts$") + if err != nil { + break + } + // Create a hash multiwriter. + hashMD5 = md5.New() + hashWriter := io.MultiWriter(hashMD5) + if enableSha256Sum { + hashSha256 = sha256.New() + hashWriter = io.MultiWriter(hashMD5, hashSha256) + } + writer = io.MultiWriter(tmpFile, hashWriter) + size, err = io.CopyN(writer, reader, partSize) + if err != nil { + break + } // Seek back to beginning. tmpFile.Seek(0, 0) - - // short read, only single partMetadata return. partMdata := partMetadata{ MD5Sum: hashMD5.Sum(nil), ReadCloser: tmpFile, - Size: n, + Size: size, Err: nil, } - if isEnableSha256Sum { + if enableSha256Sum { partMdata.Sha256Sum = hashSha256.Sum(nil) } ch <- partMdata - return - } - // unknown error considered catastrophic error, return here. - if err != nil { - ch <- partMetadata{ - Err: err, - } - return - } - // Seek back to beginning. - tmpFile.Seek(0, 0) - partMdata := partMetadata{ - MD5Sum: hashMD5.Sum(nil), - ReadCloser: tmpFile, - Size: n, - Err: nil, } - if isEnableSha256Sum { - partMdata.Sha256Sum = hashSha256.Sum(nil) - } - ch <- partMdata - for err == nil { - var n int64 - tmpFile, err = newTempFile("multiparts$") - if err != nil { - ch <- partMetadata{ - Err: err, - } - return - } - hashMD5 = md5.New() - mwwriter := io.MultiWriter(hashMD5) - if isEnableSha256Sum { - hashSha256 = sha256.New() - mwwriter = io.MultiWriter(hashMD5, hashSha256) - } - writer = io.MultiWriter(tmpFile, mwwriter) - n, err = io.CopyN(writer, reader, partSize) - if err != nil { - if err != io.EOF && err != io.ErrUnexpectedEOF { // catastrophic error - ch <- partMetadata{ - Err: err, - } - return - } - } + // If end of file reached, we send the last part. + if err == io.EOF { // Seek back to beginning. tmpFile.Seek(0, 0) + + // last part. partMdata := partMetadata{ MD5Sum: hashMD5.Sum(nil), ReadCloser: tmpFile, - Size: n, + Size: size, Err: nil, } - if isEnableSha256Sum { + if enableSha256Sum { partMdata.Sha256Sum = hashSha256.Sum(nil) } ch <- partMdata + return + } + if err != io.EOF { + ch <- partMetadata{ + Err: err, + } + return } } diff --git a/put-object.go b/put-object.go new file mode 100644 index 0000000000..89332fd88c --- /dev/null +++ b/put-object.go @@ -0,0 +1,240 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"runtime"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// maxConcurrentQueue - max concurrent upload queue, defaults to number of CPUs - 1.
+var maxConcurrentQueue = int(math.Max(float64(runtime.NumCPU())-1, 1))
+
+// completedParts is a wrapper to make parts sortable by their part numbers.
+// Multipart completion requires the list of parts to be sorted.
+type completedParts []completePart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// putParts - fully managed multipart uploader, resumes where it left off at `uploadID`.
+func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeker, size int64) error {
+	// Clean up any previously leftover stale files when the function exits.
+	defer cleanupStaleTempfiles("multiparts$")
+
+	var seekOffset int64
+	partNumber := 1
+	completeMultipartUpload := completeMultipartUpload{}
+	for objPart := range a.listObjectPartsRecursive(bucketName, objectName, uploadID) {
+		if objPart.Err != nil {
+			return objPart.Err
+		}
+		// Verify if there is a hole, i.e., one of the parts is missing.
+		// Break and start uploading that part.
+		if partNumber != objPart.PartNumber {
+			break
+		}
+		var completedPart completePart
+		completedPart.PartNumber = objPart.PartNumber
+		completedPart.ETag = objPart.ETag
+		completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart)
+		seekOffset += objPart.Size // Add seek offset for future Seek to skip entries.
+		// Increment the part number to detect holes in the next iteration.
+		partNumber++
+	}
+
+	// Calculate the optimal part size for a given size.
+	partSize := calculatePartSize(size)
+
+	type erroredPart struct {
+		err    error
+		closer io.ReadCloser
+	}
+	// Allocate buffered error channel for maximum parts.
+	errCh := make(chan erroredPart, maxParts)
+
+	// Allocate buffered upload part channel.
+	uploadedPartsCh := make(chan completePart, maxParts)
+
+	// Limit multipart queue size to max concurrent queue, defaults to NCPUs - 1.
+	mpQueueCh := make(chan struct{}, maxConcurrentQueue)
+
+	// Close all our channels.
+	defer close(errCh)
+	defer close(mpQueueCh)
+	defer close(uploadedPartsCh)
+
+	// Allocate a new wait group.
+	wg := new(sync.WaitGroup)
+
+	// Seek to the new offset if greater than '0'.
+	if seekOffset > 0 {
+		if _, err := data.Seek(seekOffset, 0); err != nil {
+			return err
+		}
+	}
+
+	var enableSha256Sum bool
+	// If signature V4 - enable Sha256 calculation for individual parts.
+	if a.credentials.Signature.isV4() {
+		enableSha256Sum = true
+	}
+
+	// Chunk all parts at partSize and start uploading.
+	for part := range partsManager(data, partSize, enableSha256Sum) {
+		// Limit to NCPUs-1 parts at a given time.
+		mpQueueCh <- struct{}{}
+		// Account for all parts uploaded simultaneously.
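+		// Each upload goroutine below calls wg.Done exactly once; the
+		// matching wg.Wait after the loop blocks until every part has
+		// finished.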
+		wg.Add(1)
+		part.Number = partNumber
+		go func(mpQueueCh <-chan struct{}, part partMetadata, wg *sync.WaitGroup,
+			errCh chan<- erroredPart, uploadedPartsCh chan<- completePart) {
+			defer wg.Done()
+			defer func() {
+				<-mpQueueCh
+			}()
+			if part.Err != nil {
+				errCh <- erroredPart{
+					err:    part.Err,
+					closer: part.ReadCloser,
+				}
+				return
+			}
+			complPart, err := a.uploadPart(bucketName, objectName, uploadID, part)
+			if err != nil {
+				errCh <- erroredPart{
+					err:    err,
+					closer: part.ReadCloser,
+				}
+				return
+			}
+			uploadedPartsCh <- complPart
+			errCh <- erroredPart{
+				err: nil,
+			}
+		}(mpQueueCh, part, wg, errCh, uploadedPartsCh)
+		// If any error occurred, return right here.
+		if erroredPrt, ok := <-errCh; ok {
+			if erroredPrt.err != nil {
+				// Close the part to remove it from disk.
+				erroredPrt.closer.Close()
+				return erroredPrt.err
+			}
+		}
+		// If successfully uploaded, save the part in Parts.
+		if uploadedPart, ok := <-uploadedPartsCh; ok {
+			completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, uploadedPart)
+		}
+		partNumber++
+	}
+	wg.Wait()
+	sort.Sort(completedParts(completeMultipartUpload.Parts))
+	_, err := a.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// putNoChecksum special function used for anonymous uploads and Google Cloud Storage.
+// It is necessary since Amazon S3 doesn't allow multipart uploads for anonymous
+// requests, and it is also used for Google Cloud Storage since its multipart API
+// is not S3 compatible.
+func (a API) putNoChecksum(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) error {
+	if size > maxPartSize {
+		return ErrorResponse{
+			Code:       "EntityTooLarge",
+			Message:    "Your proposed upload exceeds the maximum allowed object size '5GB' for single PUT operation.",
+			BucketName: bucketName,
+			Key:        objectName,
+		}
+	}
+	// For anonymous requests, we will not calculate sha256 and md5sum.
+	putObjMetadata := putObjectMetadata{
+		MD5Sum:      nil,
+		Sha256Sum:   nil,
+		ReadCloser:  ioutil.NopCloser(data),
+		Size:        size,
+		ContentType: contentType,
+	}
+	if _, err := a.putObject(bucketName, objectName, putObjMetadata); err != nil {
+		return err
+	}
+	return nil
+}
+
+// putSmallObject uploads files smaller than 5 megabytes.
+func (a API) putSmallObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) error {
+	dataBytes, err := ioutil.ReadAll(data)
+	if err != nil {
+		return err
+	}
+	if int64(len(dataBytes)) != size {
+		msg := fmt.Sprintf("Data read ‘%s’ is not equal to expected size ‘%s’",
+			strconv.FormatInt(int64(len(dataBytes)), 10), strconv.FormatInt(size, 10))
+		return ErrorResponse{
+			Code:       "UnexpectedShortRead",
+			Message:    msg,
+			BucketName: bucketName,
+			Key:        objectName,
+		}
+	}
+	putObjMetadata := putObjectMetadata{
+		MD5Sum:      sumMD5(dataBytes),
+		Sha256Sum:   sum256(dataBytes),
+		ReadCloser:  ioutil.NopCloser(bytes.NewReader(dataBytes)),
+		Size:        size,
+		ContentType: contentType,
+	}
+	// Single part use case, use putObject directly.
+	if _, err = a.putObject(bucketName, objectName, putObjMetadata); err != nil {
+		return err
+	}
+	return nil
+}
+
+// putLargeObject uploads files bigger than 5 megabytes.
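+// It resumes a previously interrupted multipart upload when an
+// incomplete session already exists for the same object name.
+//
+// A sketch of internal usage (illustrative only; `reader` and `size`
+// are supplied by the caller):
+//
+//	err := a.putLargeObject("mybucket", "myobject", reader, size, "application/octet-stream")
+//	if err != nil {
+//		// handle error
+//	}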
+func (a API) putLargeObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) error { + var uploadID string + isRecursive := true + for mpUpload := range a.listIncompleteUploads(bucketName, objectName, isRecursive) { + if mpUpload.Err != nil { + return mpUpload.Err + } + if mpUpload.Key == objectName { + uploadID = mpUpload.UploadID + break + } + } + if uploadID == "" { + initMultipartUploadResult, err := a.initiateMultipartUpload(bucketName, objectName, contentType) + if err != nil { + return err + } + uploadID = initMultipartUploadResult.UploadID + } + // Initiate multipart upload. + return a.putParts(bucketName, objectName, uploadID, data, size) +} diff --git a/request-signature-v2.go b/request-signature-v2.go index 74fa9abc6c..6137d11f95 100644 --- a/request-signature-v2.go +++ b/request-signature-v2.go @@ -31,11 +31,31 @@ import ( "time" ) +// Encode input URL path to URL encoded path. +func encodeURL2Path(u *url.URL) string { + // Encode URL path. + var path string + if !isAmazonEndpoint(u) && !isGoogleEndpoint(u) { + path = urlEncodePath(u.Path) + } + if strings.HasSuffix(u.Host, ".s3.amazonaws.com") { + path = "/" + strings.TrimSuffix(u.Host, ".s3.amazonaws.com") + path += u.Path + path = urlEncodePath(path) + } + if strings.HasSuffix(u.Host, ".storage.googleapis.com") { + path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com") + path += u.Path + path = urlEncodePath(path) + } + return path +} + // PreSignV2 - presign the request in following style. // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} func (r *Request) PreSignV2() (string, error) { // if config is anonymous then presigning cannot be achieved, throw an error. - if r.config.isAnonymous() { + if isAnonymousCredentials(*r.credentials) { return "", errors.New("Presigning cannot be achieved with anonymous credentials") } d := time.Now().UTC() @@ -43,37 +63,26 @@ func (r *Request) PreSignV2() (string, error) { if date := r.Get("Date"); date == "" { r.Set("Date", d.Format(http.TimeFormat)) } - var path string - // Encode URL path. - if r.config.isVirtualHostedStyle { - for k, v := range regions { - if v == r.config.Region { - path = "/" + strings.TrimSuffix(r.req.URL.Host, "."+k) - path += r.req.URL.Path - path = getURLEncodedPath(path) - break - } - } - } else { - path = getURLEncodedPath(r.req.URL.Path) - } + + // Get encoded URL path. + path := encodeURL2Path(r.req.URL) // Find epoch expires when the request will expire. epochExpires := d.Unix() + r.expires // get string to sign. stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", r.req.Method, epochExpires, path) - hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) + hm := hmac.New(sha1.New, []byte(r.credentials.SecretAccessKey)) hm.Write([]byte(stringToSign)) // calculate signature. signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) query := r.req.URL.Query() // Handle specially for Google Cloud Storage. - if r.config.Region == "google" { - query.Set("GoogleAccessId", r.config.AccessKeyID) + if strings.Contains(r.req.URL.Host, ".storage.googleapis.com") { + query.Set("GoogleAccessId", r.credentials.AccessKeyID) } else { - query.Set("AWSAccessKeyId", r.config.AccessKeyID) + query.Set("AWSAccessKeyId", r.credentials.AccessKeyID) } // Fill in Expires and Signature for presigned query. 
@@ -86,7 +95,7 @@ func (r *Request) PreSignV2() (string, error) { // PostPresignSignatureV2 - presigned signature for PostPolicy request func (r *Request) PostPresignSignatureV2(policyBase64 string) string { - hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) + hm := hmac.New(sha1.New, []byte(r.credentials.SecretAccessKey)) hm.Write([]byte(policyBase64)) signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) return signature @@ -120,12 +129,12 @@ func (r *Request) SignV2() { // Calculate HMAC for secretAccessKey. stringToSign := r.getStringToSignV2() - hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey)) + hm := hmac.New(sha1.New, []byte(r.credentials.SecretAccessKey)) hm.Write([]byte(stringToSign)) // Prepare auth header. authHeader := new(bytes.Buffer) - authHeader.WriteString(fmt.Sprintf("AWS %s:", r.config.AccessKeyID)) + authHeader.WriteString(fmt.Sprintf("AWS %s:", r.credentials.AccessKeyID)) encoder := base64.NewEncoder(base64.StdEncoding, authHeader) encoder.Write(hm.Sum(nil)) encoder.Close() @@ -231,18 +240,10 @@ var resourceList = []string{ // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; func (r *Request) writeCanonicalizedResource(buf *bytes.Buffer) error { requestURL := r.req.URL - if r.config.isVirtualHostedStyle { - for k, v := range regions { - if v == r.config.Region { - path := "/" + strings.TrimSuffix(requestURL.Host, "."+k) - path += requestURL.Path - buf.WriteString(getURLEncodedPath(path)) - break - } - } - } else { - buf.WriteString(getURLEncodedPath(requestURL.Path)) - } + // Get encoded URL path. + path := encodeURL2Path(requestURL) + buf.WriteString(path) + sort.Strings(resourceList) if requestURL.RawQuery != "" { var n int diff --git a/request-signature-v4.go b/request-signature-v4.go index bd02f22c72..02d50efff6 100644 --- a/request-signature-v4.go +++ b/request-signature-v4.go @@ -173,7 +173,7 @@ func (r *Request) getCanonicalRequest() string { r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1) canonicalRequest := strings.Join([]string{ r.req.Method, - getURLEncodedPath(r.req.URL.Path), + urlEncodePath(r.req.URL.Path), r.req.URL.RawQuery, r.getCanonicalHeaders(), r.getSignedHeaders(), @@ -185,7 +185,7 @@ func (r *Request) getCanonicalRequest() string { // getStringToSign a string based on selected query values. func (r *Request) getStringToSignV4(canonicalRequest string, t time.Time) string { stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n" - stringToSign = stringToSign + getScope(r.config.Region, t) + "\n" + stringToSign = stringToSign + getScope(r.bucketRegion, t) + "\n" stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) return stringToSign } @@ -193,16 +193,16 @@ func (r *Request) getStringToSignV4(canonicalRequest string, t time.Time) string // PreSignV4 presign the request, in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. func (r *Request) PreSignV4() (string, error) { - if r.config.isAnonymous() { + if isAnonymousCredentials(*r.credentials) { return "", errors.New("presigning cannot be done with anonymous credentials") } // Initial time. t := time.Now().UTC() // get credential string. - credential := getCredential(r.config.AccessKeyID, r.config.Region, t) + credential := getCredential(r.credentials.AccessKeyID, r.bucketRegion, t) // get hmac signing key. 
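	// (For signature version '4' the signing key is an HMAC chain
	// derived from the secret key, the date, the region and the
	// service name.)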
-	signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
+	signingKey := getSigningKey(r.credentials.SecretAccessKey, r.bucketRegion, t)

	// Get all signed headers.
	signedHeaders := r.getSignedHeaders()
@@ -227,14 +227,14 @@ func (r *Request) PreSignV4() (string, error) {

// PostPresignSignatureV4 - presigned signature for PostPolicy requests.
func (r *Request) PostPresignSignatureV4(policyBase64 string, t time.Time) string {
-	signingkey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
+	signingkey := getSigningKey(r.credentials.SecretAccessKey, r.bucketRegion, t)
	signature := getSignature(signingkey, policyBase64)
	return signature
}

// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func (r *Request) SignV4(presign bool) {
+func (r *Request) SignV4() {
	// Initial time.
	t := time.Now().UTC()
	// Set x-amz-date.
@@ -246,9 +246,9 @@ func (r *Request) SignV4(presign bool) {
	stringToSign := r.getStringToSignV4(r.getCanonicalRequest(), t)

	// get credential string.
-	credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
+	credential := getCredential(r.credentials.AccessKeyID, r.bucketRegion, t)
	// get hmac signing key.
-	signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
+	signingKey := getSigningKey(r.credentials.SecretAccessKey, r.bucketRegion, t)

	// calculate signature.
	signature := getSignature(signingKey, stringToSign)
diff --git a/request.go b/request.go
index e1db879f65..63d2bbadec 100644
--- a/request.go
+++ b/request.go
@@ -21,61 +21,81 @@ import (
	"encoding/hex"
	"io"
	"net/http"
-	"regexp"
-	"strings"
-	"unicode/utf8"
+	"net/url"
)

-// operation - rest operation.
-type operation struct {
-	HTTPServer string
-	HTTPMethod string
-	HTTPPath   string
-}
-
// Request - a http request.
type Request struct {
-	req     *http.Request
-	config  *Config
-	expires int64
+	req          *http.Request
+	credentials  *clientCredentials
+	transport    http.RoundTripper
+	bucketRegion string
+	expires      int64
}

-// requestMetadata a http request metadata.
+// requestMetadata - is a container for all the values needed to make a request.
type requestMetadata struct {
-	body               io.ReadCloser
-	contentType        string
+	// User supplied.
+	expires          int64
+	userAgent        string
+	bucketRegion     string
+	credentials      *clientCredentials
+	contentTransport http.RoundTripper
+	contentHeader    http.Header
+
+	// Generated by our internal code.
+	contentBody        io.ReadCloser
	contentLength      int64
-	sha256PayloadBytes []byte
-	md5SumPayloadBytes []byte
+	contentSha256Bytes []byte
+	contentMD5Bytes    []byte
+}
+
+// getTargetURL - construct an encoded URL for the request.
+func getTargetURL(endpoint *url.URL, bucketName, objectName string, queryValues url.Values) (*url.URL, error) {
+	var urlStr string
+	// If endpoint supports virtual host style use that always.
+	// Currently only S3 and Google Cloud Storage would support this.
+	if isVirtualHostSupported(endpoint) {
+		urlStr = endpoint.Scheme + "://" + bucketName + "." + endpoint.Host
+		urlStr = urlStr + "/" + urlEncodePath(objectName)
+	} else {
+		// If not, fall back to using path style.
+		urlStr = endpoint.Scheme + "://" + endpoint.Host + "/" + bucketName
+		if objectName != "" {
+			urlStr = urlStr + "/" + urlEncodePath(objectName)
+		}
+	}
+	// If there are any query values, add them to the end.
+	if len(queryValues) > 0 {
+		urlStr = urlStr + "?" + queryValues.Encode()
+	}
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, err
+	}
+	return u, nil
+}

// Do - start the request.
func (r *Request) Do() (resp *http.Response, err error) {
	// if not an anonymous request, calculate relevant signature.
-	if !r.config.isAnonymous() {
-		if r.config.Signature.isV2() {
+	if !isAnonymousCredentials(*r.credentials) {
+		if r.credentials.Signature.isV2() {
			// if signature version '2' requested, use that.
			r.SignV2()
		}
-		if r.config.Signature.isV4() || r.config.Signature.isLatest() {
+		if r.credentials.Signature.isV4() || r.credentials.Signature.isLatest() {
			// Not a presigned request, set behavior to default.
-			presign := false
-			r.SignV4(presign)
+			r.SignV4()
		}
	}
+	// Use custom transport if any.
	transport := http.DefaultTransport
-	if r.config.Transport != nil {
-		transport = r.config.Transport
+	if r.transport != nil {
+		transport = r.transport
	}
-	// do not use http.Client{}, while it may seem intuitive but the problem seems to be
-	// that http.Client{} internally follows redirects and there is no easier way to disable
-	// it from outside using a configuration parameter -
-	// this auto redirect causes complications in verifying subsequent errors
-	//
-	// The best is to use RoundTrip() directly, so the request comes back to the caller where
-	// we are going to handle such replies. And indeed that is the right thing to do here.
-	//
-	return transport.RoundTrip(r.req)
+	client := &http.Client{Transport: transport}
+	return client.Do(r.req)
}

// Set - set additional headers if any.
@@ -88,178 +108,41 @@ func (r *Request) Get(key string) string {
	return r.req.Header.Get(key)
}

-// path2BucketAndObject - extract bucket and object names from URL path.
-func path2BucketAndObject(path string) (bucketName, objectName string) {
-	pathSplits := strings.SplitN(path, "?", 2)
-	splits := strings.SplitN(pathSplits[0], separator, 3)
-	switch len(splits) {
-	case 0, 1:
-		bucketName = ""
-		objectName = ""
-	case 2:
-		bucketName = splits[1]
-		objectName = ""
-	case 3:
-		bucketName = splits[1]
-		objectName = splits[2]
-	}
-	return bucketName, objectName
-}
-
-// path2Object gives objectName from URL path
-func path2Object(path string) (objectName string) {
-	_, objectName = path2BucketAndObject(path)
-	return
-}
-
-// path2Bucket gives bucketName from URL path
-func path2Bucket(path string) (bucketName string) {
-	bucketName, _ = path2BucketAndObject(path)
-	return
-}
-
-// path2Query gives query part from URL path
-func path2Query(path string) (query string) {
-	pathSplits := strings.SplitN(path, "?", 2)
-	if len(pathSplits) > 1 {
-		query = pathSplits[1]
-	}
-	return
-}
-
-// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func getURLEncodedPath(pathName string) string { - // if object matches reserved string, no need to encode them - reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - if reservedNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } - } - return encodedPathname -} - -// getRequetURL - get a properly encoded request URL. -func (op *operation) getRequestURL(config Config) (url string) { - // parse URL for the combination of HTTPServer + HTTPPath - url = op.HTTPServer + separator - if !config.isVirtualHostedStyle { - url += path2Bucket(op.HTTPPath) - } - objectName := getURLEncodedPath(path2Object(op.HTTPPath)) - queryPath := path2Query(op.HTTPPath) - if objectName == "" && queryPath != "" { - url += "?" + queryPath - return - } - if objectName != "" && queryPath == "" { - if strings.HasSuffix(url, separator) { - url += objectName - } else { - url += separator + objectName - } - return - } - if objectName != "" && queryPath != "" { - if strings.HasSuffix(url, separator) { - url += objectName + "?" + queryPath - } else { - url += separator + objectName + "?" + queryPath - } - } - return -} - -// newPresignedRequest - provides a new instance of *Request* for presign operations. -func newPresignedRequest(op *operation, config *Config, expires int64) (*Request, error) { - // if no method default to POST. - method := op.HTTPMethod +// newRequest - provides a new instance of *Request*. +func newRequest(method string, targetURL *url.URL, metadata requestMetadata) (*Request, error) { if method == "" { method = "POST" } - - u := op.getRequestURL(*config) - + urlStr := targetURL.String() // get a new HTTP request, for the requested method. - req, err := http.NewRequest(method, u, nil) + req, err := http.NewRequest(method, urlStr, nil) if err != nil { return nil, err } - // set UserAgent. - req.Header.Set("User-Agent", config.userAgent) + // Set content body if available. + if metadata.contentBody != nil { + req.Body = metadata.contentBody + } // save for subsequent use. r := new(Request) - r.config = config - r.expires = expires r.req = req + r.credentials = metadata.credentials + r.bucketRegion = metadata.bucketRegion - return r, nil -} - -// newRequest - provides a new instance of *Request*. -func newRequest(op *operation, config *Config, metadata requestMetadata) (*Request, error) { - // if no method default to POST. - method := op.HTTPMethod - if method == "" { - method = "POST" - } - - u := op.getRequestURL(*config) - // get a new HTTP request, for the requested method. - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - // set UserAgent. - req.Header.Set("User-Agent", config.userAgent) - - // add body. - switch { - case metadata.body == nil: - req.Body = nil - default: - req.Body = metadata.body + // If presigned request, return. 
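+	// A presigned request carries its signature in the query string,
+	// so the user agent and other headers are not set on it.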
+ if metadata.expires != 0 { + r.expires = metadata.expires + return r, nil } - // save for subsequent use. - r := new(Request) - r.config = config - r.req = req + // set UserAgent for the request. + r.Set("User-Agent", metadata.userAgent) - // Set contentType for the request. - if metadata.contentType != "" { - r.Set("Content-Type", metadata.contentType) + // Set all headers. + for k, v := range metadata.contentHeader { + r.Set(k, v[0]) } // set incoming content-length. @@ -268,15 +151,18 @@ func newRequest(op *operation, config *Config, metadata requestMetadata) (*Reque } // set sha256 sum for signature calculation only with signature version '4'. - if r.config.Signature.isV4() || r.config.Signature.isLatest() { + if r.credentials.Signature.isV4() || r.credentials.Signature.isLatest() { r.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) - if metadata.sha256PayloadBytes != nil { - r.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.sha256PayloadBytes)) + if metadata.contentSha256Bytes != nil { + r.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes)) } } + // set md5Sum for content protection. - if metadata.md5SumPayloadBytes != nil { - r.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.md5SumPayloadBytes)) + if metadata.contentMD5Bytes != nil { + r.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) } + + // return request. return r, nil } diff --git a/s3-api-errors.go b/s3-api-errors.go deleted file mode 100644 index 7b3c82599a..0000000000 --- a/s3-api-errors.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import "net/http" - -// TODO - handle this automatically by re-writing the request as virtual style. -// -// For path style requests on buckets with wrong endpoint s3 returns back a -// generic error. Following block of code tries to make this meaningful for -// the user by fetching the proper endpoint. Additionally it also sets AmzBucketRegion. -func (a s3API) handleStatusMovedPermanently(resp *http.Response, bucketName, objectName string) ErrorResponse { - errorResponse := ErrorResponse{ - Code: "PermanentRedirect", - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), - } - errorResponse.Resource = separator + bucketName - if objectName != "" { - errorResponse.Resource = separator + bucketName + separator + objectName - } - var endPoint string - if errorResponse.AmzBucketRegion != "" { - region := errorResponse.AmzBucketRegion - endPoint = getEndpoint(region) - } else { - region, err := a.getBucketLocation(bucketName) - if err != nil { - return *ToErrorResponse(err) - } - endPoint = getEndpoint(region) - } - msg := "The bucket you are attempting to access must be addressed using the specified endpoint https://" + endPoint + ". 
Send all future requests to this endpoint."
-	errorResponse.Message = msg
-	return errorResponse
-}
diff --git a/s3-api.go b/s3-api.go
index 6fbafa2f69..ffa0be6823 100644
--- a/s3-api.go
+++ b/s3-api.go
@@ -23,20 +23,13 @@ import (
	"io"
	"io/ioutil"
	"net/http"
+	"net/url"
+	"path/filepath"
	"strconv"
	"strings"
	"time"
)

-const (
-	separator = "/"
-)
-
-// s3API container to hold unexported internal functions.
-type s3API struct {
-	config *Config
-}
-
// closeResp close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
@@ -57,54 +50,99 @@ func closeResp(resp *http.Response) {
	}
}

+// setRegion - set region for the bucketName in private region map cache.
+func (a API) setRegion(bucketName string) (string, error) {
+	// If signature version '2', no need to fetch bucket location.
+	if a.credentials.Signature.isV2() {
+		return "us-east-1", nil
+	}
+	if a.credentials.Signature.isV4() && !isAmazonEndpoint(a.endpointURL) {
+		return "us-east-1", nil
+	}
+	// get bucket location.
+	location, err := a.getBucketLocation(bucketName)
+	if err != nil {
+		return "", err
+	}
+	// location is region in context of S3 API.
+	a.mutex.Lock()
+	a.regionMap[bucketName] = location
+	a.mutex.Unlock()
+	return location, nil
+}
+
+// getRegion - get region for the bucketName from region map cache.
+func (a API) getRegion(bucketName string) (string, error) {
+	// If signature version '2', no need to fetch bucket location.
+	if a.credentials.Signature.isV2() {
+		return "us-east-1", nil
+	}
+	// If signature version is '4' or latest and the endpoint is not
+	// Amazon, return 'us-east-1'.
+	if a.credentials.Signature.isV4() || a.credentials.Signature.isLatest() {
+		if !isAmazonEndpoint(a.endpointURL) {
+			return "us-east-1", nil
+		}
+	}
+	// Search through the regionMap, protected by the mutex.
+	a.mutex.Lock()
+	region, ok := a.regionMap[bucketName]
+	a.mutex.Unlock()
+	// return if found.
+	if ok {
+		return region, nil
+	}
+	// Set region if no region was found for a bucket.
+	region, err := a.setRegion(bucketName)
+	if err != nil {
+		return "us-east-1", err
+	}
+	return region, nil
+}
+
// putBucketRequest wrapper creates a new putBucket request.
-func (a s3API) putBucketRequest(bucket, acl, location string) (*Request, error) {
-	var r *Request
-	var err error
-	op := &operation{
-		HTTPServer: a.config.Endpoint,
-		HTTPMethod: "PUT",
-		HTTPPath:   separator + bucket,
-	}
-	var createBucketConfigBuffer *bytes.Buffer
-	// If location is set use it and create proper bucket configuration.
-	switch {
-	case location != "":
+func (a API) putBucketRequest(bucketName, acl, region string) (*Request, error) {
+	// get target URL.
+	targetURL, err := getTargetURL(a.endpointURL, bucketName, "", url.Values{})
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize request metadata.
+	var rmetadata requestMetadata
+	rmetadata = requestMetadata{
+		userAgent:    a.userAgent,
+		credentials:  a.credentials,
+		bucketRegion: region,
+	}
+
+	// If region is set, use it to create the bucket location config.
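+	// Amazon S3 requires a CreateBucketConfiguration with a location
+	// constraint for any region other than us-east-1.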
+ if region != "" { createBucketConfig := new(createBucketConfiguration) - createBucketConfig.Location = location + createBucketConfig.Location = region var createBucketConfigBytes []byte createBucketConfigBytes, err = xml.Marshal(createBucketConfig) if err != nil { return nil, err } - createBucketConfigBuffer = bytes.NewBuffer(createBucketConfigBytes) + createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) + rmetadata.contentBody = ioutil.NopCloser(createBucketConfigBuffer) + rmetadata.contentLength = int64(createBucketConfigBuffer.Len()) + rmetadata.contentSha256Bytes = sum256(createBucketConfigBuffer.Bytes()) } - switch { - case createBucketConfigBuffer == nil: - r, err = newRequest(op, a.config, requestMetadata{}) - if err != nil { - return nil, err - } - default: - rmetadata := requestMetadata{ - body: ioutil.NopCloser(createBucketConfigBuffer), - contentLength: int64(createBucketConfigBuffer.Len()), - sha256PayloadBytes: sum256(createBucketConfigBuffer.Bytes()), - } - r, err = newRequest(op, a.config, rmetadata) - if err != nil { - return nil, err - } - } - // by default bucket is private - switch { - case acl != "": - r.Set("x-amz-acl", acl) - default: - r.Set("x-amz-acl", "private") + + // Initialize new request. + req, err := newRequest("PUT", targetURL, rmetadata) + if err != nil { + return nil, err } - return r, nil + // by default bucket acl is set to private. + req.Set("x-amz-acl", "private") + if acl != "" { + req.Set("x-amz-acl", acl) + } + return req, nil } /// Bucket Write Operations @@ -126,15 +164,17 @@ func (a s3API) putBucketRequest(bucket, acl, location string) (*Request, error) // authenticated-read - owner gets full access, authenticated users get read access. // ------------------ // -// Location valid values. +// Region valid values. // ------------------ // [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ] // Default - US standard -func (a s3API) putBucket(bucket, acl, location string) error { - req, err := a.putBucketRequest(bucket, acl, location) +func (a API) putBucket(bucketName, acl, region string) error { + // Initialize a new request. + req, err := a.putBucketRequest(bucketName, acl, region) if err != nil { return err } + // Initiate the request. resp, err := req.Do() defer closeResp(resp) if err != nil { @@ -149,26 +189,52 @@ func (a s3API) putBucket(bucket, acl, location string) error { } // putBucketRequestACL wrapper creates a new putBucketACL request. -func (a s3API) putBucketACLRequest(bucket, acl string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + "?acl", +func (a API) putBucketACLRequest(bucketName, acl string) (*Request, error) { + // Set acl query. + urlValues := make(url.Values) + urlValues.Set("acl", "") + + // get target URL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues) + if err != nil { + return nil, err + } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err } - req, err := newRequest(op, a.config, requestMetadata{}) + + // Instantiate a new request. + req, err := newRequest("PUT", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) if err != nil { return nil, err } - req.Set("x-amz-acl", acl) + + // Set relevant acl. + if acl != "" { + req.Set("x-amz-acl", acl) + } else { + req.Set("x-amz-acl", "private") + } + + // Return. 
return req, nil } // putBucketACL set the permissions on an existing bucket using Canned ACL's. -func (a s3API) putBucketACL(bucket, acl string) error { - req, err := a.putBucketACLRequest(bucket, acl) +func (a API) putBucketACL(bucketName, acl string) error { + // Initialize a new request. + req, err := a.putBucketACLRequest(bucketName, acl) if err != nil { return err } + // Initiate the request. resp, err := req.Do() defer closeResp(resp) if err != nil { @@ -176,9 +242,6 @@ func (a s3API) putBucketACL(bucket, acl string) error { } if resp != nil { if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusMovedPermanently { - return a.handleStatusMovedPermanently(resp, bucket, "") - } return BodyToErrorResponse(resp.Body) } } @@ -186,13 +249,28 @@ func (a s3API) putBucketACL(bucket, acl string) error { } // getBucketACLRequest wrapper creates a new getBucketACL request. -func (a s3API) getBucketACLRequest(bucket string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + "?acl", +func (a API) getBucketACLRequest(bucketName string) (*Request, error) { + // Set acl query. + urlValues := make(url.Values) + urlValues.Set("acl", "") + + // get target URL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues) + if err != nil { + return nil, err } - req, err := newRequest(op, a.config, requestMetadata{}) + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + + // Instantiate a new request. + req, err := newRequest("GET", targetURL, requestMetadata{ + bucketRegion: region, + credentials: a.credentials, + }) if err != nil { return nil, err } @@ -200,11 +278,14 @@ func (a s3API) getBucketACLRequest(bucket string) (*Request, error) { } // getBucketACL get the acl information on an existing bucket. -func (a s3API) getBucketACL(bucketName string) (accessControlPolicy, error) { +func (a API) getBucketACL(bucketName string) (accessControlPolicy, error) { + // Initialize a new request. req, err := a.getBucketACLRequest(bucketName) if err != nil { return accessControlPolicy{}, err } + + // Initiate the request. resp, err := req.Do() defer closeResp(resp) if err != nil { @@ -212,27 +293,26 @@ func (a s3API) getBucketACL(bucketName string) (accessControlPolicy, error) { } if resp != nil { if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusMovedPermanently { - errorResponse := a.handleStatusMovedPermanently(resp, bucketName, "") - return accessControlPolicy{}, errorResponse - } return accessControlPolicy{}, BodyToErrorResponse(resp.Body) } } + + // Decode access control policy. policy := accessControlPolicy{} err = xmlDecoder(resp.Body, &policy) if err != nil { return accessControlPolicy{}, err } - // In-case of google private bucket policy doesn't have any Grant list. - if a.config.Region == "google" { + + // If Google private bucket policy doesn't have any Grant list. 
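+	// In that case return the policy as-is instead of flagging the
+	// missing Grant list as an error below.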
+	if isGoogleEndpoint(a.endpointURL) {
		return policy, nil
	}
	if policy.AccessControlList.Grant == nil {
		errorResponse := ErrorResponse{
			Code:            "InternalError",
			Message:         "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues.",
-			Resource:        separator + bucketName,
+			BucketName:      bucketName,
			RequestID:       resp.Header.Get("x-amz-request-id"),
			HostID:          resp.Header.Get("x-amz-id-2"),
			AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
@@ -243,13 +323,21 @@ func (a s3API) getBucketACL(bucketName string) (accessControlPolicy, error) {
}

// getBucketLocationRequest wrapper creates a new getBucketLocation request.
-func (a s3API) getBucketLocationRequest(bucket string) (*Request, error) {
-	op := &operation{
-		HTTPServer: a.config.Endpoint,
-		HTTPMethod: "GET",
-		HTTPPath:   separator + bucket + "?location",
-	}
-	req, err := newRequest(op, a.config, requestMetadata{})
+func (a API) getBucketLocationRequest(bucketName string) (*Request, error) {
+	// Set location query.
+	urlValues := make(url.Values)
+	urlValues.Set("location", "")
+
+	// Set get bucket location always as path style.
+	targetURL := a.endpointURL
+	targetURL.Path = filepath.Join(bucketName, "")
+	targetURL.RawQuery = urlValues.Encode()
+
+	// Instantiate a new request.
+	req, err := newRequest("GET", targetURL, requestMetadata{
+		bucketRegion: "us-east-1",
+		credentials:  a.credentials,
+	})
	if err != nil {
		return nil, err
	}
@@ -257,11 +345,14 @@ func (a s3API) getBucketLocationRequest(bucket string) (*Request, error) {
}

// getBucketLocation uses location subresource to return a bucket's region.
-func (a s3API) getBucketLocation(bucketName string) (string, error) {
+func (a API) getBucketLocation(bucketName string) (string, error) {
+	// Initialize a new request.
	req, err := a.getBucketLocationRequest(bucketName)
	if err != nil {
		return "", err
	}
+
+	// Initiate the request.
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
@@ -272,41 +363,60 @@ func (a s3API) getBucketLocation(bucketName string) (string, error) {
		return "", BodyToErrorResponse(resp.Body)
	}
	}
+
+	// Extract location.
	var locationConstraint string
	err = xmlDecoder(resp.Body, &locationConstraint)
	if err != nil {
		return "", err
	}
+
+	// An empty location means 'us-east-1'.
+	if locationConstraint == "" {
+		return "us-east-1", nil
+	}
+
+	// A location of 'EU' maps to 'eu-west-1'.
+	if locationConstraint == "EU" {
+		return "eu-west-1", nil
+	}
+
+	// return location.
	return locationConstraint, nil
}

// listObjectsRequest wrapper creates a new listObjects request.
-func (a s3API) listObjectsRequest(bucket, marker, prefix, delimiter string, maxkeys int) (*Request, error) {
-	// resourceQuery - get resources properly escaped and lined up before using them in http request.
-	resourceQuery := func() (*string, error) {
-		switch {
-		case marker != "":
-			marker = fmt.Sprintf("&marker=%s", getURLEncodedPath(marker))
-			fallthrough
-		case prefix != "":
-			prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix))
-			fallthrough
-		case delimiter != "":
-			delimiter = fmt.Sprintf("&delimiter=%s", delimiter)
-		}
-		query := fmt.Sprintf("?max-keys=%d", maxkeys) + marker + prefix + delimiter
-		return &query, nil
-	}
-	query, err := resourceQuery()
+func (a API) listObjectsRequest(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (*Request, error) {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	// Set object prefix.
+ urlValues.Set("prefix", urlEncodePath(objectPrefix)) + // Set object marker. + urlValues.Set("marker", urlEncodePath(objectMarker)) + // Set delimiter. + urlValues.Set("delimiter", delimiter) + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Get target url. + targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues) if err != nil { return nil, err } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + *query, + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err } - r, err := newRequest(op, a.config, requestMetadata{}) + + // Initialize a new request. + r, err := newRequest("GET", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) if err != nil { return nil, err } @@ -324,11 +434,14 @@ func (a s3API) listObjectsRequest(bucket, marker, prefix, delimiter string, maxk // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (a s3API) listObjects(bucketName, marker, prefix, delimiter string, maxkeys int) (listBucketResult, error) { - if !isValidBucketName(bucketName) { - return listBucketResult{}, ErrInvalidBucketName() +func (a API) listObjects(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) { + if err := isValidBucketName(bucketName); err != nil { + return listBucketResult{}, err + } + if err := isValidObjectPrefix(objectPrefix); err != nil { + return listBucketResult{}, err } - req, err := a.listObjectsRequest(bucketName, marker, prefix, delimiter, maxkeys) + req, err := a.listObjectsRequest(bucketName, objectPrefix, objectMarker, delimiter, maxkeys) if err != nil { return listBucketResult{}, err } @@ -339,10 +452,6 @@ func (a s3API) listObjects(bucketName, marker, prefix, delimiter string, maxkeys } if resp != nil { if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusMovedPermanently { - errorResponse := a.handleStatusMovedPermanently(resp, bucketName, "") - return listBucketResult{}, errorResponse - } return listBucketResult{}, BodyToErrorResponse(resp.Body) } } @@ -356,19 +465,29 @@ func (a s3API) listObjects(bucketName, marker, prefix, delimiter string, maxkeys } // headBucketRequest wrapper creates a new headBucket request. -func (a s3API) headBucketRequest(bucketName string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "HEAD", - HTTPPath: separator + bucketName, +func (a API) headBucketRequest(bucketName string) (*Request, error) { + targetURL, err := getTargetURL(a.endpointURL, bucketName, "", url.Values{}) + if err != nil { + return nil, err + } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + + return newRequest("HEAD", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) } // headBucket useful to determine if a bucket exists and you have permission to access it. 
-func (a s3API) headBucket(bucketName string) error { - if !isValidBucketName(bucketName) { - return ErrInvalidBucketName() +func (a API) headBucket(bucketName string) error { + if err := isValidBucketName(bucketName); err != nil { + return err } req, err := a.headBucketRequest(bucketName) if err != nil { @@ -379,19 +498,16 @@ func (a s3API) headBucket(bucketName string) error { if err != nil { return err } - var resource = separator + bucketName if resp != nil { if resp.StatusCode != http.StatusOK { // Head has no response body, handle it. var errorResponse ErrorResponse switch resp.StatusCode { - case http.StatusMovedPermanently: - errorResponse = a.handleStatusMovedPermanently(resp, bucketName, "") case http.StatusNotFound: errorResponse = ErrorResponse{ Code: "NoSuchBucket", Message: "The specified bucket does not exist.", - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -400,7 +516,7 @@ func (a s3API) headBucket(bucketName string) error { errorResponse = ErrorResponse{ Code: "AccessDenied", Message: "Access Denied.", - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -409,7 +525,7 @@ func (a s3API) headBucket(bucketName string) error { errorResponse = ErrorResponse{ Code: resp.Status, Message: resp.Status, - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -422,13 +538,23 @@ func (a s3API) headBucket(bucketName string) error { } // deleteBucketRequest wrapper creates a new deleteBucket request. -func (a s3API) deleteBucketRequest(bucketName string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "DELETE", - HTTPPath: separator + bucketName, +func (a API) deleteBucketRequest(bucketName string) (*Request, error) { + targetURL, err := getTargetURL(a.endpointURL, bucketName, "", url.Values{}) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + + return newRequest("DELETE", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) } // deleteBucket deletes the bucket name. @@ -436,9 +562,9 @@ func (a s3API) deleteBucketRequest(bucketName string) (*Request, error) { // NOTE: - // All objects (including all object versions and delete markers) // in the bucket must be deleted before successfully attempting this request. 
-func (a s3API) deleteBucket(bucketName string) error { - if !isValidBucketName(bucketName) { - return ErrInvalidBucketName() +func (a API) deleteBucket(bucketName string) error { + if err := isValidBucketName(bucketName); err != nil { + return err } req, err := a.deleteBucketRequest(bucketName) if err != nil { @@ -449,18 +575,15 @@ func (a s3API) deleteBucket(bucketName string) error { if err != nil { return err } - var resource = separator + bucketName if resp != nil { if resp.StatusCode != http.StatusNoContent { var errorResponse ErrorResponse switch resp.StatusCode { - case http.StatusMovedPermanently: - errorResponse = a.handleStatusMovedPermanently(resp, bucketName, "") case http.StatusNotFound: errorResponse = ErrorResponse{ Code: "NoSuchBucket", Message: "The specified bucket does not exist.", - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -469,7 +592,7 @@ func (a s3API) deleteBucket(bucketName string) error { errorResponse = ErrorResponse{ Code: "AccessDenied", Message: "Access Denied.", - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -478,7 +601,7 @@ func (a s3API) deleteBucket(bucketName string) error { errorResponse = ErrorResponse{ Code: "Conflict", Message: "Bucket not empty.", - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -487,7 +610,7 @@ func (a s3API) deleteBucket(bucketName string) error { errorResponse = ErrorResponse{ Code: resp.Status, Message: resp.Status, - Resource: resource, + BucketName: bucketName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -502,23 +625,37 @@ func (a s3API) deleteBucket(bucketName string) error { /// Object Read/Write/Stat Operations // putObjectRequest wrapper creates a new PutObject request. -func (a s3API) putObjectRequest(bucketName, objectName string, putObjMetadata putObjectMetadata) (*Request, error) { +func (a API) putObjectRequest(bucketName, objectName string, putObjMetadata putObjectMetadata) (*Request, error) { + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) + if err != nil { + return nil, err + } if strings.TrimSpace(putObjMetadata.ContentType) == "" { putObjMetadata.ContentType = "application/octet-stream" } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucketName + separator + objectName, + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err } + + // Set headers. + putObjMetadataHeader := make(http.Header) + putObjMetadataHeader.Set("Content-Type", putObjMetadata.ContentType) + + // Populate request metadata. 
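+	// (It carries the content body, its precomputed hashes and the
+	// bucket region used for signing.)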
rmetadata := requestMetadata{ - body: putObjMetadata.ReadCloser, + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + contentBody: putObjMetadata.ReadCloser, contentLength: putObjMetadata.Size, - contentType: putObjMetadata.ContentType, - sha256PayloadBytes: putObjMetadata.Sha256Sum, - md5SumPayloadBytes: putObjMetadata.MD5Sum, + contentHeader: putObjMetadataHeader, + contentSha256Bytes: putObjMetadata.Sha256Sum, + contentMD5Bytes: putObjMetadata.MD5Sum, } - r, err := newRequest(op, a.config, rmetadata) + r, err := newRequest("PUT", targetURL, rmetadata) if err != nil { return nil, err } @@ -527,7 +664,7 @@ func (a s3API) putObjectRequest(bucketName, objectName string, putObjMetadata pu // putObject - add an object to a bucket. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (a s3API) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) { +func (a API) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) { req, err := a.putObjectRequest(bucketName, objectName, putObjMetadata) if err != nil { return ObjectStat{}, err @@ -539,10 +676,6 @@ func (a s3API) putObject(bucketName, objectName string, putObjMetadata putObject } if resp != nil { if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusMovedPermanently { - errorResponse := a.handleStatusMovedPermanently(resp, bucketName, objectName) - return ObjectStat{}, errorResponse - } return ObjectStat{}, BodyToErrorResponse(resp.Body) } } @@ -552,23 +685,44 @@ func (a s3API) putObject(bucketName, objectName string, putObjMetadata putObject } // presignedPostPolicy - generate post form data. -func (a s3API) presignedPostPolicy(p *PostPolicy) map[string]string { +func (a API) presignedPostPolicy(p *PostPolicy) (map[string]string, error) { + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, p.formData["bucket"], "", url.Values{}) + if err != nil { + return nil, err + } + + // get bucket region. + region, err := a.getRegion(p.formData["bucket"]) + if err != nil { + return nil, err + } + + // Instantiate a new request. + req, err := newRequest("POST", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return nil, err + } + + // Keep time. t := time.Now().UTC() - r := new(Request) - r.config = a.config - if r.config.Signature.isV2() { + if req.credentials.Signature.isV2() { policyBase64 := p.base64() p.formData["policy"] = policyBase64 // for all other regions set this value to be 'AWSAccessKeyId'. 
- if r.config.Region != "google" { - p.formData["AWSAccessKeyId"] = r.config.AccessKeyID + if isGoogleEndpoint(a.endpointURL) { + p.formData["GoogleAccessId"] = req.credentials.AccessKeyID } else { - p.formData["GoogleAccessId"] = r.config.AccessKeyID + p.formData["AWSAccessKeyId"] = req.credentials.AccessKeyID } - p.formData["signature"] = r.PostPresignSignatureV2(policyBase64) - return p.formData + p.formData["signature"] = req.PostPresignSignatureV2(policyBase64) + return p.formData, nil } - credential := getCredential(r.config.AccessKeyID, r.config.Region, t) + credential := getCredential(req.credentials.AccessKeyID, req.bucketRegion, t) p.addNewPolicy(policyCondition{ matchType: "eq", condition: "$x-amz-date", @@ -589,88 +743,112 @@ func (a s3API) presignedPostPolicy(p *PostPolicy) map[string]string { p.formData["x-amz-algorithm"] = authHeader p.formData["x-amz-credential"] = credential p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - p.formData["x-amz-signature"] = r.PostPresignSignatureV4(policyBase64, t) - return p.formData + p.formData["x-amz-signature"] = req.PostPresignSignatureV4(policyBase64, t) + return p.formData, nil } // presignedPutObject - generate presigned PUT url. -func (a s3API) presignedPutObject(bucketName, objectName string, expires int64) (string, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucketName + separator + objectName, - } - r, err := newPresignedRequest(op, a.config, expires) +func (a API) presignedPutObject(bucketName, objectName string, expires int64) (string, error) { + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) if err != nil { return "", err } - if r.config.Signature.isV2() { - return r.PreSignV2() - } - return r.PreSignV4() -} -// presignedGetObjectRequest - presigned get object request -func (a s3API) presignedGetObjectRequest(bucketName, objectName string, expires, offset, length int64) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucketName + separator + objectName, + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return "", err } - r, err := newPresignedRequest(op, a.config, expires) + + // Instantiate a new request. + req, err := newRequest("PUT", targetURL, requestMetadata{ + credentials: a.credentials, + expires: expires, + userAgent: a.userAgent, + bucketRegion: region, + }) if err != nil { - return nil, err + return "", err } - switch { - case length > 0 && offset > 0: - r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - case offset > 0 && length == 0: - r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - case length > 0 && offset == 0: - r.Set("Range", fmt.Sprintf("bytes=-%d", length)) + if req.credentials.Signature.isV2() { + return req.PreSignV2() } - return r, nil + return req.PreSignV4() } // presignedGetObject - generate presigned get object URL. -func (a s3API) presignedGetObject(bucketName, objectName string, expires, offset, length int64) (string, error) { - if !isValidBucketName(bucketName) { - return "", ErrInvalidBucketName() - } - if !isValidObjectName(objectName) { - return "", ErrInvalidObjectName() +func (a API) presignedGetObject(bucketName, objectName string, expires, offset, length int64) (string, error) { + // get targetURL. 
+ targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) + if err != nil { + return "", err } - r, err := a.presignedGetObjectRequest(bucketName, objectName, expires, offset, length) + + // get bucket region. + region, err := a.getRegion(bucketName) if err != nil { return "", err } - if r.config.Signature.isV2() { - return r.PreSignV2() + + // Instantiate a new request. + req, err := newRequest("GET", targetURL, requestMetadata{ + credentials: a.credentials, + expires: expires, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return "", err } - return r.PreSignV4() + + // Set ranges if length and offset are valid. + if length > 0 && offset >= 0 { + req.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else if offset > 0 && length == 0 { + req.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length > 0 && offset == 0 { + req.Set("Range", fmt.Sprintf("bytes=-%d", length)) + } + if req.credentials.Signature.isV2() { + return req.PreSignV2() + } + return req.PreSignV4() } // getObjectRequest wrapper creates a new getObject request. -func (a s3API) getObjectRequest(bucketName, objectName string, offset, length int64) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucketName + separator + objectName, +func (a API) getObjectRequest(bucketName, objectName string, offset, length int64) (*Request, error) { + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) + if err != nil { + return nil, err } - r, err := newRequest(op, a.config, requestMetadata{}) + + // get bucket region. + region, err := a.getRegion(bucketName) if err != nil { return nil, err } - switch { - case length > 0 && offset >= 0: - r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - case offset > 0 && length == 0: - r.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - // The final length bytes - case length < 0 && offset == 0: - r.Set("Range", fmt.Sprintf("bytes=%d", length)) + + // Instantiate a new request. + req, err := newRequest("GET", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return nil, err } - return r, nil + + // Set ranges if length and offset are valid. + if length > 0 && offset >= 0 { + req.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else if offset > 0 && length == 0 { + req.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length < 0 && offset == 0 { + req.Set("Range", fmt.Sprintf("bytes=%d", length)) + } + return req, nil } // getObject - retrieve object from Object Storage. @@ -680,12 +858,12 @@ func (a s3API) getObjectRequest(bucketName, objectName string, offset, length in // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. 
-func (a s3API) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) { - if !isValidBucketName(bucketName) { - return nil, ObjectStat{}, ErrInvalidBucketName() +func (a API) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) { + if err := isValidBucketName(bucketName); err != nil { + return nil, ObjectStat{}, err } - if !isValidObjectName(objectName) { - return nil, ObjectStat{}, ErrInvalidObjectName() + if err := isValidObjectName(objectName); err != nil { + return nil, ObjectStat{}, err } req, err := a.getObjectRequest(bucketName, objectName, offset, length) if err != nil { @@ -696,15 +874,7 @@ func (a s3API) getObject(bucketName, objectName string, offset, length int64) (i return nil, ObjectStat{}, err } if resp != nil { - switch resp.StatusCode { - // for HTTP status 200 and 204 are valid cases. - case http.StatusOK: - case http.StatusPartialContent: - // handle 301 sepcifically in case of wrong regions during path style. - case http.StatusMovedPermanently: - errorResponse := a.handleStatusMovedPermanently(resp, bucketName, objectName) - return nil, ObjectStat{}, errorResponse - default: + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { return nil, ObjectStat{}, BodyToErrorResponse(resp.Body) } } @@ -735,22 +905,38 @@ func (a s3API) getObject(bucketName, objectName string, offset, length int64) (i } // deleteObjectRequest wrapper creates a new deleteObject request. -func (a s3API) deleteObjectRequest(bucketName, objectName string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "DELETE", - HTTPPath: separator + bucketName + separator + objectName, +func (a API) deleteObjectRequest(bucketName, objectName string) (*Request, error) { + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) + if err != nil { + return nil, err + } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + + // Instantiate a new request. + req, err := newRequest("DELETE", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + return req, nil } // deleteObject deletes a given object from a bucket. -func (a s3API) deleteObject(bucketName, objectName string) error { - if !isValidBucketName(bucketName) { - return ErrInvalidBucketName() +func (a API) deleteObject(bucketName, objectName string) error { + if err := isValidBucketName(bucketName); err != nil { + return err } - if !isValidObjectName(objectName) { - return ErrInvalidObjectName() + if err := isValidObjectName(objectName); err != nil { + return err } req, err := a.deleteObjectRequest(bucketName, objectName) if err != nil { @@ -768,22 +954,40 @@ func (a s3API) deleteObject(bucketName, objectName string) error { } // headObjectRequest wrapper creates a new headObject request. -func (a s3API) headObjectRequest(bucketName, objectName string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "HEAD", - HTTPPath: separator + bucketName + separator + objectName, +func (a API) headObjectRequest(bucketName, objectName string) (*Request, error) { + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) + if err != nil { + return nil, err + } + + // get bucket region. 
+ region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + + // Instantiate a new request. + req, err := newRequest("HEAD", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + + // Return new request. + return req, nil } // headObject retrieves metadata for an object without returning the object itself. -func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { - if !isValidBucketName(bucketName) { - return ObjectStat{}, ErrInvalidBucketName() +func (a API) headObject(bucketName, objectName string) (ObjectStat, error) { + if err := isValidBucketName(bucketName); err != nil { + return ObjectStat{}, err } - if !isValidObjectName(objectName) { - return ObjectStat{}, ErrInvalidObjectName() + if err := isValidObjectName(objectName); err != nil { + return ObjectStat{}, err } req, err := a.headObjectRequest(bucketName, objectName) if err != nil { @@ -794,18 +998,16 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { if err != nil { return ObjectStat{}, err } - var resource = separator + bucketName + separator + objectName if resp != nil { if resp.StatusCode != http.StatusOK { var errorResponse ErrorResponse switch resp.StatusCode { - case http.StatusMovedPermanently: - errorResponse = a.handleStatusMovedPermanently(resp, bucketName, objectName) case http.StatusNotFound: errorResponse = ErrorResponse{ Code: "NoSuchKey", Message: "The specified key does not exist.", - Resource: resource, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -814,7 +1016,8 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { errorResponse = ErrorResponse{ Code: "AccessDenied", Message: "Access Denied.", - Resource: resource, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -823,7 +1026,8 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { errorResponse = ErrorResponse{ Code: resp.Status, Message: resp.Status, - Resource: resource, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -839,7 +1043,8 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { return ObjectStat{}, ErrorResponse{ Code: "InternalError", Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues.", - Resource: resource, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -850,7 +1055,8 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { return ObjectStat{}, ErrorResponse{ Code: "InternalError", Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.", - Resource: resource, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: 
resp.Header.Get("x-amz-bucket-region"), @@ -860,7 +1066,7 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { if contentType == "" { contentType = "application/octet-stream" } - + // Save object metadata info. var objectstat ObjectStat objectstat.ETag = md5sum objectstat.Key = objectName @@ -873,17 +1079,26 @@ func (a s3API) headObject(bucketName, objectName string) (ObjectStat, error) { /// Service Operations. // listBucketRequest wrapper creates a new listBuckets request. -func (a s3API) listBucketsRequest() (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator, +func (a API) listBucketsRequest() (*Request, error) { + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, "", "", url.Values{}) + if err != nil { + return nil, err + } + // Instantiate a new request. + req, err := newRequest("GET", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: "us-east-1", + }) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + return req, nil } // listBuckets list of all buckets owned by the authenticated sender of the request. -func (a s3API) listBuckets() (listAllMyBucketsResult, error) { +func (a API) listBuckets() (listAllMyBucketsResult, error) { req, err := a.listBucketsRequest() if err != nil { return listAllMyBucketsResult{}, err diff --git a/s3-multipart-api.go b/s3-multipart-api.go index cb402f965d..7b39839508 100644 --- a/s3-multipart-api.go +++ b/s3-multipart-api.go @@ -22,43 +22,46 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" "strconv" ) // listMultipartUploadsRequest wrapper creates a new listMultipartUploads request. -func (a s3API) listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (*Request, error) { - // resourceQuery get resources properly escaped and lined up before using them in http request. - resourceQuery := func() (string, error) { - switch { - case keymarker != "": - keymarker = fmt.Sprintf("&key-marker=%s", getURLEncodedPath(keymarker)) - fallthrough - case uploadIDMarker != "": - uploadIDMarker = fmt.Sprintf("&upload-id-marker=%s", uploadIDMarker) - fallthrough - case prefix != "": - prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix)) - fallthrough - case delimiter != "": - delimiter = fmt.Sprintf("&delimiter=%s", delimiter) - } - query := fmt.Sprintf("?uploads&max-uploads=%d", maxuploads) + keymarker + uploadIDMarker + prefix + delimiter - return query, nil - } - query, err := resourceQuery() +func (a API) listMultipartUploadsRequest(bucketName, keyMarker, uploadIDMarker, + prefix, delimiter string, maxUploads int) (*Request, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + urlValues.Set("key-marker", urlEncodePath(keyMarker)) + // Set upload id marker. + urlValues.Set("upload-id-marker", uploadIDMarker) + // Set prefix marker. + urlValues.Set("prefix", urlEncodePath(prefix)) + // Set delimiter. + urlValues.Set("delimiter", delimiter) + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + + // get targetURL. 
+ targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues) if err != nil { return nil, err } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + query, - } - r, err := newRequest(op, a.config, requestMetadata{}) + + // get bucket region. + region, err := a.getRegion(bucketName) if err != nil { return nil, err } - return r, nil + + // Instantiate a new request. + return newRequest("GET", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) } // listMultipartUploads - (List Multipart Uploads). @@ -72,8 +75,10 @@ func (a s3API) listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, pr // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (a s3API) listMultipartUploads(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (listMultipartUploadsResult, error) { - req, err := a.listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter, maxuploads) +func (a API) listMultipartUploads(bucketName, keyMarker, + uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) { + req, err := a.listMultipartUploadsRequest(bucketName, + keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) if err != nil { return listMultipartUploadsResult{}, err } @@ -96,18 +101,42 @@ func (a s3API) listMultipartUploads(bucket, keymarker, uploadIDMarker, prefix, d } // initiateMultipartRequest wrapper creates a new initiateMultiPart request. -func (a s3API) initiateMultipartRequest(bucket, object string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "POST", - HTTPPath: separator + bucket + separator + object + "?uploads", +func (a API) initiateMultipartRequest(bucketName, objectName, contentType string) (*Request, error) { + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues) + if err != nil { + return nil, err + } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + + if contentType == "" { + contentType = "application/octet-stream" } - return newRequest(op, a.config, requestMetadata{}) + // set ContentType header. + multipartHeader := make(http.Header) + multipartHeader.Set("Content-Type", contentType) + + rmetadata := requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + contentHeader: multipartHeader, + } + return newRequest("POST", targetURL, rmetadata) } // initiateMultipartUpload initiates a multipart upload and returns an upload ID. 
-func (a s3API) initiateMultipartUpload(bucket, object string) (initiateMultipartUploadResult, error) { - req, err := a.initiateMultipartRequest(bucket, object) +func (a API) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { + req, err := a.initiateMultipartRequest(bucketName, objectName, contentType) if err != nil { return initiateMultipartUploadResult{}, err } @@ -130,32 +159,48 @@ func (a s3API) initiateMultipartUpload(bucket, object string) (initiateMultipart } // completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request. -func (a s3API) completeMultipartUploadRequest(bucket, object, uploadID string, complete completeMultipartUpload) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "POST", - HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID, +func (a API) completeMultipartUploadRequest(bucketName, objectName, uploadID string, + complete completeMultipartUpload) (*Request, error) { + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues) + if err != nil { + return nil, err } completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { return nil, err } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes) rmetadata := requestMetadata{ - body: ioutil.NopCloser(completeMultipartUploadBuffer), + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + contentBody: ioutil.NopCloser(completeMultipartUploadBuffer), contentLength: int64(completeMultipartUploadBuffer.Len()), - sha256PayloadBytes: sum256(completeMultipartUploadBuffer.Bytes()), + contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()), } - r, err := newRequest(op, a.config, rmetadata) + req, err := newRequest("POST", targetURL, rmetadata) if err != nil { return nil, err } - return r, nil + return req, nil } // completeMultipartUpload completes a multipart upload by assembling previously uploaded parts. -func (a s3API) completeMultipartUpload(bucket, object, uploadID string, c completeMultipartUpload) (completeMultipartUploadResult, error) { - req, err := a.completeMultipartUploadRequest(bucket, object, uploadID, c) +func (a API) completeMultipartUpload(bucketName, objectName, uploadID string, + c completeMultipartUpload) (completeMultipartUploadResult, error) { + req, err := a.completeMultipartUploadRequest(bucketName, objectName, uploadID, c) if err != nil { return completeMultipartUploadResult{}, err } @@ -178,18 +223,37 @@ func (a s3API) completeMultipartUpload(bucket, object, uploadID string, c comple } // abortMultipartUploadRequest wrapper creates a new AbortMultipartUpload request. -func (a s3API) abortMultipartUploadRequest(bucket, object, uploadID string) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "DELETE", - HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID, +func (a API) abortMultipartUploadRequest(bucketName, objectName, uploadID string) (*Request, error) { + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // get targetURL. 
+ targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + + req, err := newRequest("DELETE", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return nil, err + } + return req, nil } // abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted. -func (a s3API) abortMultipartUpload(bucket, object, uploadID string) error { - req, err := a.abortMultipartUploadRequest(bucket, object, uploadID) +func (a API) abortMultipartUpload(bucketName, objectName, uploadID string) error { + req, err := a.abortMultipartUploadRequest(bucketName, objectName, uploadID) if err != nil { return err } @@ -207,7 +271,8 @@ func (a s3API) abortMultipartUpload(bucket, object, uploadID string) error { errorResponse = ErrorResponse{ Code: "NoSuchUpload", Message: "The specified multipart upload does not exist.", - Resource: separator + bucket + separator + object, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -216,7 +281,8 @@ func (a s3API) abortMultipartUpload(bucket, object, uploadID string) error { errorResponse = ErrorResponse{ Code: "AccessDenied", Message: "Access Denied.", - Resource: separator + bucket + separator + object, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -225,7 +291,8 @@ func (a s3API) abortMultipartUpload(bucket, object, uploadID string) error { errorResponse = ErrorResponse{ Code: resp.Status, Message: "Unknown error, please report this at https://github.com/minio/minio-go-legacy/issues.", - Resource: separator + bucket + separator + object, + BucketName: bucketName, + Key: objectName, RequestID: resp.Header.Get("x-amz-request-id"), HostID: resp.Header.Get("x-amz-id-2"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), @@ -238,22 +305,37 @@ func (a s3API) abortMultipartUpload(bucket, object, uploadID string) error { } // listObjectPartsRequest wrapper creates a new ListObjectParts request. -func (a s3API) listObjectPartsRequest(bucket, object, uploadID string, partNumberMarker, maxParts int) (*Request, error) { - // resourceQuery - get resources properly escaped and lined up before using them in http request. - resourceQuery := func() string { - var partNumberMarkerStr string - switch { - case partNumberMarker != 0: - partNumberMarkerStr = fmt.Sprintf("&part-number-marker=%d", partNumberMarker) - } - return fmt.Sprintf("?uploadId=%s&max-parts=%d", uploadID, maxParts) + partNumberMarkerStr +func (a API) listObjectPartsRequest(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (*Request, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number marker. + urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) + + // get targetURL. 
+ targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues) + if err != nil { + return nil, err + } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err } - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "GET", - HTTPPath: separator + bucket + separator + object + resourceQuery(), + + req, err := newRequest("GET", targetURL, requestMetadata{ + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + }) + if err != nil { + return nil, err } - return newRequest(op, a.config, requestMetadata{}) + return req, nil } // listObjectParts (List Parts) @@ -263,8 +345,8 @@ func (a s3API) listObjectPartsRequest(bucket, object, uploadID string, partNumbe // request paramters :- // --------- // ?part-number-marker - Specifies the part after which listing should begin. -func (a s3API) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { - req, err := a.listObjectPartsRequest(bucket, object, uploadID, partNumberMarker, maxParts) +func (a API) listObjectParts(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { + req, err := a.listObjectPartsRequest(bucketName, objectName, uploadID, partNumberMarker, maxParts) if err != nil { return listObjectPartsResult{}, err } @@ -287,33 +369,48 @@ func (a s3API) listObjectParts(bucket, object, uploadID string, partNumberMarker } // uploadPartRequest wrapper creates a new UploadPart request. -func (a s3API) uploadPartRequest(bucket, object, uploadID string, uploadingPart partMetadata) (*Request, error) { - op := &operation{ - HTTPServer: a.config.Endpoint, - HTTPMethod: "PUT", - HTTPPath: separator + bucket + separator + object + - "?partNumber=" + strconv.Itoa(uploadingPart.Number) + "&uploadId=" + uploadID, +func (a API) uploadPartRequest(bucketName, objectName, uploadID string, uploadingPart partMetadata) (*Request, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(uploadingPart.Number)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // get targetURL. + targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues) + if err != nil { + return nil, err } + + // get bucket region. + region, err := a.getRegion(bucketName) + if err != nil { + return nil, err + } + rmetadata := requestMetadata{ - body: uploadingPart.ReadCloser, + credentials: a.credentials, + userAgent: a.userAgent, + bucketRegion: region, + contentBody: uploadingPart.ReadCloser, contentLength: uploadingPart.Size, - sha256PayloadBytes: uploadingPart.Sha256Sum, - md5SumPayloadBytes: uploadingPart.MD5Sum, + contentSha256Bytes: uploadingPart.Sha256Sum, + contentMD5Bytes: uploadingPart.MD5Sum, } - r, err := newRequest(op, a.config, rmetadata) + req, err := newRequest("PUT", targetURL, rmetadata) if err != nil { return nil, err } - return r, nil + return req, nil } // uploadPart uploads a part in a multipart upload. 
-func (a s3API) uploadPart(bucket, object, uploadID string, uploadingPart partMetadata) (completePart, error) { - req, err := a.uploadPartRequest(bucket, object, uploadID, uploadingPart) +func (a API) uploadPart(bucketName, objectName, uploadID string, uploadingPart partMetadata) (completePart, error) { + req, err := a.uploadPartRequest(bucketName, objectName, uploadID, uploadingPart) if err != nil { return completePart{}, err } - // initiate the request. resp, err := req.Do() defer closeResp(resp) diff --git a/utils.go b/utils.go deleted file mode 100644 index d195bffe94..0000000000 --- a/utils.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "regexp" - "strings" - "unicode/utf8" -) - -// isValidBucketName - verify bucket name in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func isValidBucketName(bucketName string) bool { - if strings.TrimSpace(bucketName) == "" { - return false - } - if len(bucketName) < 3 || len(bucketName) > 63 { - return false - } - if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' { - return false - } - if match, _ := regexp.MatchString("\\.\\.", bucketName); match == true { - return false - } - // We don't support bucketNames with '.' in them - match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucketName) - return match -} - -// isValidObjectName - verify object name in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func isValidObjectName(objectName string) bool { - if strings.TrimSpace(objectName) == "" { - return false - } - if len(objectName) > 1024 || len(objectName) == 0 { - return false - } - if !utf8.ValidString(objectName) { - return false - } - return true -}