123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487 |
- package minio
- import (
- "bytes"
- "context"
- "encoding/base64"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "sort"
- "strings"
- "github.com/google/uuid"
- "github.com/minio/minio-go/v7/pkg/s3utils"
- )
- func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
- reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
- if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
-
- info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
- } else {
- info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
- }
- if err != nil {
- errResp := ToErrorResponse(err)
-
-
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
-
- if size > maxSinglePutObjectSize {
- return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
-
- return c.putObject(ctx, bucketName, objectName, reader, size, opts)
- }
- }
- return info, err
- }
// uploadedPartRes carries the outcome of one part upload from a worker
// goroutine back to the collecting loop.
type uploadedPartRes struct {
	Error   error      // non-nil when the part read or upload failed
	PartNum int        // part number that was uploaded
	Size    int64      // uploaded size of the part, in bytes
	Part    ObjectPart // server-returned metadata for the part
}
// uploadPartReq describes a single part for a worker goroutine to upload.
type uploadPartReq struct {
	PartNum int        // part number to upload
	Part    ObjectPart // filled in by the worker once uploaded
}
- func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
- reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
-
- if err = s3utils.CheckValidBucketName(bucketName); err != nil {
- return UploadInfo{}, err
- }
- if err = s3utils.CheckValidObjectName(objectName); err != nil {
- return UploadInfo{}, err
- }
-
- totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
- if err != nil {
- return UploadInfo{}, err
- }
-
- uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
- if err != nil {
- return UploadInfo{}, err
- }
-
-
-
-
- defer func() {
- if err != nil {
- c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
- }
- }()
-
- var totalUploadedSize int64
-
- var complMultipartUpload completeMultipartUpload
-
-
-
- uploadPartsCh := make(chan uploadPartReq, 10000)
-
-
-
- uploadedPartsCh := make(chan uploadedPartRes, 10000)
-
- lastPartNumber := totalPartsCount
-
- for p := 1; p <= totalPartsCount; p++ {
- uploadPartsCh <- uploadPartReq{PartNum: p}
- }
- close(uploadPartsCh)
- var partsBuf = make([][]byte, opts.getNumThreads())
- for i := range partsBuf {
- partsBuf[i] = make([]byte, 0, partSize)
- }
-
- for w := 1; w <= opts.getNumThreads(); w++ {
- go func(w int, partSize int64) {
-
- for uploadReq := range uploadPartsCh {
-
-
-
- readOffset := int64(uploadReq.PartNum-1) * partSize
-
-
- if uploadReq.PartNum == lastPartNumber {
- readOffset = (size - lastPartSize)
- partSize = lastPartSize
- }
- n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
- if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
- uploadedPartsCh <- uploadedPartRes{
- Error: rerr,
- }
-
- return
- }
-
- hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress)
-
- objPart, err := c.uploadPart(ctx, bucketName, objectName,
- uploadID, hookReader, uploadReq.PartNum,
- "", "", partSize, opts.ServerSideEncryption)
- if err != nil {
- uploadedPartsCh <- uploadedPartRes{
- Error: err,
- }
-
- return
- }
-
- uploadReq.Part = objPart
-
- uploadedPartsCh <- uploadedPartRes{
- Size: objPart.Size,
- PartNum: uploadReq.PartNum,
- Part: uploadReq.Part,
- }
- }
- }(w, partSize)
- }
-
-
- for u := 1; u <= totalPartsCount; u++ {
- uploadRes := <-uploadedPartsCh
- if uploadRes.Error != nil {
- return UploadInfo{}, uploadRes.Error
- }
-
- totalUploadedSize += uploadRes.Size
-
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
- ETag: uploadRes.Part.ETag,
- PartNumber: uploadRes.Part.PartNumber,
- })
- }
-
- if totalUploadedSize != size {
- return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
-
- sort.Sort(completedParts(complMultipartUpload.Parts))
- uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
- if err != nil {
- return UploadInfo{}, err
- }
- uploadInfo.Size = totalUploadedSize
- return uploadInfo, nil
- }
- func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
- reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
-
- if err = s3utils.CheckValidBucketName(bucketName); err != nil {
- return UploadInfo{}, err
- }
- if err = s3utils.CheckValidObjectName(objectName); err != nil {
- return UploadInfo{}, err
- }
-
- totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
- if err != nil {
- return UploadInfo{}, err
- }
-
- uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
- if err != nil {
- return UploadInfo{}, err
- }
-
-
-
-
- defer func() {
- if err != nil {
- c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
- }
- }()
-
- var totalUploadedSize int64
-
- partsInfo := make(map[int]ObjectPart)
-
- buf := make([]byte, partSize)
-
- var md5Base64 string
- var hookReader io.Reader
-
- var partNumber int
- for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
-
- if partNumber == totalPartsCount {
- partSize = lastPartSize
- }
- if opts.SendContentMd5 {
- length, rerr := readFull(reader, buf)
- if rerr == io.EOF && partNumber > 1 {
- break
- }
- if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
- return UploadInfo{}, rerr
- }
-
- hash := c.md5Hasher()
- hash.Write(buf[:length])
- md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
- hash.Close()
-
-
- hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress)
- } else {
-
-
- hookReader = newHook(reader, opts.Progress)
- }
- objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID,
- io.LimitReader(hookReader, partSize),
- partNumber, md5Base64, "", partSize, opts.ServerSideEncryption)
- if uerr != nil {
- return UploadInfo{}, uerr
- }
-
- partsInfo[partNumber] = objPart
-
- totalUploadedSize += partSize
- }
-
- if size > 0 {
- if totalUploadedSize != size {
- return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
- }
-
- var complMultipartUpload completeMultipartUpload
-
-
- for i := 1; i < partNumber; i++ {
- part, ok := partsInfo[i]
- if !ok {
- return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
- }
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
- ETag: part.ETag,
- PartNumber: part.PartNumber,
- })
- }
-
- sort.Sort(completedParts(complMultipartUpload.Parts))
- uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
- if err != nil {
- return UploadInfo{}, err
- }
- uploadInfo.Size = totalUploadedSize
- return uploadInfo, nil
- }
// putObject uploads an object with a single PUT request. Used directly for
// small objects and as the fallback when multipart access is denied.
// A negative size (streaming of unknown length) is only accepted for
// Google Cloud Storage endpoints.
func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Unknown size is rejected everywhere except Google endpoints.
	if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
		return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
	}

	// MD5 requires the full payload up front, which is impossible with
	// an unknown size.
	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
	}

	if size > 0 {
		if isReadAt(reader) && !isObject(reader) {
			seeker, ok := reader.(io.Seeker)
			if ok {
				// Pin the read window to [offset, offset+size) from the
				// reader's current position, so the upload is unaffected
				// by other consumers seeking on the same reader.
				offset, err := seeker.Seek(0, io.SeekCurrent)
				if err != nil {
					return UploadInfo{}, errInvalidArgument(err.Error())
				}
				reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
			}
		}
	}

	var md5Base64 string
	if opts.SendContentMd5 {
		// Buffer the entire payload in memory to compute its MD5, then
		// upload from the buffer. NOTE(review): this allocates `size`
		// bytes — presumably callers only take this path for objects
		// small enough to buffer; confirm upstream gating.
		buf := make([]byte, size)
		length, rErr := readFull(reader, buf)
		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
			return UploadInfo{}, rErr
		}

		// Base64-encoded MD5 of exactly the bytes that were read.
		hash := c.md5Hasher()
		hash.Write(buf[:length])
		md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
		reader = bytes.NewReader(buf[:length])
		hash.Close()
	}

	// Wrap the reader so progress (if configured) is reported as the
	// body is consumed.
	readSeeker := newHook(reader, opts.Progress)

	// Execute the single-PUT upload.
	return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts)
}
// putObjectDo performs the actual single PUT request and translates the
// HTTP response headers into an UploadInfo (ETag, version, expiration).
func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Headers derived from the put options (metadata, SSE, etc.).
	customHeader := opts.Header()

	// Populate request metadata for the PUT.
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		customHeader:     customHeader,
		contentBody:      reader,
		contentLength:    size,
		contentMD5Base64: md5Base64,
		contentSHA256Hex: sha256Hex,
	}
	// An internal source version ID, when set, must be a valid UUID and
	// is forwarded as the versionId query parameter.
	if opts.Internal.SourceVersionID != "" {
		if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
			return UploadInfo{}, errInvalidArgument(err.Error())
		}
		urlValues := make(url.Values)
		urlValues.Set("versionId", opts.Internal.SourceVersionID)
		reqMetadata.queryValues = urlValues
	}

	// Execute PUT on the object; always release the response.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return UploadInfo{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// NOTE(review): resp is dereferenced below without a nil guard; this
	// relies on executeMethod returning a non-nil response whenever err
	// is nil — confirm that invariant holds.
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
	return UploadInfo{
		Bucket:           bucketName,
		Key:              objectName,
		ETag:             trimEtag(resp.Header.Get("ETag")),
		VersionID:        resp.Header.Get(amzVersionID),
		Size:             size,
		Expiration:       expTime,
		ExpirationRuleID: ruleID,
	}, nil
}
|