
Merge pull request #104 from agin719/cos-v4-dev

fix multicopy and multiupload send on closed channel panic

Branch: master
Author: agin719, 4 years ago (committed by GitHub)
Commit: b14f275335
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
3 changed files:
  1. object.go (30 changed lines)
  2. object_part.go (7 changed lines)
  3. object_select.go (2 changed lines)

object.go

@@ -513,7 +513,7 @@ func (s *ObjectService) DeleteMulti(ctx context.Context, opt *ObjectDeleteMultiO
 type Object struct {
 	Key          string `xml:",omitempty"`
 	ETag         string `xml:",omitempty"`
-	Size         int    `xml:",omitempty"`
+	Size         int64  `xml:",omitempty"`
 	PartNumber   int    `xml:",omitempty"`
 	LastModified string `xml:",omitempty"`
 	StorageClass string `xml:",omitempty"`
@@ -836,35 +836,43 @@ func (s *ObjectService) Upload(ctx context.Context, name string, filepath string
 	}()
 	// 5.Recv the resp etag to complete
+	err = nil
 	for i := 0; i < partNum; i++ {
 		if chunks[i].Done {
 			optcom.Parts = append(optcom.Parts, Object{
 				PartNumber: chunks[i].Number, ETag: chunks[i].ETag},
 			)
-			consumedBytes += chunks[i].Size
-			event = newProgressEvent(ProgressDataEvent, chunks[i].Size, consumedBytes, totalBytes)
-			progressCallback(listener, event)
+			if err == nil {
+				consumedBytes += chunks[i].Size
+				event = newProgressEvent(ProgressDataEvent, chunks[i].Size, consumedBytes, totalBytes)
+				progressCallback(listener, event)
+			}
 			continue
 		}
 		res := <-chresults
 		// Notice one part fail can not get the etag according.
 		if res.Resp == nil || res.err != nil {
 			// Some part already fail, can not to get the header inside.
-			err := fmt.Errorf("UploadID %s, part %d failed to get resp content. error: %s", uploadID, res.PartNumber, res.err.Error())
-			event = newProgressEvent(ProgressFailedEvent, 0, consumedBytes, totalBytes, err)
-			progressCallback(listener, event)
-			return nil, nil, err
+			err = fmt.Errorf("UploadID %s, part %d failed to get resp content. error: %s", uploadID, res.PartNumber, res.err.Error())
+			continue
 		}
 		// Notice one part fail can not get the etag according.
 		etag := res.Resp.Header.Get("ETag")
 		optcom.Parts = append(optcom.Parts, Object{
 			PartNumber: res.PartNumber, ETag: etag},
 		)
-		consumedBytes += chunks[res.PartNumber-1].Size
-		event = newProgressEvent(ProgressDataEvent, chunks[res.PartNumber-1].Size, consumedBytes, totalBytes)
-		progressCallback(listener, event)
+		if err == nil {
+			consumedBytes += chunks[res.PartNumber-1].Size
+			event = newProgressEvent(ProgressDataEvent, chunks[res.PartNumber-1].Size, consumedBytes, totalBytes)
+			progressCallback(listener, event)
+		}
 	}
 	close(chresults)
+	if err != nil {
+		event = newProgressEvent(ProgressFailedEvent, 0, consumedBytes, totalBytes, err)
+		progressCallback(listener, event)
+		return nil, nil, err
+	}
 	sort.Sort(ObjectList(optcom.Parts))
 	event = newProgressEvent(ProgressCompletedEvent, 0, consumedBytes, totalBytes)
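
Why this reshuffle fixes the panic in the title: the old code returned from inside the receive loop on the first failed part, so chresults could be closed while worker goroutines still had sends in flight, which panics with "send on closed channel". The new code records the first error, keeps draining all partNum results, closes the channel only after every worker has delivered, and reports the failure afterwards; progress callbacks are also skipped once an error is recorded. A minimal, self-contained sketch of that drain-before-close pattern (the names here are illustrative, not the SDK's):

package main

import (
	"fmt"
	"time"
)

// result stands in for the SDK's per-part upload result.
type result struct {
	part int
	err  error
}

func main() {
	const partNum = 4
	results := make(chan result, partNum)

	// Workers: each sends exactly one result, possibly after a delay.
	for i := 1; i <= partNum; i++ {
		go func(part int) {
			time.Sleep(time.Duration(part) * 10 * time.Millisecond)
			var err error
			if part == 2 {
				err = fmt.Errorf("part %d failed", part)
			}
			results <- result{part: part, err: err}
		}(i)
	}

	// Receiver: remember the first error but keep draining. Breaking out
	// here and closing the channel early is exactly what panics, because
	// slower workers would then send on a closed channel.
	var firstErr error
	for i := 0; i < partNum; i++ {
		res := <-results
		if res.err != nil {
			if firstErr == nil {
				firstErr = res.err
			}
			continue
		}
		// Success path: the real code collects the part's ETag here.
	}
	close(results) // safe: every worker's send has completed

	if firstErr != nil {
		fmt.Println("upload aborted:", firstErr)
		return
	}
	fmt.Println("all parts done")
}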

object_part.go

@@ -10,6 +10,7 @@ import (
 	"net/url"
 	"sort"
 	"strings"
+	"time"
 )

 // InitiateMultipartUploadOptions is the option of InitateMultipartUpload
@@ -349,6 +350,7 @@ func copyworker(s *ObjectService, jobs <-chan *CopyJobs, results chan<- *CopyRes
 				results <- &copyres
 				break
 			}
+			time.Sleep(10 * time.Millisecond)
 			continue
 		}
 		results <- &copyres
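
The one-line change above adds a short fixed pause between retry attempts inside copyworker, so a failing part copy doesn't hammer the service in a tight loop. A hedged sketch of the retry shape only (doCopy and maxRetries are placeholders, not the SDK's API):

package example

import "time"

// retryCopy is a hypothetical stand-in for copyworker's retry loop,
// shown only to illustrate the pause between attempts.
func retryCopy(doCopy func() error, maxRetries int) error {
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = doCopy(); err == nil {
			return nil // success: stop retrying
		}
		// The new time.Sleep: back off briefly instead of spinning.
		time.Sleep(10 * time.Millisecond)
	}
	return err // report the last failure after exhausting retries
}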
@@ -389,7 +391,7 @@ func SplitCopyFileIntoChunks(totalBytes int64, partSize int64) ([]Chunk, int, er
 			return nil, 0, errors.New("Too many parts, out of 10000")
 		}
 	} else {
-		partNum, partSize = DividePart(totalBytes, 64)
+		partNum, partSize = DividePart(totalBytes, 128)
 	}

 	var chunks []Chunk
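
Raising the DividePart argument from 64 to 128 changes how MultiCopy auto-sizes parts when the caller doesn't pass a partSize. A hedged sketch of what a DividePart-style helper plausibly does (this is an assumption for illustration, not the SDK's verified source; remainder handling is elided): treat the second argument as a base part size in MB and grow it until the part count fits under the 10000-part cap, so a larger base means fewer, bigger parts.

package example

// dividePart is a hypothetical reimplementation for illustration only.
func dividePart(totalBytes int64, baseMB int) (partNum, partSize int64) {
	partSize = int64(baseMB) * 1024 * 1024
	partNum = totalBytes / partSize
	for partNum >= 10000 { // stay under the 10000-part limit
		partSize *= 2
		partNum = totalBytes / partSize
	}
	return partNum, partSize
}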
@@ -483,13 +485,12 @@ func (s *ObjectService) MultiCopy(ctx context.Context, name string, sourceURL st
 		}
 		close(chjobs)
 	}()
-
 	err = nil
 	for i := 0; i < partNum; i++ {
 		res := <-chresults
 		if res.res == nil || res.err != nil {
 			err = fmt.Errorf("UploadID %s, part %d failed to get resp content. error: %s", uploadID, res.PartNumber, res.err.Error())
-			break
+			continue
 		}
 		etag := res.res.ETag
 		optcom.Parts = append(optcom.Parts, Object{
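
This is the same pattern as the Upload fix above: on a failed part, MultiCopy now records the error and continues receiving instead of breaking out, so all partNum results are drained from chresults before the channel is closed and the error is returned.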

object_select.go

@@ -15,7 +15,7 @@ import (
 )

 type JSONInputSerialization struct {
-	Type string `xml:"Type"`
+	Type string `xml:"Type,omitempty"`
 }

 type CSVInputSerialization struct {
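
The tag change matters because encoding/xml always emits an element for a field tagged plainly with xml:"Type", even when the string is empty; with ",omitempty" an unset Type is dropped from the request body entirely. A minimal sketch of the difference (the structs here are stripped-down stand-ins, not the SDK's full types):

package main

import (
	"encoding/xml"
	"fmt"
)

type withoutOmit struct {
	XMLName xml.Name `xml:"JSON"`
	Type    string   `xml:"Type"`
}

type withOmit struct {
	XMLName xml.Name `xml:"JSON"`
	Type    string   `xml:"Type,omitempty"`
}

func main() {
	a, _ := xml.Marshal(withoutOmit{})
	b, _ := xml.Marshal(withOmit{})
	fmt.Println(string(a)) // <JSON><Type></Type></JSON>
	fmt.Println(string(b)) // <JSON></JSON>
}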
