diff --git a/config/blobs.yml b/config/blobs.yml
index d4314934..f629d7cb 100644
--- a/config/blobs.yml
+++ b/config/blobs.yml
@@ -1,3 +1,6 @@
go1.20.10.linux-amd64.tar.gz:
size: 100663296
sha: sha256:80d34f1fd74e382d86c2d6102e0e60d4318461a7c2f457ec1efc4042752d4248
+go1.21.4.linux-amd64.tar.gz:
+ size: 66615271
+ sha: sha256:73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af
diff --git a/go1.21.4.linux-amd64.tar.gz b/go1.21.4.linux-amd64.tar.gz
new file mode 100644
index 00000000..2f2001e9
Binary files /dev/null and b/go1.21.4.linux-amd64.tar.gz differ
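
The new Go 1.21.4 blob above is tracked in config/blobs.yml by its size and sha256. As a sanity check before recording such a blob (typically via the BOSH CLI's add-blob command), the downloaded tarball can be compared against those values. The snippet below is a minimal, hypothetical Go sketch of that check; the path and expected values are copied from the diff, everything else is illustrative only.

    package main

    // Minimal, hypothetical check of the new blob against the size and sha256
    // recorded in config/blobs.yml; path and expected values come from the diff.
    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "log"
        "os"
    )

    func main() {
        const (
            blobPath     = "go1.21.4.linux-amd64.tar.gz"
            expectedSize = 66615271
            expectedSHA  = "73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af"
        )

        f, err := os.Open(blobPath)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        h := sha256.New()
        n, err := io.Copy(h, f) // n is the byte count, i.e. the blob size
        if err != nil {
            log.Fatal(err)
        }

        gotSHA := hex.EncodeToString(h.Sum(nil))
        if n != expectedSize || gotSHA != expectedSHA {
            log.Fatalf("blob mismatch: size %d sha256 %s", n, gotSHA)
        }
        fmt.Println("blob matches config/blobs.yml")
    }
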
diff --git a/src/bosh-alicloud-cpi/action/create_stemcell.go b/src/bosh-alicloud-cpi/action/create_stemcell.go
index de570f3a..2ed1e566 100644
--- a/src/bosh-alicloud-cpi/action/create_stemcell.go
+++ b/src/bosh-alicloud-cpi/action/create_stemcell.go
@@ -111,11 +111,11 @@ func (a StemcellProps) GetDiskGB() int {
}
// create stemcell policy:
-// 1. stemcell metadate set in cloudProps
-// 2. we provide three ways to create stemcell
-// 2.1 region-imageId format, return Stemcell directly
-// 2.2 source URL, put the raw image on OSS, then create image by the oss url
-// 2.3 local tarball, upload the tarball on OSS, the create image by the oss url
+// 1. stemcell metadata set in cloudProps
+// 2. we provide three ways to create a stemcell
+// 2.1 region-imageId format, return Stemcell directly
+// 2.2 source URL, put the raw image on OSS, then create the image from the OSS URL
+// 2.3 local tarball, upload the tarball to OSS, then create the image from the OSS URL
func (a CreateStemcellMethod) CreateStemcell(imagePath string, cloudProps apiv1.StemcellCloudProps) (apiv1.StemcellCID, error) {
var props StemcellProps
var stemcellId string
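
The comment block above names three stemcell-creation paths (an existing region-imageId, a source URL copied onto OSS, or a local tarball uploaded to OSS). The following is a hypothetical sketch of that dispatch only, not the CPI's actual implementation; the struct fields and return strings are illustrative stand-ins.

    package main

    // Hypothetical sketch of the creation policy described in the comment above.
    // Field names and return values are illustrative, not the CPI's real API.
    import "fmt"

    type stemcellProps struct {
        ImageID   string // already-registered image in "region-imageId" style
        SourceURL string // raw image reachable over HTTP, to be copied onto OSS
    }

    // pickStemcellPath mirrors the three paths: reuse an existing image,
    // import from a source URL via OSS, or upload a local tarball to OSS first.
    func pickStemcellPath(props stemcellProps, tarballPath string) string {
        switch {
        case props.ImageID != "":
            return "2.1 reuse existing image: " + props.ImageID
        case props.SourceURL != "":
            return "2.2 copy to OSS and import from URL: " + props.SourceURL
        default:
            return "2.3 upload local tarball to OSS, then import: " + tarballPath
        }
    }

    func main() {
        fmt.Println(pickStemcellPath(stemcellProps{ImageID: "cn-hangzhou-m-abc123"}, ""))
        fmt.Println(pickStemcellPath(stemcellProps{SourceURL: "https://example.com/raw.img"}, ""))
        fmt.Println(pickStemcellPath(stemcellProps{}, "/tmp/stemcell/image.tgz"))
    }
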
diff --git a/src/bosh-alicloud-cpi/action/create_vm.go b/src/bosh-alicloud-cpi/action/create_vm.go
index b39f2000..29e92c7e 100755
--- a/src/bosh-alicloud-cpi/action/create_vm.go
+++ b/src/bosh-alicloud-cpi/action/create_vm.go
@@ -30,7 +30,6 @@ const (
PostPaid = InstanceChargeType("PostPaid")
)
-//
// Instance properties: ref to docs/bosh/alicloud-cpi.md
// spot https://help.aliyun.com/knowledge_detail/48269.html
// ram profile https://help.aliyun.com/document_detail/54579.html?spm=5176.doc25481.6.797.UVS7aB
diff --git a/src/bosh-alicloud-cpi/action/set_vm_metadata.go b/src/bosh-alicloud-cpi/action/set_vm_metadata.go
index 30555ac2..6f4fcb92 100755
--- a/src/bosh-alicloud-cpi/action/set_vm_metadata.go
+++ b/src/bosh-alicloud-cpi/action/set_vm_metadata.go
@@ -78,7 +78,6 @@ func convertMetaData(input MetaInput) (MetaData, error) {
return r, nil
}
-//
// InstanceName ref https://help.aliyun.com/document_detail/25503.html
func normalizeName(s string, prefix string) string {
r := ""
@@ -110,7 +109,6 @@ func normalizeName(s string, prefix string) string {
return r
}
-//
// Tag.Key Tag.Name
// ref https://help.aliyun.com/document_detail/25616.html
func normalizeTag(s string) string {
diff --git a/src/bosh-alicloud-cpi/alicloud/instance_manager.go b/src/bosh-alicloud-cpi/alicloud/instance_manager.go
index bd0a8c35..cb85506a 100644
--- a/src/bosh-alicloud-cpi/alicloud/instance_manager.go
+++ b/src/bosh-alicloud-cpi/alicloud/instance_manager.go
@@ -27,7 +27,6 @@ var CreateInstanceCatcher_IpUsed2 = Catcher{"InvalidIPAddress.AlreadyUsed", 30,
var NetworkInterfaceInvalidOperationInvalidEniStateCacher = Catcher{"InvalidOperation.InvalidEniState", 60, 5}
var NlbBindServerCatcher_Conflict_Lock = Catcher{"Conflict.Lock", 15, 3}
-
const (
ChangeInstanceStatusTimeout = time.Duration(600) * time.Second
ChangeInstanceStatusSleepInterval = time.Duration(5) * time.Second
diff --git a/src/bosh-alicloud-cpi/alicloud/network_manager.go b/src/bosh-alicloud-cpi/alicloud/network_manager.go
index 3ba4bfa2..2f9c0626 100644
--- a/src/bosh-alicloud-cpi/alicloud/network_manager.go
+++ b/src/bosh-alicloud-cpi/alicloud/network_manager.go
@@ -6,9 +6,10 @@ package alicloud
import (
"encoding/json"
"fmt"
- util "github.com/alibabacloud-go/tea-utils/service"
"strings"
+ util "github.com/alibabacloud-go/tea-utils/service"
+
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/slb"
@@ -177,11 +178,11 @@ func (a NetworkManagerImpl) BindNlbServerGroup(region, instanceId string, nlbSer
return err
}
request := map[string]interface{}{
- "Servers.1.Port": port,
- "ServerGroupId": nlbServerGroupId,
- "Servers.1.ServerId": instanceId,
+ "Servers.1.Port": port,
+ "ServerGroupId": nlbServerGroupId,
+ "Servers.1.ServerId": instanceId,
"Servers.1.ServerType": "Ecs",
- "ClientToken": buildClientToken("AddServersToServerGroup"),
+ "ClientToken": buildClientToken("AddServersToServerGroup"),
}
if weight != 0 {
request["Servers.1.Weight"] = weight
@@ -198,7 +199,7 @@ func (a NetworkManagerImpl) BindNlbServerGroup(region, instanceId string, nlbSer
})
return err
}
-//
+
// TODO: add retry
func (a NetworkManagerImpl) BindSLB(region, instanceId string, slbId string, weight int) error {
client, err := a.config.NewSlbClient(region)
diff --git a/src/bosh-alicloud-cpi/alicloud/stemcell_manager.go b/src/bosh-alicloud-cpi/alicloud/stemcell_manager.go
index a704d320..16063b76 100644
--- a/src/bosh-alicloud-cpi/alicloud/stemcell_manager.go
+++ b/src/bosh-alicloud-cpi/alicloud/stemcell_manager.go
@@ -183,7 +183,7 @@ func (a StemcellManagerImpl) WaitForImageReady(id string) error {
return a.WaitForImage(region, id, WaitForImageReadyTimeout)
}
-//Wait Image ready
+// WaitForImage waits until the image is ready
func (a StemcellManagerImpl) WaitForImage(regionId, imageId string, timeout int) error {
if timeout <= 0 {
timeout = DefaultWaitForImageReadyTimeout
diff --git a/src/bosh-alicloud-cpi/go.mod b/src/bosh-alicloud-cpi/go.mod
index 620ae4ff..21209e32 100644
--- a/src/bosh-alicloud-cpi/go.mod
+++ b/src/bosh-alicloud-cpi/go.mod
@@ -3,15 +3,15 @@ module bosh-alicloud-cpi
go 1.20
require (
- github.com/alibabacloud-go/tea v1.1.20
+ github.com/alibabacloud-go/tea v1.2.1
github.com/alibabacloud-go/tea-rpc v1.3.3
github.com/alibabacloud-go/tea-utils v1.4.5
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190830085952-7a8078751366
- github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5
+ github.com/aliyun/aliyun-oss-go-sdk v3.0.1+incompatible
github.com/aliyun/credentials-go v1.2.7
github.com/cloudfoundry/bosh-utils v0.0.407
github.com/cppforlife/bosh-cpi-go v0.0.0-20180718174221-526823bbeafd
- github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c
+ github.com/google/uuid v1.4.0
github.com/onsi/ginkgo v1.2.0
github.com/onsi/gomega v1.27.10
)
@@ -23,13 +23,13 @@ require (
github.com/charlievieth/fs v0.0.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
- github.com/json-iterator/go v1.1.10 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
- gopkg.in/ini.v1 v1.56.0 // indirect
+ gopkg.in/ini.v1 v1.66.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/src/bosh-alicloud-cpi/go.sum b/src/bosh-alicloud-cpi/go.sum
index 58660134..1c86e42d 100644
--- a/src/bosh-alicloud-cpi/go.sum
+++ b/src/bosh-alicloud-cpi/go.sum
@@ -4,8 +4,8 @@ github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9Q
github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
-github.com/alibabacloud-go/tea v1.1.20 h1:wFK4xEbvGYMtzTyHhIju9D7ecWxvSUdoLO6y4vDLFik=
-github.com/alibabacloud-go/tea v1.1.20/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask=
+github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA=
github.com/alibabacloud-go/tea-rpc v1.3.3 h1:NZJtukZouR0jpN0dWeBB5bMZdVvTyRPyISxc/hfOALo=
github.com/alibabacloud-go/tea-rpc v1.3.3/go.mod h1:zwKwxuf92liNsPcLOxPdrkvR5Dq6jtX2du6qx8FT094=
github.com/alibabacloud-go/tea-rpc-utils v1.1.2 h1:ZTfFREnP2q9D49T2J/1jYYOndepGdrUOgm/JR8/bIQ0=
@@ -15,12 +15,16 @@ github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOq
github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190830085952-7a8078751366 h1:/5EJG2/jjtZzJFmfcJjgXLEZ0VRkTBhwe9FyBloegL4=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190830085952-7a8078751366/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
-github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1 h1:AQLG+LufYiDwCoS1sYvBKIhLr12R1fhUmMY5gWB7o+I=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.119 h1:leglv6sWCOEPmUjvm+pt4XIxtqSz60HtycZoEp12K5Y=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.119/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/aliyun/aliyun-oss-go-sdk v3.0.1+incompatible h1:so4m5rRA32Tc5GgKg/5gKUu0CRsYmVO3ThMP6T3CwLc=
+github.com/aliyun/aliyun-oss-go-sdk v3.0.1+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
github.com/aliyun/credentials-go v1.2.7 h1:gLtFylxLZ1TWi1pStIt1O6a53GFU1zkNwjtJir2B4ow=
github.com/aliyun/credentials-go v1.2.7/go.mod h1:/KowD1cfGSLrLsH28Jr8W+xwoId0ywIy5lNzDz6O1vw=
-github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
@@ -41,17 +45,16 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ=
-github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c h1:jWtZjFEUE/Bz0IeIhqCnyZ3HG6KRXSntXe4SjtuTH7c=
-github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -61,8 +64,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
@@ -73,13 +77,10 @@ github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0=
github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -88,27 +89,56 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
@@ -117,6 +147,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -127,8 +159,9 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.56.0 h1:DPMeDvGTM54DXbPkVIZsp19fp/I2K7zwA/itHYHKo8Y=
gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
+gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/alibabacloud-go/tea/tea/tea.go b/src/bosh-alicloud-cpi/vendor/github.com/alibabacloud-go/tea/tea/tea.go
index ec60f129..c984caf8 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/alibabacloud-go/tea/tea/tea.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/alibabacloud-go/tea/tea/tea.go
@@ -262,7 +262,7 @@ func Convert(in interface{}, out interface{}) error {
return err
}
-// Convert is use convert map[string]interface object to struct
+// Recover is used to format error
func Recover(in interface{}) error {
if in == nil {
return nil
@@ -322,6 +322,9 @@ func DoRequest(request *Request, requestRuntime map[string]interface{}) (respons
requestURL := ""
request.Domain = request.Headers["host"]
+ if request.Port != nil {
+ request.Domain = String(fmt.Sprintf("%s:%d", StringValue(request.Domain), IntValue(request.Port)))
+ }
requestURL = fmt.Sprintf("%s://%s%s", StringValue(request.Protocol), StringValue(request.Domain), StringValue(request.Pathname))
queryParams := request.Query
// sort QueryParams by key
@@ -412,28 +415,30 @@ func getHttpTransport(req *Request, runtime *RuntimeObject) (*http.Transport, er
if err != nil {
return nil, err
}
- if strings.ToLower(*req.Protocol) == "https" &&
- runtime.Key != nil && runtime.Cert != nil {
- cert, err := tls.X509KeyPair([]byte(StringValue(runtime.Cert)), []byte(StringValue(runtime.Key)))
- if err != nil {
- return nil, err
- }
-
- trans.TLSClientConfig = &tls.Config{
- Certificates: []tls.Certificate{cert},
- InsecureSkipVerify: BoolValue(runtime.IgnoreSSL),
- }
- if runtime.CA != nil {
- clientCertPool := x509.NewCertPool()
- ok := clientCertPool.AppendCertsFromPEM([]byte(StringValue(runtime.CA)))
- if !ok {
- return nil, errors.New("Failed to parse root certificate")
+ if strings.ToLower(*req.Protocol) == "https" {
+ if BoolValue(runtime.IgnoreSSL) != true {
+ trans.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: false,
+ }
+ if runtime.Key != nil && runtime.Cert != nil && StringValue(runtime.Key) != "" && StringValue(runtime.Cert) != "" {
+ cert, err := tls.X509KeyPair([]byte(StringValue(runtime.Cert)), []byte(StringValue(runtime.Key)))
+ if err != nil {
+ return nil, err
+ }
+ trans.TLSClientConfig.Certificates = []tls.Certificate{cert}
+ }
+ if runtime.CA != nil && StringValue(runtime.CA) != "" {
+ clientCertPool := x509.NewCertPool()
+ ok := clientCertPool.AppendCertsFromPEM([]byte(StringValue(runtime.CA)))
+ if !ok {
+ return nil, errors.New("Failed to parse root certificate")
+ }
+ trans.TLSClientConfig.RootCAs = clientCertPool
+ }
+ } else {
+ trans.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: true,
}
- trans.TLSClientConfig.RootCAs = clientCertPool
- }
- } else {
- trans.TLSClientConfig = &tls.Config{
- InsecureSkipVerify: BoolValue(runtime.IgnoreSSL),
}
}
if httpProxy != nil {
@@ -473,6 +478,10 @@ func getHttpTransport(req *Request, runtime *RuntimeObject) (*http.Transport, er
} else {
trans.DialContext = setDialContext(runtime)
}
+ if runtime.MaxIdleConns != nil && *runtime.MaxIdleConns > 0 {
+ trans.MaxIdleConns = IntValue(runtime.MaxIdleConns)
+ trans.MaxIdleConnsPerHost = IntValue(runtime.MaxIdleConns)
+ }
return trans, nil
}
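
The getHttpTransport changes above rework TLS setup so that IgnoreSSL alone decides whether verification is skipped, the client certificate and custom CA pool are only wired in when both values are non-empty, and MaxIdleConns is propagated to the transport. Below is a standalone sketch of the same shape using only the standard library; the runtimeOpts struct is a stand-in for tea's RuntimeObject and not part of the patch.

    package main

    // Standalone sketch of the transport logic in the patch above.
    import (
        "crypto/tls"
        "crypto/x509"
        "errors"
        "net/http"
    )

    type runtimeOpts struct {
        IgnoreSSL    bool
        Cert, Key    string // optional client certificate (PEM)
        CA           string // optional root CA bundle (PEM)
        MaxIdleConns int
    }

    func buildTransport(useHTTPS bool, rt runtimeOpts) (*http.Transport, error) {
        trans := &http.Transport{}
        if useHTTPS {
            if !rt.IgnoreSSL {
                trans.TLSClientConfig = &tls.Config{InsecureSkipVerify: false}
                if rt.Cert != "" && rt.Key != "" {
                    cert, err := tls.X509KeyPair([]byte(rt.Cert), []byte(rt.Key))
                    if err != nil {
                        return nil, err
                    }
                    trans.TLSClientConfig.Certificates = []tls.Certificate{cert}
                }
                if rt.CA != "" {
                    pool := x509.NewCertPool()
                    if !pool.AppendCertsFromPEM([]byte(rt.CA)) {
                        return nil, errors.New("failed to parse root certificate")
                    }
                    trans.TLSClientConfig.RootCAs = pool
                }
            } else {
                // IgnoreSSL disables verification entirely.
                trans.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
            }
        }
        if rt.MaxIdleConns > 0 {
            trans.MaxIdleConns = rt.MaxIdleConns
            trans.MaxIdleConnsPerHost = rt.MaxIdleConns
        }
        return trans, nil
    }

    func main() {
        if _, err := buildTransport(true, runtimeOpts{MaxIdleConns: 64}); err != nil {
            panic(err)
        }
    }
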
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
new file mode 100644
index 00000000..d46e9d12
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2015 aliyun.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
index fad9f0c6..9cb9c3c7 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
@@ -4,7 +4,9 @@ import (
"bytes"
"crypto/hmac"
"crypto/sha1"
+ "crypto/sha256"
"encoding/base64"
+ "encoding/hex"
"fmt"
"hash"
"io"
@@ -12,6 +14,7 @@ import (
"sort"
"strconv"
"strings"
+ "time"
)
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
@@ -20,27 +23,117 @@ type headerSorter struct {
Vals []string
}
+// getAdditionalHeaderKeys get exist key in http header
+func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
+ var keysList []string
+ keysMap := make(map[string]string)
+ srcKeys := make(map[string]string)
+
+ for k := range req.Header {
+ srcKeys[strings.ToLower(k)] = ""
+ }
+
+ for _, v := range conn.config.AdditionalHeaders {
+ if _, ok := srcKeys[strings.ToLower(v)]; ok {
+ keysMap[strings.ToLower(v)] = ""
+ }
+ }
+
+ for k := range keysMap {
+ keysList = append(keysList, k)
+ }
+ sort.Strings(keysList)
+ return keysList, keysMap
+}
+
+// getAdditionalHeaderKeysV4 get exist key in http header
+func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
+ var keysList []string
+ keysMap := make(map[string]string)
+ srcKeys := make(map[string]string)
+
+ for k := range req.Header {
+ srcKeys[strings.ToLower(k)] = ""
+ }
+
+ for _, v := range conn.config.AdditionalHeaders {
+ if _, ok := srcKeys[strings.ToLower(v)]; ok {
+ if !strings.EqualFold(v, HTTPHeaderContentMD5) && !strings.EqualFold(v, HTTPHeaderContentType) {
+ keysMap[strings.ToLower(v)] = ""
+ }
+ }
+ }
+
+ for k := range keysMap {
+ keysList = append(keysList, k)
+ }
+ sort.Strings(keysList)
+ return keysList, keysMap
+}
+
// signHeader signs the header and sets it as the authorization header.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
- // Get the final authorization string
- authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
+ akIf := conn.config.GetCredentials()
+ authorizationStr := ""
+ if conn.config.AuthVersion == AuthV4 {
+ strDay := ""
+ strDate := req.Header.Get(HttpHeaderOssDate)
+ if strDate == "" {
+ strDate = req.Header.Get(HTTPHeaderDate)
+ t, _ := time.Parse(http.TimeFormat, strDate)
+ strDay = t.Format("20060102")
+ } else {
+ t, _ := time.Parse(iso8601DateFormatSecond, strDate)
+ strDay = t.Format("20060102")
+ }
+
+ signHeaderProduct := conn.config.GetSignProduct()
+ signHeaderRegion := conn.config.GetSignRegion()
+
+ additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
+ if len(additionalList) > 0 {
+ authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
+ additionnalHeadersStr := strings.Join(additionalList, ";")
+ authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionnalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+ } else {
+ authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
+ authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+ }
+ } else if conn.config.AuthVersion == AuthV2 {
+ additionalList, _ := conn.getAdditionalHeaderKeys(req)
+ if len(additionalList) > 0 {
+ authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
+ additionnalHeadersStr := strings.Join(additionalList, ";")
+ authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+ } else {
+ authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
+ authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+ }
+ } else {
+ // Get the final authorization string
+ authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
+ }
// Give the parameter "Authorization" value
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
-func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
+func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
// Find out the "x-oss-"'s address in header of the request
- temp := make(map[string]string)
-
+ ossHeadersMap := make(map[string]string)
+ additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
for k, v := range req.Header {
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
- temp[strings.ToLower(k)] = v[0]
+ ossHeadersMap[strings.ToLower(k)] = v[0]
+ } else if conn.config.AuthVersion == AuthV2 {
+ if _, ok := additionalMap[strings.ToLower(k)]; ok {
+ ossHeadersMap[strings.ToLower(k)] = v[0]
+ }
}
}
- hs := newHeaderSorter(temp)
+ hs := newHeaderSorter(ossHeadersMap)
- // Sort the temp by the ascending order
+ // Sort the ossHeadersMap by the ascending order
hs.Sort()
// Get the canonicalizedOSSHeaders
@@ -55,18 +148,140 @@ func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) s
contentType := req.Header.Get(HTTPHeaderContentType)
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
+ // default is v1 signature
signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
+ h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
+
+ // v2 signature
+ if conn.config.AuthVersion == AuthV2 {
+ signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
+ h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
+ }
- conn.config.WriteLog(Debug, "[Req:%p]signStr:%s.\n", req, signStr)
+ if conn.config.LogLevel >= Debug {
+ conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
+ }
- h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
}
-func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, params map[string]interface{}) string {
+func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string) string {
+ // Find out the "x-oss-"'s address in header of the request
+ ossHeadersMap := make(map[string]string)
+ additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
+ for k, v := range req.Header {
+ if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
+ ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
+ } else {
+ if _, ok := additionalMap[strings.ToLower(k)]; ok {
+ ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
+ }
+ }
+ }
+
+ // Required parameters
+ signDate := ""
+ dateFormat := ""
+ date := req.Header.Get(HTTPHeaderDate)
+ if date != "" {
+ signDate = date
+ dateFormat = http.TimeFormat
+ }
+
+ ossDate := req.Header.Get(HttpHeaderOssDate)
+ _, ok := ossHeadersMap[strings.ToLower(HttpHeaderOssDate)]
+ if ossDate != "" {
+ signDate = ossDate
+ dateFormat = iso8601DateFormatSecond
+ if !ok {
+ ossHeadersMap[strings.ToLower(HttpHeaderOssDate)] = strings.Trim(ossDate, " ")
+ }
+ }
+
+ contentType := req.Header.Get(HTTPHeaderContentType)
+ _, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentType)]
+ if contentType != "" && !ok {
+ ossHeadersMap[strings.ToLower(HTTPHeaderContentType)] = strings.Trim(contentType, " ")
+ }
+
+ contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
+ _, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)]
+ if contentMd5 != "" && !ok {
+ ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)] = strings.Trim(contentMd5, " ")
+ }
+
+ hs := newHeaderSorter(ossHeadersMap)
+
+ // Sort the ossHeadersMap by the ascending order
+ hs.Sort()
+
+ // Get the canonicalizedOSSHeaders
+ canonicalizedOSSHeaders := ""
+ for i := range hs.Keys {
+ canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
+ }
+
+ signStr := ""
+
+ // v4 signature
+ hashedPayload := req.Header.Get(HttpHeaderOssContentSha256)
+
+ // subResource
+ resource := canonicalizedResource
+ subResource := ""
+ subPos := strings.LastIndex(canonicalizedResource, "?")
+ if subPos != -1 {
+ subResource = canonicalizedResource[subPos+1:]
+ resource = canonicalizedResource[0:subPos]
+ }
+
+ // get canonical request
+ canonicalReuqest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
+ rh := sha256.New()
+ io.WriteString(rh, canonicalReuqest)
+ hashedRequest := hex.EncodeToString(rh.Sum(nil))
+
+ if conn.config.LogLevel >= Debug {
+ conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(canonicalReuqest))
+ }
+
+ // get day,eg 20210914
+ t, _ := time.Parse(dateFormat, signDate)
+ strDay := t.Format("20060102")
+
+ signedStrV4Product := conn.config.GetSignProduct()
+ signedStrV4Region := conn.config.GetSignRegion()
+
+ signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
+ if conn.config.LogLevel >= Debug {
+ conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
+ }
+
+ h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
+ io.WriteString(h1, strDay)
+ h1Key := h1.Sum(nil)
+
+ h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
+ io.WriteString(h2, signedStrV4Region)
+ h2Key := h2.Sum(nil)
+
+ h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
+ io.WriteString(h3, signedStrV4Product)
+ h3Key := h3.Sum(nil)
+
+ h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
+ io.WriteString(h4, "aliyun_v4_request")
+ h4Key := h4.Sum(nil)
+
+ h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
+ io.WriteString(h, signStr)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
if params[HTTPParamAccessKeyID] == nil {
return ""
}
@@ -88,7 +303,7 @@ func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string,
expireStr := strconv.FormatInt(expiration, 10)
signStr := expireStr + "\n" + canonParamsStr + canonResource
- h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
+ h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
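
The auth.go hunk above adds the OSS4-HMAC-SHA256 scheme: the signing key is derived by chaining HMAC-SHA256 over "aliyun_v4" plus the secret, the day (yyyymmdd), the region, the product and the literal "aliyun_v4_request", and the string-to-sign is then signed with that key and hex-encoded. A compact sketch of just that derivation, with illustrative inputs:

    package main

    // Compact sketch of the OSS V4 key-derivation chain used in getSignedStrV4 above.
    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    func hmacSHA256(key, msg []byte) []byte {
        h := hmac.New(sha256.New, key)
        h.Write(msg)
        return h.Sum(nil)
    }

    // deriveV4Signature chains HMAC-SHA256 over day, region, product and the
    // literal "aliyun_v4_request", then signs the string-to-sign with the result.
    func deriveV4Signature(secret, day, region, product, stringToSign string) string {
        k := hmacSHA256([]byte("aliyun_v4"+secret), []byte(day))
        k = hmacSHA256(k, []byte(region))
        k = hmacSHA256(k, []byte(product))
        k = hmacSHA256(k, []byte("aliyun_v4_request"))
        return hex.EncodeToString(hmacSHA256(k, []byte(stringToSign)))
    }

    func main() {
        sig := deriveV4Signature("exampleSecret", "20210914", "cn-hangzhou", "oss",
            "OSS4-HMAC-SHA256\n2021-09-14T00:00:00Z\n20210914/cn-hangzhou/oss/aliyun_v4_request\n<hashedRequest>")
        fmt.Println(sig)
    }
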
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
index 067855e0..84dae999 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
@@ -2,6 +2,7 @@ package oss
import (
"bytes"
+ "context"
"crypto/md5"
"encoding/base64"
"encoding/xml"
@@ -9,6 +10,7 @@ import (
"hash"
"hash/crc64"
"io"
+ "io/ioutil"
"net/http"
"net/url"
"os"
@@ -28,13 +30,13 @@ type Bucket struct {
// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\".
// reader io.Reader instance for reading the data for uploading
// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding
-// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
-// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
-// error it's nil if no error, otherwise it's an error object.
+// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/putobject
//
+// error it's nil if no error, otherwise it's an error object.
func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
- opts := addContentType(options, objectKey)
+ opts := AddContentType(options, objectKey)
request := &PutObjectRequest{
ObjectKey: objectKey,
@@ -56,7 +58,6 @@ func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Op
// options the options for uploading the object. Refer to the parameter options in PutObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
fd, err := os.Open(filePath)
if err != nil {
@@ -64,7 +65,7 @@ func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Op
}
defer fd.Close()
- opts := addContentType(options, filePath, objectKey)
+ opts := AddContentType(options, filePath, objectKey)
request := &PutObjectRequest{
ObjectKey: objectKey,
@@ -86,30 +87,39 @@ func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Op
//
// Response the response from OSS.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
- isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
+ isOptSet, _, _ := IsOptionSet(options, HTTPHeaderContentType)
if !isOptSet {
- options = addContentType(options, request.ObjectKey)
+ options = AddContentType(options, request.ObjectKey)
}
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
params := map[string]interface{}{}
resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener)
if err != nil {
return nil, err
}
-
- if bucket.getConfig().IsEnableCRC {
- err = checkCRC(resp, "DoPutObject")
+ if bucket.GetConfig().IsEnableCRC {
+ err = CheckCRC(resp, "DoPutObject")
if err != nil {
return resp, err
}
}
-
- err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
-
+ err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+ body, _ := ioutil.ReadAll(resp.Body)
+ if len(body) > 0 {
+ if err != nil {
+ err = tryConvertServiceError(body, resp, err)
+ } else {
+ rb, _ := FindOption(options, responseBody, nil)
+ if rb != nil {
+ if rbody, ok := rb.(*[]byte); ok {
+ *rbody = body
+ }
+ }
+ }
+ }
return resp, err
}
@@ -117,12 +127,12 @@ func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*
//
// objectKey the object key.
// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch,
-// IfNoneMatch, AcceptEncoding. For more details, please check out:
-// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// IfNoneMatch, AcceptEncoding. For more details, please check out:
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/getobject
//
// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
if err != nil {
@@ -139,7 +149,6 @@ func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadClos
// options the options for downloading the object. Refer to the parameter options in method GetObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
tempFilePath := filePath + TempFileSuffix
@@ -164,15 +173,15 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
}
// Compares the CRC value
- hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
- encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
+ hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+ encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil)
acceptEncoding := ""
if encodeOpt != nil {
acceptEncoding = encodeOpt.(string)
}
- if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
+ if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
result.Response.ClientCRC = result.ClientCRC.Sum64()
- err = checkCRC(result.Response, "GetObjectToFile")
+ err = CheckCRC(result.Response, "GetObjectToFile")
if err != nil {
os.Remove(tempFilePath)
return err
@@ -189,9 +198,8 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
//
// GetObjectResult the result instance of getting the object.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
- params, _ := getRawParams(options)
+ params, _ := GetRawParams(options)
resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
if err != nil {
return nil, err
@@ -203,15 +211,15 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
// CRC
var crcCalc hash.Hash64
- hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
- if bucket.getConfig().IsEnableCRC && !hasRange {
- crcCalc = crc64.New(crcTable())
+ hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+ if bucket.GetConfig().IsEnableCRC && !hasRange {
+ crcCalc = crc64.New(CrcTable())
result.ServerCRC = resp.ServerCRC
result.ClientCRC = crcCalc
}
// Progress
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
@@ -224,16 +232,26 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
// srcObjectKey the source object to copy.
// destObjectKey the target object to copy.
// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch,
-// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
-// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
-// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details :
-// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
//
-// error it's nil if no error, otherwise it's an error object.
+// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
+// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
+// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details :
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/copyobject
//
+// error it's nil if no error, otherwise it's an error object.
func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
var out CopyObjectResult
- options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+
+ //first find version id
+ versionIdKey := "versionId"
+ versionId, _ := FindOption(options, versionIdKey, nil)
+ if versionId == nil {
+ options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+ } else {
+ options = DeleteOption(options, versionIdKey)
+ options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string)))
+ }
+
params := map[string]interface{}{}
resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil)
if err != nil {
@@ -253,12 +271,10 @@ func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...O
// options copy options, check out parameter options in function CopyObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
}
-//
// CopyObjectFrom copies the object to another bucket.
//
// srcBucketName source bucket name.
@@ -267,7 +283,6 @@ func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey st
// options copy options. Check out parameter options in function CopyObject.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
destBucketName := bucket.BucketName
var out CopyObjectResult
@@ -281,14 +296,38 @@ func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey s
func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) {
var out CopyObjectResult
- options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+
+ //first find version id
+ versionIdKey := "versionId"
+ versionId, _ := FindOption(options, versionIdKey, nil)
+ if versionId == nil {
+ options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+ } else {
+ options = DeleteOption(options, versionIdKey)
+ options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string)))
+ }
+
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return out, err
}
params := map[string]interface{}{}
- resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
+
+ ctxArg, _ := FindOption(options, contextArg, nil)
+ ctx, _ := ctxArg.(context.Context)
+
+ resp, err := bucket.Client.Conn.DoWithContext(ctx, "PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ pRespHeader := respHeader.(*http.Header)
+ if resp != nil {
+ *pRespHeader = resp.Headers
+ }
+ }
+
if err != nil {
return out, err
}
@@ -309,11 +348,11 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
// reader io.Reader. The read instance for reading the data to append.
// appendPosition the start position to append.
// destObjectProperties the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding,
-// Expires, ServerSideEncryption, ObjectACL.
+//
+// Expires, ServerSideEncryption, ObjectACL.
//
// int64 the next append position, it's valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
request := &AppendObjectRequest{
ObjectKey: objectKey,
@@ -336,27 +375,40 @@ func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosi
//
// AppendObjectResult the result object for appending object.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
params := map[string]interface{}{}
params["append"] = nil
params["position"] = strconv.FormatInt(request.Position, 10)
headers := make(map[string]string)
- opts := addContentType(options, request.ObjectKey)
+ opts := AddContentType(options, request.ObjectKey)
handleOptions(headers, opts)
var initCRC uint64
- isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64)
+ isCRCSet, initCRCOpt, _ := IsOptionSet(options, initCRC64)
if isCRCSet {
initCRC = initCRCOpt.(uint64)
}
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
handleOptions(headers, opts)
- resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
+
+ ctxArg, _ := FindOption(options, contextArg, nil)
+ ctx, _ := ctxArg.(context.Context)
+
+ resp, err := bucket.Client.Conn.DoWithContext(ctx, "POST", bucket.BucketName, request.ObjectKey, params, headers,
request.Reader, initCRC, listener)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ pRespHeader := respHeader.(*http.Header)
+ if resp != nil {
+ *pRespHeader = resp.Headers
+ }
+ }
+
if err != nil {
return nil, err
}
@@ -368,8 +420,8 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
CRC: resp.ServerCRC,
}
- if bucket.getConfig().IsEnableCRC && isCRCSet {
- err = checkCRC(resp, "AppendObject")
+ if bucket.GetConfig().IsEnableCRC && isCRCSet {
+ err = CheckCRC(resp, "AppendObject")
if err != nil {
return result, err
}
@@ -383,63 +435,106 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
// objectKey the object key to delete.
//
// error it's nil if no error, otherwise it's an error object.
-//
-func (bucket Bucket) DeleteObject(objectKey string) error {
- params := map[string]interface{}{}
- resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil)
+func (bucket Bucket) DeleteObject(objectKey string, options ...Option) error {
+ params, _ := GetRawParams(options)
+ resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// DeleteObjects deletes multiple objects.
//
// objectKeys the object keys to delete.
// options the options for deleting objects.
-// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used.
+//
+// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used.
//
// DeleteObjectsResult the result object.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
out := DeleteObjectsResult{}
dxml := deleteXML{}
for _, key := range objectKeys {
dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
}
- isQuiet, _ := findOption(options, deleteObjectsQuiet, false)
+ isQuiet, _ := FindOption(options, deleteObjectsQuiet, false)
dxml.Quiet = isQuiet.(bool)
+ xmlData := marshalDeleteObjectToXml(dxml)
+ body, err := bucket.DeleteMultipleObjectsXml(xmlData, options...)
+ if err != nil {
+ return out, err
+ }
+ deletedResult := DeleteObjectVersionsResult{}
+ if !dxml.Quiet {
+ if err = xmlUnmarshal(strings.NewReader(body), &deletedResult); err == nil {
+ err = decodeDeleteObjectsResult(&deletedResult)
+ }
+ }
+ // Keep compatibility:need convert to struct DeleteObjectsResult
+ out.XMLName = deletedResult.XMLName
+ for _, v := range deletedResult.DeletedObjectsDetail {
+ out.DeletedObjects = append(out.DeletedObjects, v.Key)
+ }
+ return out, err
+}
- bs, err := xml.Marshal(dxml)
+// DeleteObjectVersions deletes multiple object versions.
+//
+// objectVersions the object keys and versions to delete.
+// options the options for deleting objects.
+//
+// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used.
+//
+// DeleteObjectVersionsResult the result object.
+// error it's nil if no error, otherwise it's an error object.
+func (bucket Bucket) DeleteObjectVersions(objectVersions []DeleteObject, options ...Option) (DeleteObjectVersionsResult, error) {
+ out := DeleteObjectVersionsResult{}
+ dxml := deleteXML{}
+ dxml.Objects = objectVersions
+ isQuiet, _ := FindOption(options, deleteObjectsQuiet, false)
+ dxml.Quiet = isQuiet.(bool)
+ xmlData := marshalDeleteObjectToXml(dxml)
+ body, err := bucket.DeleteMultipleObjectsXml(xmlData, options...)
if err != nil {
return out, err
}
+ if !dxml.Quiet {
+ if err = xmlUnmarshal(strings.NewReader(body), &out); err == nil {
+ err = decodeDeleteObjectsResult(&out)
+ }
+ }
+ return out, err
+}
+
+// DeleteMultipleObjectsXml deletes multiple object or deletes multiple object versions.
+//
+// xmlData the object keys and versions to delete as the xml format.
+// options the options for deleting objects.
+//
+// string the result response body.
+// error it's nil if no error, otherwise it's an error.
+func (bucket Bucket) DeleteMultipleObjectsXml(xmlData string, options ...Option) (string, error) {
buffer := new(bytes.Buffer)
+ bs := []byte(xmlData)
buffer.Write(bs)
-
- contentType := http.DetectContentType(buffer.Bytes())
- options = append(options, ContentType(contentType))
+ options = append(options, ContentType("application/xml"))
sum := md5.Sum(bs)
b64 := base64.StdEncoding.EncodeToString(sum[:])
options = append(options, ContentMD5(b64))
-
params := map[string]interface{}{}
params["delete"] = nil
params["encoding-type"] = "url"
-
- resp, err := bucket.do("POST", "", params, options, buffer, nil)
+ resp, err := bucket.doInner("POST", "", params, options, buffer, nil)
if err != nil {
- return out, err
+ return "", err
}
defer resp.Body.Close()
- if !dxml.Quiet {
- if err = xmlUnmarshal(resp.Body, &out); err == nil {
- err = decodeDeleteObjectsResult(&out)
- }
- }
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
return out, err
}
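// Illustrative usage sketch (not part of the SDK diff): batch deletion with the functions
// above. Assumes "github.com/aliyun/aliyun-oss-go-sdk/oss" is imported as oss, bucket is an
// initialized *oss.Bucket, and the keys/version id are placeholders.
func deleteExamples(bucket *oss.Bucket) error {
	// Plain batch delete; DeleteObjectsQuiet(true) suppresses the per-key result list.
	if _, err := bucket.DeleteObjects([]string{"logs/a.log", "logs/b.log"}, oss.DeleteObjectsQuiet(true)); err != nil {
		return err
	}
	// Version-aware delete; VersionId is assumed to be a field of oss.DeleteObject.
	toDelete := []oss.DeleteObject{{Key: "logs/a.log", VersionId: "placeholder-version-id"}}
	_, err := bucket.DeleteObjectVersions(toDelete)
	return err
}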
@@ -448,9 +543,8 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
// bool flag of object's existence (true:exists; false:non-exist) when error is nil.
//
// error it's nil if no error, otherwise it's an error object.
-//
-func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
- _, err := bucket.GetObjectMeta(objectKey)
+func (bucket Bucket) IsObjectExist(objectKey string, options ...Option) (bool, error) {
+ _, err := bucket.GetObjectMeta(objectKey, options...)
if err == nil {
return true, nil
}
@@ -468,33 +562,33 @@ func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
// ListObjects lists the objects under the current bucket.
//
// options it contains all the filters for listing objects.
-// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names.
-// The key marker means the returned objects' key must be greater than it in lexicographic order.
//
-// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21,
-// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
-// my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns
-// my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects.
-// The three filters could be used together to achieve filter and paging functionality.
-// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders).
-// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
-// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
-// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix.
+// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names.
+// The key marker means the returned objects' key must be greater than it in lexicographic order.
//
-// For common usage scenario, check out sample/list_object.go.
+// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21,
+// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
+// my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns
+// my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects.
+// The three filters could be used together to achieve filter and paging functionality.
+// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders).
+// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
+// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
+// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix.
//
-// ListObjectsResponse the return value after operation succeeds (only valid when error is nil).
+// For common usage scenario, check out sample/list_object.go.
//
+// ListObjectsResult the return value after operation succeeds (only valid when error is nil).
func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
var out ListObjectsResult
options = append(options, EncodingType("url"))
- params, err := getRawParams(options)
+ params, err := GetRawParams(options)
if err != nil {
return out, err
}
- resp, err := bucket.do("GET", "", params, options, nil, nil)
+ resp, err := bucket.doInner("GET", "", params, options, nil, nil)
if err != nil {
return out, err
}
@@ -509,14 +603,68 @@ func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
return out, err
}
+// ListObjectsV2 lists the objects under the current bucket.
+// It is recommended to use ListObjectsV2 instead of ListObjects.
+// ListObjectsResultV2 the return value after operation succeeds (only valid when error is nil).
+func (bucket Bucket) ListObjectsV2(options ...Option) (ListObjectsResultV2, error) {
+ var out ListObjectsResultV2
+
+ options = append(options, EncodingType("url"))
+ options = append(options, ListType(2))
+ params, err := GetRawParams(options)
+ if err != nil {
+ return out, err
+ }
+
+ resp, err := bucket.doInner("GET", "", params, options, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ if err != nil {
+ return out, err
+ }
+
+ err = decodeListObjectsResultV2(&out)
+ return out, err
+}
+
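// Illustrative usage sketch (not part of the SDK diff): paging through a prefix with
// ListObjectsV2. Assumes an initialized *oss.Bucket; the ContinuationToken helper and the
// result field names follow the SDK as understood here, and "logs/" is a placeholder prefix.
func listAllKeys(bucket *oss.Bucket) ([]string, error) {
	var keys []string
	opts := []oss.Option{oss.Prefix("logs/"), oss.MaxKeys(100)}
	for {
		res, err := bucket.ListObjectsV2(opts...)
		if err != nil {
			return nil, err
		}
		for _, obj := range res.Objects {
			keys = append(keys, obj.Key)
		}
		if !res.IsTruncated {
			return keys, nil
		}
		// Continue from where the previous page stopped.
		opts = []oss.Option{oss.Prefix("logs/"), oss.MaxKeys(100), oss.ContinuationToken(res.NextContinuationToken)}
	}
}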
+// ListObjectVersions lists objects of all versions under the current bucket.
+func (bucket Bucket) ListObjectVersions(options ...Option) (ListObjectVersionsResult, error) {
+ var out ListObjectVersionsResult
+
+ options = append(options, EncodingType("url"))
+ params, err := GetRawParams(options)
+ if err != nil {
+ return out, err
+ }
+ params["versions"] = nil
+
+ resp, err := bucket.doInner("GET", "", params, options, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ if err != nil {
+ return out, err
+ }
+
+ err = decodeListObjectVersionsResult(&out)
+ return out, err
+}
+
// SetObjectMeta sets the metadata of the Object.
//
// objectKey object
// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
-// ServerSideEncryption, and custom metadata.
//
-// error it's nil if no error, otherwise it's an error object.
+// ServerSideEncryption, and custom metadata.
//
+// error it's nil if no error, otherwise it's an error object.
func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
options = append(options, MetadataDirective(MetaReplace))
_, err := bucket.CopyObject(objectKey, objectKey, options...)
@@ -527,13 +675,13 @@ func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
//
// objectKey object key.
// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince,
-// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
+//
+// IfMatch, IfNoneMatch. For more details check out https://www.alibabacloud.com/help/en/object-storage-service/latest/headobject
//
// http.Header object meta when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
- params := map[string]interface{}{}
+ params, _ := GetRawParams(options)
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
if err != nil {
return nil, err
@@ -552,9 +700,8 @@ func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option)
//
// http.Header the object's metadata, valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
- params := map[string]interface{}{}
+ params, _ := GetRawParams(options)
params["objectMeta"] = nil
//resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
@@ -581,17 +728,16 @@ func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.He
// objectAcl object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL.
//
// error it's nil if no error, otherwise it's an error object.
-//
-func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
- options := []Option{ObjectACL(objectACL)}
- params := map[string]interface{}{}
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType, options ...Option) error {
+ options = append(options, ObjectACL(objectACL))
+ params, _ := GetRawParams(options)
params["acl"] = nil
resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetObjectACL gets object's ACL
@@ -600,12 +746,11 @@ func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
//
// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
// error it's nil if no error, otherwise it's an error object.
-//
-func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
+func (bucket Bucket) GetObjectACL(objectKey string, options ...Option) (GetObjectACLResult, error) {
var out GetObjectACLResult
- params := map[string]interface{}{}
+ params, _ := GetRawParams(options)
params["acl"] = nil
- resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+ resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
if err != nil {
return out, err
}
@@ -627,17 +772,16 @@ func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error)
// targetObjectKey the target object key to point to.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
- params := map[string]interface{}{}
+ params, _ := GetRawParams(options)
params["symlink"] = nil
resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetSymlink gets the symlink object with the specified key.
@@ -646,12 +790,12 @@ func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, opt
// objectKey the symlink object's key.
//
// error it's nil if no error, otherwise it's an error object.
-// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
//
-func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
- params := map[string]interface{}{}
+// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
+func (bucket Bucket) GetSymlink(objectKey string, options ...Option) (http.Header, error) {
+ params, _ := GetRawParams(options)
params["symlink"] = nil
- resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+ resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
if err != nil {
return nil, err
}
@@ -677,16 +821,67 @@ func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
// objectKey object key to restore.
//
// error it's nil if no error, otherwise it's an error object.
-//
-func (bucket Bucket) RestoreObject(objectKey string) error {
- params := map[string]interface{}{}
+func (bucket Bucket) RestoreObject(objectKey string, options ...Option) error {
+ params, _ := GetRawParams(options)
+ params["restore"] = nil
+ resp, err := bucket.do("POST", objectKey, params, options, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// RestoreObjectDetail supports more features than RestoreObject
+func (bucket Bucket) RestoreObjectDetail(objectKey string, restoreConfig RestoreConfiguration, options ...Option) error {
+ if restoreConfig.Tier == "" {
+ // Expedited, Standard, Bulk
+ restoreConfig.Tier = string(RestoreStandard)
+ }
+
+ if restoreConfig.Days == 0 {
+ restoreConfig.Days = 1
+ }
+
+ bs, err := xml.Marshal(restoreConfig)
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ options = append(options, ContentType(contentType))
+
+ params, _ := GetRawParams(options)
+ params["restore"] = nil
+
+ resp, err := bucket.do("POST", objectKey, params, options, buffer, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
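// Illustrative usage sketch (not part of the SDK diff): restoring an archived object with an
// explicit tier and duration via RestoreObjectDetail. Assumes an initialized *oss.Bucket; the
// object key is a placeholder and RestoreStandard is the tier constant referenced above.
func restoreArchived(bucket *oss.Bucket) error {
	cfg := oss.RestoreConfiguration{
		Days: 3,                           // keep the restored copy available for three days
		Tier: string(oss.RestoreStandard), // Expedited, Standard or Bulk
	}
	return bucket.RestoreObjectDetail("archive/backup.tar.gz", cfg)
}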
+// RestoreObjectXML supports more features than RestoreObject
+func (bucket Bucket) RestoreObjectXML(objectKey, configXML string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(configXML))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ options = append(options, ContentType(contentType))
+
+ params, _ := GetRawParams(options)
params["restore"] = nil
- resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
+
+ resp, err := bucket.do("POST", objectKey, params, options, buffer, nil)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
}
// SignURL signs the URL. Users could access the object directly with this URL without getting the AK.
@@ -696,14 +891,18 @@ func (bucket Bucket) RestoreObject(objectKey string) error {
//
// string returns the signed URL, when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+ err := CheckObjectName(objectKey)
+ if err != nil {
+ return "", err
+ }
+
if expiredInSec < 0 {
return "", fmt.Errorf("invalid expires: %d, expires must bigger than 0", expiredInSec)
}
expiration := time.Now().Unix() + expiredInSec
- params, err := getRawParams(options)
+ params, err := GetRawParams(options)
if err != nil {
return "", err
}
@@ -723,11 +922,11 @@ func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec i
// signedURL signed URL.
// reader io.Reader the read instance for reading the data for the upload.
// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
-// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
-// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
-// error it's nil if no error, otherwise it's an error object.
+// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/putobject
//
+// error it's nil if no error, otherwise it's an error object.
func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
if err != nil {
@@ -746,7 +945,6 @@ func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, option
// options options for uploading, same as the options in PutObject function.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
fd, err := os.Open(filePath)
if err != nil {
@@ -771,9 +969,8 @@ func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, option
//
// Response the response object which contains the HTTP response.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
params := map[string]interface{}{}
resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
@@ -781,14 +978,14 @@ func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, opti
return nil, err
}
- if bucket.getConfig().IsEnableCRC {
- err = checkCRC(resp, "DoPutObjectWithURL")
+ if bucket.GetConfig().IsEnableCRC {
+ err = CheckCRC(resp, "DoPutObjectWithURL")
if err != nil {
return resp, err
}
}
- err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
return resp, err
}
@@ -797,12 +994,12 @@ func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, opti
//
// signedURL the signed URL.
// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
-// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
-// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/getobject
//
// io.ReadCloser the reader object for getting the data from response. It needs be closed after the usage. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
result, err := bucket.DoGetObjectWithURL(signedURL, options)
if err != nil {
@@ -818,7 +1015,6 @@ func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.R
// options the options for downloading object. Check out the parameter options in function GetObject for the reference.
//
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
tempFilePath := filePath + TempFileSuffix
@@ -843,16 +1039,16 @@ func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options
}
// Compare the CRC value. If CRC values do not match, return error.
- hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
- encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
+ hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+ encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil)
acceptEncoding := ""
if encodeOpt != nil {
acceptEncoding = encodeOpt.(string)
}
- if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
+ if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
result.Response.ClientCRC = result.ClientCRC.Sum64()
- err = checkCRC(result.Response, "GetObjectToFileWithURL")
+ err = CheckCRC(result.Response, "GetObjectToFileWithURL")
if err != nil {
os.Remove(tempFilePath)
return err
@@ -869,9 +1065,8 @@ func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options
//
// GetObjectResult the result object when the error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
- params, _ := getRawParams(options)
+ params, _ := GetRawParams(options)
resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
if err != nil {
return nil, err
@@ -883,15 +1078,15 @@ func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*Ge
// CRC
var crcCalc hash.Hash64
- hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
- if bucket.getConfig().IsEnableCRC && !hasRange {
- crcCalc = crc64.New(crcTable())
+ hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+ if bucket.GetConfig().IsEnableCRC && !hasRange {
+ crcCalc = crc64.New(CrcTable())
result.ServerCRC = resp.ServerCRC
result.ClientCRC = crcCalc
}
// Progress
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
@@ -899,21 +1094,18 @@ func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*Ge
return result, nil
}
-//
// ProcessObject apply process on the specified image file.
//
// The supported process includes resize, rotate, crop, watermark, format,
// udf, customized style, etc.
//
-//
// objectKey object key to process.
// process process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
//
// error it's nil if no error, otherwise it's an error object.
-//
-func (bucket Bucket) ProcessObject(objectKey string, process string) (ProcessObjectResult, error) {
+func (bucket Bucket) ProcessObject(objectKey string, process string, options ...Option) (ProcessObjectResult, error) {
var out ProcessObjectResult
- params := map[string]interface{}{}
+ params, _ := GetRawParams(options)
params["x-oss-process"] = nil
processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
data := strings.NewReader(processData)
@@ -927,33 +1119,189 @@ func (bucket Bucket) ProcessObject(objectKey string, process string) (ProcessObj
return out, err
}
+// AsyncProcessObject applies asynchronous processing to the specified image file.
+//
+// The supported process includes resize, rotate, crop, watermark, format,
+// udf, customized style, etc.
+//
+// objectKey object key to process.
+// asyncProcess process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
+//
+// error it's nil if no error, otherwise it's an error object.
+func (bucket Bucket) AsyncProcessObject(objectKey string, asyncProcess string, options ...Option) (AsyncProcessObjectResult, error) {
+ var out AsyncProcessObjectResult
+ params, _ := GetRawParams(options)
+ params["x-oss-async-process"] = nil
+ processData := fmt.Sprintf("%v=%v", "x-oss-async-process", asyncProcess)
+ data := strings.NewReader(processData)
+
+ resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = jsonUnmarshal(resp.Body, &out)
+ return out, err
+}
+
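// Illustrative usage sketch (not part of the SDK diff): applying an image process with
// ProcessObject. Assumes an initialized *oss.Bucket; the source key is a placeholder and the
// process string reuses the saveas example from the doc comments above.
func resizeImage(bucket *oss.Bucket) error {
	process := "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
	_, err := bucket.ProcessObject("source.jpg", process)
	// AsyncProcessObject takes the same (objectKey, process) arguments for asynchronous jobs.
	return err
}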
+// PutObjectTagging adds tagging to the object
+//
+// objectKey object key to add tagging
+// tagging tagging to be added
+//
+// error nil if success, otherwise error
+func (bucket Bucket) PutObjectTagging(objectKey string, tagging Tagging, options ...Option) error {
+ bs, err := xml.Marshal(tagging)
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ params, _ := GetRawParams(options)
+ params["tagging"] = nil
+ resp, err := bucket.do("PUT", objectKey, params, options, buffer, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// GetObjectTagging gets the tagging of the object
+//
+// objectKey object key to get tagging
+//
+// GetObjectTaggingResult the tagging result when error is nil.
+// error nil if success, otherwise error
+func (bucket Bucket) GetObjectTagging(objectKey string, options ...Option) (GetObjectTaggingResult, error) {
+ var out GetObjectTaggingResult
+ params, _ := GetRawParams(options)
+ params["tagging"] = nil
+
+ resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteObjectTagging deletes the object's tagging
+//
+// objectKey object key to delete tagging
+//
+// error nil if success, otherwise error
+func (bucket Bucket) DeleteObjectTagging(objectKey string, options ...Option) error {
+ params, _ := GetRawParams(options)
+ params["tagging"] = nil
+ resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
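// Illustrative usage sketch (not part of the SDK diff): writing, reading and removing object
// tags. Assumes an initialized *oss.Bucket; the Tagging/Tag field names follow the SDK's
// types as understood here, and the object key is a placeholder.
func tagRoundTrip(bucket *oss.Bucket) error {
	tagging := oss.Tagging{
		Tags: []oss.Tag{{Key: "team", Value: "cpi"}, {Key: "env", Value: "test"}},
	}
	if err := bucket.PutObjectTagging("reports/2024.csv", tagging); err != nil {
		return err
	}
	got, err := bucket.GetObjectTagging("reports/2024.csv")
	if err != nil {
		return err
	}
	_ = got.Tags // inspect the returned tag set as needed
	return bucket.DeleteObjectTagging("reports/2024.csv")
}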
+// OptionsMethod sends an OPTIONS request for the object and returns the response headers.
+func (bucket Bucket) OptionsMethod(objectKey string, options ...Option) (http.Header, error) {
+ var out http.Header
+ resp, err := bucket.doInner("OPTIONS", objectKey, nil, options, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+ out = resp.Headers
+ return out, nil
+}
+
+// Do is the public wrapper around doInner for sending raw requests to the bucket.
+func (bucket Bucket) Do(method, objectName string, params map[string]interface{}, options []Option,
+ data io.Reader, listener ProgressListener) (*Response, error) {
+ return bucket.doInner(method, objectName, params, options, data, listener)
+}
+
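// Illustrative usage sketch (not part of the SDK diff): the newly exported Do method sends a
// raw request against the bucket. Assumes an initialized *oss.Bucket and the net/http import
// for http.Header; the object key is a placeholder.
func headViaDo(bucket *oss.Bucket) (http.Header, error) {
	params := map[string]interface{}{}
	resp, err := bucket.Do("HEAD", "reports/2024.csv", params, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return resp.Headers, nil
}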
// Private
-func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+func (bucket Bucket) doInner(method, objectName string, params map[string]interface{}, options []Option,
data io.Reader, listener ProgressListener) (*Response, error) {
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return nil, err
}
- return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+
+ err = CheckBucketName(bucket.BucketName)
+ if len(bucket.BucketName) > 0 && err != nil {
+ return nil, err
+ }
+
+ ctxArg, _ := FindOption(options, contextArg, nil)
+ ctx, _ := ctxArg.(context.Context)
+
+ resp, err := bucket.Client.Conn.DoWithContext(ctx, method, bucket.BucketName, objectName,
params, headers, data, 0, listener)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil && resp != nil {
+ pRespHeader := respHeader.(*http.Header)
+ *pRespHeader = resp.Headers
+ }
+
+ return resp, err
+}
+
+// Private: do checks the object name and then delegates to doInner
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+ data io.Reader, listener ProgressListener) (*Response, error) {
+ err := CheckObjectName(objectName)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := bucket.doInner(method, objectName, params, options, data, listener)
+ return resp, err
}
func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
data io.Reader, listener ProgressListener) (*Response, error) {
+
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return nil, err
}
- return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
+
+ ctxArg, _ := FindOption(options, contextArg, nil)
+ ctx, _ := ctxArg.(context.Context)
+
+ resp, err := bucket.Client.Conn.DoURLWithContext(ctx, method, signedURL, headers, data, 0, listener)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ pRespHeader := respHeader.(*http.Header)
+ if resp != nil {
+ *pRespHeader = resp.Headers
+ }
+ }
+
+ return resp, err
}
-func (bucket Bucket) getConfig() *Config {
+func (bucket Bucket) GetConfig() *Config {
return bucket.Client.Config
}
-func addContentType(options []Option, keys ...string) []Option {
+func AddContentType(options []Option, keys ...string) []Option {
typ := TypeByExtension("")
for _, key := range keys {
typ = TypeByExtension(key)
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
index ff370f6d..c30d7955 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
@@ -7,7 +7,9 @@ import (
"encoding/xml"
"fmt"
"io"
+ "io/ioutil"
"log"
+ "net"
"net/http"
"strings"
"time"
@@ -47,7 +49,6 @@ func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption)
// URL parse
url := &urlMaker{}
- url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
// HTTP connect
conn := &Conn{config: config, url: url}
@@ -63,12 +64,42 @@ func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption)
option(client)
}
+ err := url.InitExt(config.Endpoint, config.IsCname, config.IsUseProxy, config.IsPathStyle)
+ if err != nil {
+ return nil, err
+ }
+
+ if config.AuthVersion != AuthV1 && config.AuthVersion != AuthV2 && config.AuthVersion != AuthV4 {
+ return nil, fmt.Errorf("Init client Error, invalid Auth version: %v", config.AuthVersion)
+ }
+
// Create HTTP connection
- err := conn.init(config, url, client.HTTPClient)
+ err = conn.init(config, url, client.HTTPClient)
return client, err
}
+// SetRegion sets the region for the client
+//
+// region the region, such as cn-hangzhou
+func (client *Client) SetRegion(region string) {
+ client.Config.Region = region
+}
+
+// SetCloudBoxId sets the CloudBoxId for the client
+//
+// cloudBoxId the id of cloudBox
+func (client *Client) SetCloudBoxId(cloudBoxId string) {
+ client.Config.CloudBoxId = cloudBoxId
+}
+
+// SetProduct sets the product type for the client
+//
+// Product product type
+func (client *Client) SetProduct(product string) {
+ client.Config.Product = product
+}
+
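// Illustrative usage sketch (not part of the SDK diff): constructing a client and obtaining a
// bucket handle with the setters above. Endpoint, credentials, region and bucket name are placeholders.
func newBucketHandle() (*oss.Bucket, error) {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		return nil, err
	}
	client.SetRegion("cn-hangzhou") // stores the region on the client config (see SetRegion above)
	return client.Bucket("my-bucket")
}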
// Bucket gets the bucket instance.
//
// bucketName the bucket name.
@@ -77,6 +108,11 @@ func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption)
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) Bucket(bucketName string) (*Bucket, error) {
+ err := CheckBucketName(bucketName)
+ if err != nil {
+ return nil, err
+ }
+
return &Bucket{
client,
bucketName,
@@ -98,27 +134,58 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
buffer := new(bytes.Buffer)
- isOptSet, val, _ := isOptionSet(options, storageClass)
- if isOptSet {
- cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
- bs, err := xml.Marshal(cbConfig)
- if err != nil {
- return err
- }
- buffer.Write(bs)
+ var cbConfig createBucketConfiguration
+ cbConfig.StorageClass = StorageStandard
+
+ isStorageSet, valStroage, _ := IsOptionSet(options, storageClass)
+ isRedundancySet, valRedundancy, _ := IsOptionSet(options, redundancyType)
+ isObjectHashFuncSet, valHashFunc, _ := IsOptionSet(options, objectHashFunc)
+ if isStorageSet {
+ cbConfig.StorageClass = valStroage.(StorageClassType)
+ }
+
+ if isRedundancySet {
+ cbConfig.DataRedundancyType = valRedundancy.(DataRedundancyType)
+ }
+
+ if isObjectHashFuncSet {
+ cbConfig.ObjectHashFunction = valHashFunc.(ObjecthashFuncType)
+ }
- contentType := http.DetectContentType(buffer.Bytes())
- headers[HTTPHeaderContentType] = contentType
+ bs, err := xml.Marshal(cbConfig)
+ if err != nil {
+ return err
+ }
+ buffer.Write(bs)
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
}
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// CreateBucketXml creates the bucket from the given xml configuration
+func (client Client) CreateBucketXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
params := map[string]interface{}{}
- resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
@@ -134,12 +201,42 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
var out ListBucketsResult
- params, err := getRawParams(options)
+ params, err := GetRawParams(options)
+ if err != nil {
+ return out, err
+ }
+
+ resp, err := client.do("GET", "", params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// ListCloudBoxes lists cloud boxes of the current account under the given endpoint, with optional filters.
+//
+// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
+// And marker makes sure the returned buckets' names are greater than it in lexicographic order.
+// Maxkeys limits the max keys to return, and by default it's 100 and up to 1000.
+// For the common usage scenario, please check out list_bucket.go in the sample.
+// ListCloudBoxResult the response object if error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) ListCloudBoxes(options ...Option) (ListCloudBoxResult, error) {
+ var out ListCloudBoxResult
+
+ params, err := GetRawParams(options)
if err != nil {
return out, err
}
- resp, err := client.do("GET", "", params, nil, nil)
+ params["cloudboxes"] = nil
+
+ resp, err := client.do("GET", "", params, nil, nil, options...)
if err != nil {
return out, err
}
@@ -174,31 +271,31 @@ func (client Client) IsBucketExist(bucketName string) (bool, error) {
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) DeleteBucket(bucketName string) error {
+func (client Client) DeleteBucket(bucketName string, options ...Option) error {
params := map[string]interface{}{}
- resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLocation gets the bucket location.
//
// Checks out the following link for more information :
-// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/getbucketlocation
//
// bucketName the bucket name
//
// string bucket's datacenter location
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketLocation(bucketName string) (string, error) {
+func (client Client) GetBucketLocation(bucketName string, options ...Option) (string, error) {
params := map[string]interface{}{}
params["location"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
return "", err
}
@@ -216,15 +313,16 @@ func (client Client) GetBucketLocation(bucketName string) (string, error) {
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
+func (client Client) SetBucketACL(bucketName string, bucketACL ACLType, options ...Option) error {
headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
params := map[string]interface{}{}
- resp, err := client.do("PUT", bucketName, params, headers, nil)
+ params["acl"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, nil, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetBucketACL gets the bucket ACL.
@@ -234,11 +332,11 @@ func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
// GetBucketAclResponse the result object, and it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
+func (client Client) GetBucketACL(bucketName string, options ...Option) (GetBucketACLResult, error) {
var out GetBucketACLResult
params := map[string]interface{}{}
params["acl"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
return out, err
}
@@ -251,7 +349,7 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
// SetBucketLifecycle sets the bucket's lifecycle.
//
// For more information, checks out following link:
-// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/putbucketlifecycle
//
// bucketName the bucket name.
// rules the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively.
@@ -259,9 +357,13 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
- lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
- bs, err := xml.Marshal(lxml)
+func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule, options ...Option) error {
+ err := verifyLifecycleRules(rules)
+ if err != nil {
+ return err
+ }
+ lifecycleCfg := LifecycleConfiguration{Rules: rules}
+ bs, err := xml.Marshal(lifecycleCfg)
if err != nil {
return err
}
@@ -274,12 +376,31 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
params := map[string]interface{}{}
params["lifecycle"] = nil
- resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketLifecycleXml sets the bucket's lifecycle rule from xml config
+func (client Client) SetBucketLifecycleXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
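// Illustrative usage sketch (not part of the SDK diff): setting a lifecycle rule from raw xml
// and reading it back with the xml getter added further below. Assumes an initialized
// *oss.Client; bucket name and rule values are placeholders.
func lifecycleRoundTrip(client *oss.Client) (string, error) {
	rule := `<LifecycleConfiguration>
  <Rule>
    <ID>expire-logs</ID>
    <Prefix>logs/</Prefix>
    <Status>Enabled</Status>
    <Expiration><Days>30</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`
	if err := client.SetBucketLifecycleXml("my-bucket", rule); err != nil {
		return "", err
	}
	return client.GetBucketLifecycleXml("my-bucket")
}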
// DeleteBucketLifecycle deletes the bucket's lifecycle.
@@ -289,15 +410,15 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) DeleteBucketLifecycle(bucketName string) error {
+func (client Client) DeleteBucketLifecycle(bucketName string, options ...Option) error {
params := map[string]interface{}{}
params["lifecycle"] = nil
- resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLifecycle gets the bucket's lifecycle settings.
@@ -307,43 +428,65 @@ func (client Client) DeleteBucketLifecycle(bucketName string) error {
// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
+func (client Client) GetBucketLifecycle(bucketName string, options ...Option) (GetBucketLifecycleResult, error) {
var out GetBucketLifecycleResult
params := map[string]interface{}{}
params["lifecycle"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
+
+ // NonVersionTransition is not recommended for use and is kept
+ // only for backward compatibility
+ for k, rule := range out.Rules {
+ if len(rule.NonVersionTransitions) > 0 {
+ out.Rules[k].NonVersionTransition = &(out.Rules[k].NonVersionTransitions[0])
+ }
+ }
+ return out, err
+}
+
+// GetBucketLifecycleXml gets the bucket's lifecycle configuration in xml format.
+func (client Client) GetBucketLifecycleXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
return out, err
}
// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer.
//
// To avoid stealing link on OSS data, OSS supports the HTTP referrer header. A whitelist referrer could be set either by API or web console, as well as
-// the allowing empty referrer flag. Note that this applies to requests from webbrowser only.
+// the allowing empty referrer flag. Note that this applies to requests from web browser only.
// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket.
// For more information, please check out this link :
-// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/putbucketreferer
//
// bucketName the bucket name.
-// referers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards.
+// referrers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards.
// The sample could be found in sample/bucket_referer.go
// allowEmptyReferer the flag of allowing empty referrer. By default it's true.
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
+func (client Client) SetBucketReferer(bucketName string, referrers []string, allowEmptyReferer bool, options ...Option) error {
rxml := RefererXML{}
rxml.AllowEmptyReferer = allowEmptyReferer
- if referers == nil {
+ if referrers == nil {
rxml.RefererList = append(rxml.RefererList, "")
} else {
- for _, referer := range referers {
- rxml.RefererList = append(rxml.RefererList, referer)
+ for _, referrer := range referrers {
+ rxml.RefererList = append(rxml.RefererList, referrer)
}
}
@@ -351,49 +494,81 @@ func (client Client) SetBucketReferer(bucketName string, referers []string, allo
if err != nil {
return err
}
- buffer := new(bytes.Buffer)
- buffer.Write(bs)
+ return client.PutBucketRefererXml(bucketName, string(bs), options...)
+}
+
+// SetBucketRefererV2 sets the bucket's referer white list.
+//
+// bucketName the bucket name.
+// setBucketReferer the bucket referer config in struct format.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketRefererV2(bucketName string, setBucketReferer RefererXML, options ...Option) error {
+ bs, err := xml.Marshal(setBucketReferer)
+ if err != nil {
+ return err
+ }
+ return client.PutBucketRefererXml(bucketName, string(bs), options...)
+}
+
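// Illustrative usage sketch (not part of the SDK diff): restricting access by referer with
// the struct-based setter above. Assumes an initialized *oss.Client; bucket name and
// referrers are placeholders.
func restrictReferer(client *oss.Client) error {
	cfg := oss.RefererXML{
		AllowEmptyReferer: false,
		RefererList:       []string{"https://www.example.com", "https://*.example.org"},
	}
	return client.SetBucketRefererV2("my-bucket", cfg)
}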
+// PutBucketRefererXml sets the bucket's referer config in xml format
+// bucketName the bucket name.
+// xmlData the referer config in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketRefererXml(bucketName, xmlData string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlData))
contentType := http.DetectContentType(buffer.Bytes())
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
params["referer"] = nil
- resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetBucketReferer gets the bucket's referrer white list.
-//
// bucketName the bucket name.
-//
-// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil.
+// GetBucketRefererResult the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
-//
-func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
+func (client Client) GetBucketReferer(bucketName string, options ...Option) (GetBucketRefererResult, error) {
var out GetBucketRefererResult
+ body, err := client.GetBucketRefererXml(bucketName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// GetBucketRefererXml gets the bucket's referrer white list.
+// bucketName the bucket name.
+// string the bucket referer config result in xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketRefererXml(bucketName string, options ...Option) (string, error) {
params := map[string]interface{}{}
params["referer"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
- return out, err
+ return "", err
}
defer resp.Body.Close()
-
- err = xmlUnmarshal(resp.Body, &out)
- return out, err
+ body, err := ioutil.ReadAll(resp.Body)
+ return string(body), err
}
// SetBucketLogging sets the bucket logging settings.
//
// OSS could automatically store the access log. Only the bucket owner could enable the logging.
// Once enabled, OSS would save all the access log into hourly log files in a specified bucket.
-// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+// For more information, please check out https://www.alibabacloud.com/help/en/object-storage-service/latest/putbucketlogging
//
// bucketName bucket name to enable the log.
// targetBucket the target bucket name to store the log files.
@@ -402,7 +577,7 @@ func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
- isEnable bool) error {
+ isEnable bool, options ...Option) error {
var err error
var bs []byte
if isEnable {
@@ -428,12 +603,12 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
params := map[string]interface{}{}
params["logging"] = nil
- resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket.
@@ -442,15 +617,15 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) DeleteBucketLogging(bucketName string) error {
+func (client Client) DeleteBucketLogging(bucketName string, options ...Option) error {
params := map[string]interface{}{}
params["logging"] = nil
- resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLogging gets the bucket's logging settings
@@ -460,11 +635,11 @@ func (client Client) DeleteBucketLogging(bucketName string) error {
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
+func (client Client) GetBucketLogging(bucketName string, options ...Option) (GetBucketLoggingResult, error) {
var out GetBucketLoggingResult
params := map[string]interface{}{}
params["logging"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
return out, err
}
@@ -477,7 +652,7 @@ func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult
// SetBucketWebsite sets the bucket's static website's index and error page.
//
// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website.
-// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+// For more information, please check out: https://www.alibabacloud.com/help/en/object-storage-service/latest/putbucketwebsite
//
// bucketName the bucket name to enable static web site.
// indexDocument index page.
@@ -485,7 +660,7 @@ func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
+func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string, options ...Option) error {
wxml := WebsiteXML{}
wxml.IndexDocument.Suffix = indexDocument
wxml.ErrorDocument.Key = errorDocument
@@ -503,123 +678,209 @@ func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument s
params := map[string]interface{}{}
params["website"] = nil
- resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
-// DeleteBucketWebsite deletes the bucket's static web site settings.
+// SetBucketWebsiteDetail sets the bucket's static website's detail
//
-// bucketName the bucket name.
+// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website.
+// For more information, please check out: https://www.alibabacloud.com/help/en/object-storage-service/latest/putbucketwebsite
+//
+// bucketName the bucket name to enable static web site.
+//
+// wxml the website's detail
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) DeleteBucketWebsite(bucketName string) error {
+func (client Client) SetBucketWebsiteDetail(bucketName string, wxml WebsiteXML, options ...Option) error {
+ bs, err := xml.Marshal(wxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
params := map[string]interface{}{}
params["website"] = nil
- resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
-// GetBucketWebsite gets the bucket's default page (index page) and the error page.
+// SetBucketWebsiteXml sets the bucket's static website's rule
//
-// bucketName the bucket name
+// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website.
+// For more information, please check out: https://www.alibabacloud.com/help/en/object-storage-service/latest/putbucketwebsite
+//
+// bucketName the bucket name to enable static web site.
+//
+// wxml the website's detail
//
-// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
- var out GetBucketWebsiteResult
+func (client Client) SetBucketWebsiteXml(bucketName string, webXml string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(webXml))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
params := map[string]interface{}{}
params["website"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
if err != nil {
- return out, err
+ return err
}
defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
- err = xmlUnmarshal(resp.Body, &out)
- return out, err
+// DeleteBucketWebsite deletes the bucket's static web site settings.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketWebsite(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
-// SetBucketCORS sets the bucket's CORS rules
+// OpenMetaQuery Enables the metadata management feature for a bucket.
//
-// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) OpenMetaQuery(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["metaQuery"] = nil
+ params["comp"] = "add"
+ resp, err := client.do("POST", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetMetaQueryStatus Queries the information about the metadata index library of a bucket.
//
// bucketName the bucket name
-// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go.
//
+// GetMetaQueryStatusResult the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
- corsxml := CORSXML{}
- for _, v := range corsRules {
- cr := CORSRule{}
- cr.AllowedMethod = v.AllowedMethod
- cr.AllowedOrigin = v.AllowedOrigin
- cr.AllowedHeader = v.AllowedHeader
- cr.ExposeHeader = v.ExposeHeader
- cr.MaxAgeSeconds = v.MaxAgeSeconds
- corsxml.CORSRules = append(corsxml.CORSRules, cr)
+func (client Client) GetMetaQueryStatus(bucketName string, options ...Option) (GetMetaQueryStatusResult, error) {
+ var out GetMetaQueryStatusResult
+ params := map[string]interface{}{}
+ params["metaQuery"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
}
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
- bs, err := xml.Marshal(corsxml)
+// DoMetaQuery Queries the objects that meet specified conditions and lists the information about objects based on specified fields and sorting methods.
+//
+// bucketName the bucket name
+//
+// metaQuery the query conditions.
+//
+// DoMetaQueryResult the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DoMetaQuery(bucketName string, metaQuery MetaQuery, options ...Option) (DoMetaQueryResult, error) {
+ var out DoMetaQueryResult
+ bs, err := xml.Marshal(metaQuery)
if err != nil {
- return err
+ return out, err
}
- buffer := new(bytes.Buffer)
- buffer.Write(bs)
+ out, err = client.DoMetaQueryXml(bucketName, string(bs), options...)
+ return out, err
+}
+// DoMetaQueryXml Queries the objects that meet specified conditions and lists the information about objects based on specified fields and sorting methods.
+//
+// bucketName the bucket name
+//
+// metaQueryXml the query conditions in xml format.
+//
+// DoMetaQueryResult the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DoMetaQueryXml(bucketName string, metaQueryXml string, options ...Option) (DoMetaQueryResult, error) {
+ var out DoMetaQueryResult
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(metaQueryXml))
contentType := http.DetectContentType(buffer.Bytes())
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
- params["cors"] = nil
- resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ params["metaQuery"] = nil
+ params["comp"] = "query"
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
if err != nil {
- return err
+ return out, err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
}
-// DeleteBucketCORS deletes the bucket's static website settings.
+// CloseMetaQuery Disables the metadata management feature for a bucket.
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) DeleteBucketCORS(bucketName string) error {
+func (client Client) CloseMetaQuery(bucketName string, options ...Option) error {
params := map[string]interface{}{}
- params["cors"] = nil
- resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ params["metaQuery"] = nil
+ params["comp"] = "delete"
+ resp, err := client.do("POST", bucketName, params, nil, nil, options...)
if err != nil {
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
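A hedged sketch of the meta query lifecycle added above (open, inspect status, close), using the same placeholder endpoint, credentials, and bucket name.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Enable the metadata management feature for the bucket.
	if err := client.OpenMetaQuery(bucket); err != nil {
		panic(err)
	}

	// Inspect the state of the metadata index library.
	status, err := client.GetMetaQueryStatus(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("meta query status: %+v\n", status)

	// Disable the feature again.
	if err := client.CloseMetaQuery(bucket); err != nil {
		panic(err)
	}
}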
-// GetBucketCORS gets the bucket's CORS settings.
+// GetBucketWebsite gets the bucket's default page (index page) and the error page.
//
-// bucketName the bucket name.
-// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil.
+// bucketName the bucket name
//
+// GetBucketWebsiteResult the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
- var out GetBucketCORSResult
+func (client Client) GetBucketWebsite(bucketName string, options ...Option) (GetBucketWebsiteResult, error) {
+ var out GetBucketWebsiteResult
params := map[string]interface{}{}
- params["cors"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ params["website"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
return out, err
}
@@ -629,37 +890,1802 @@ func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, erro
return out, err
}
-// GetBucketInfo gets the bucket information.
+// GetBucketWebsiteXml gets the bucket's website config xml config.
//
-// bucketName the bucket name.
-// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil.
+// bucketName the bucket name
//
+// string the bucket's website configuration in xml format. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
-func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
- var out GetBucketInfoResult
+func (client Client) GetBucketWebsiteXml(bucketName string, options ...Option) (string, error) {
params := map[string]interface{}{}
- params["bucketInfo"] = nil
- resp, err := client.do("GET", bucketName, params, nil, nil)
+ params["website"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
if err != nil {
- return out, err
+ return "", err
}
defer resp.Body.Close()
- err = xmlUnmarshal(resp.Body, &out)
+ body, err := ioutil.ReadAll(resp.Body)
+
+ out := string(body)
return out, err
}
-// LimitUploadSpeed: set upload bandwidth limit speed,default is 0,unlimited
-// upSpeed: KB/s, 0 is unlimited,default is 0
-// error:it's nil if success, otherwise failure
-func (client Client) LimitUploadSpeed(upSpeed int) error {
- if client.Config == nil {
- return fmt.Errorf("client config is nil")
- }
+// SetBucketCORS sets the bucket's CORS rules
+//
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+//
+// bucketName the bucket name
+// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule, options ...Option) error {
+ corsxml := CORSXML{}
+ for _, v := range corsRules {
+ cr := CORSRule{}
+ cr.AllowedMethod = v.AllowedMethod
+ cr.AllowedOrigin = v.AllowedOrigin
+ cr.AllowedHeader = v.AllowedHeader
+ cr.ExposeHeader = v.ExposeHeader
+ cr.MaxAgeSeconds = v.MaxAgeSeconds
+ corsxml.CORSRules = append(corsxml.CORSRules, cr)
+ }
+
+ bs, err := xml.Marshal(corsxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketCORSV2 sets the bucket's CORS rules
+//
+// bucketName the bucket name
+// putBucketCORS the CORS rules to set.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketCORSV2(bucketName string, putBucketCORS PutBucketCORS, options ...Option) error {
+ bs, err := xml.Marshal(putBucketCORS)
+ if err != nil {
+ return err
+ }
+ err = client.SetBucketCORSXml(bucketName, string(bs), options...)
+ return err
+}
+
+func (client Client) SetBucketCORSXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketCORS deletes the bucket's CORS settings.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketCORS(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketCORS gets the bucket's CORS settings.
+//
+// bucketName the bucket name.
+// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketCORS(bucketName string, options ...Option) (GetBucketCORSResult, error) {
+ var out GetBucketCORSResult
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+func (client Client) GetBucketCORSXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
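The CORS helpers above can be exercised roughly as follows; rule values and names are placeholders, and the CORSRules field is assumed to match the upstream GetBucketCORSResult type.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Allow simple cross-origin GETs from any origin for one hour.
	rule := oss.CORSRule{
		AllowedOrigin: []string{"*"},
		AllowedMethod: []string{"GET"},
		AllowedHeader: []string{"Authorization"},
		ExposeHeader:  []string{"x-oss-request-id"},
		MaxAgeSeconds: 3600,
	}
	if err := client.SetBucketCORS(bucket, []oss.CORSRule{rule}); err != nil {
		panic(err)
	}

	// Read the rules back, then clean up.
	result, err := client.GetBucketCORS(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("CORS rules: %+v\n", result.CORSRules)

	if err := client.DeleteBucketCORS(bucket); err != nil {
		panic(err)
	}
}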
+// GetBucketInfo gets the bucket information.
+//
+// bucketName the bucket name.
+// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketInfo(bucketName string, options ...Option) (GetBucketInfoResult, error) {
+ var out GetBucketInfoResult
+ params := map[string]interface{}{}
+ params["bucketInfo"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+
+ // convert None to ""
+ if err == nil {
+ if out.BucketInfo.SseRule.KMSMasterKeyID == "None" {
+ out.BucketInfo.SseRule.KMSMasterKeyID = ""
+ }
+
+ if out.BucketInfo.SseRule.SSEAlgorithm == "None" {
+ out.BucketInfo.SseRule.SSEAlgorithm = ""
+ }
+
+ if out.BucketInfo.SseRule.KMSDataEncryption == "None" {
+ out.BucketInfo.SseRule.KMSDataEncryption = ""
+ }
+ }
+ return out, err
+}
+
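A read-only sketch of GetBucketInfo; note that the normalization above means the SseRule fields come back as empty strings rather than the literal "None" when server-side encryption is not configured. Names are placeholders.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	info, err := client.GetBucketInfo("my-bucket") // placeholder bucket
	if err != nil {
		panic(err)
	}
	// SseRule fields are "" (not "None") when encryption is not configured,
	// because GetBucketInfo normalizes the values after unmarshalling.
	fmt.Printf("SSE algorithm: %q, KMS key: %q\n",
		info.BucketInfo.SseRule.SSEAlgorithm,
		info.BucketInfo.SseRule.KMSMasterKeyID)
}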
+// SetBucketVersioning sets the bucket versioning state: Enabled or Suspended.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) SetBucketVersioning(bucketName string, versioningConfig VersioningConfig, options ...Option) error {
+ var err error
+ var bs []byte
+ bs, err = xml.Marshal(versioningConfig)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["versioning"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketVersioning gets the bucket versioning status: Enabled or Suspended.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketVersioning(bucketName string, options ...Option) (GetBucketVersioningResult, error) {
+ var out GetBucketVersioningResult
+ params := map[string]interface{}{}
+ params["versioning"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
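Versioning, sketched with placeholder names and assuming VersioningConfig exposes a Status field as in the upstream SDK.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Turn versioning on; use "Suspended" to pause it later.
	if err := client.SetBucketVersioning(bucket, oss.VersioningConfig{Status: "Enabled"}); err != nil {
		panic(err)
	}

	status, err := client.GetBucketVersioning(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("versioning: %+v\n", status)
}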
+// SetBucketEncryption set bucket encryption config
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) SetBucketEncryption(bucketName string, encryptionRule ServerEncryptionRule, options ...Option) error {
+ var err error
+ var bs []byte
+ bs, err = xml.Marshal(encryptionRule)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["encryption"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketEncryption get bucket encryption
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketEncryption(bucketName string, options ...Option) (GetBucketEncryptionResult, error) {
+ var out GetBucketEncryptionResult
+ params := map[string]interface{}{}
+ params["encryption"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteBucketEncryption delete bucket encryption config
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketEncryption(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["encryption"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// SetBucketTagging add tagging to bucket
+// bucketName name of bucket
+// tagging tagging to be added
+// error nil if success, otherwise error
+func (client Client) SetBucketTagging(bucketName string, tagging Tagging, options ...Option) error {
+ var err error
+ var bs []byte
+ bs, err = xml.Marshal(tagging)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["tagging"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketTagging get tagging of the bucket
+// bucketName name of bucket
+// error nil if success, otherwise error
+func (client Client) GetBucketTagging(bucketName string, options ...Option) (GetBucketTaggingResult, error) {
+ var out GetBucketTaggingResult
+ params := map[string]interface{}{}
+ params["tagging"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// DeleteBucketTagging delete bucket tagging
+// bucketName name of bucket
+// error nil if success, otherwise error
+//
+func (client Client) DeleteBucketTagging(bucketName string, options ...Option) error {
+ key, _ := FindOption(options, "tagging", nil)
+ params := map[string]interface{}{}
+ if key == nil {
+ params["tagging"] = nil
+ } else {
+ params["tagging"] = key.(string)
+ }
+
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
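Bucket tagging, sketched with placeholder tag keys and values; the Tagging and Tag structs are assumed to match the upstream SDK types.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Attach a single tag to the bucket.
	tagging := oss.Tagging{
		Tags: []oss.Tag{{Key: "env", Value: "test"}},
	}
	if err := client.SetBucketTagging(bucket, tagging); err != nil {
		panic(err)
	}

	// Read the tags back, then remove them all.
	result, err := client.GetBucketTagging(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("tags: %+v\n", result.Tags)

	if err := client.DeleteBucketTagging(bucket); err != nil {
		panic(err)
	}
}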
+// GetBucketStat get bucket stat
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketStat(bucketName string, options ...Option) (GetBucketStatResult, error) {
+ var out GetBucketStatResult
+ params := map[string]interface{}{}
+ params["stat"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketPolicy API operation for Object Storage Service.
+//
+// Get the policy from the bucket.
+//
+// bucketName the bucket name.
+//
+// string return the bucket's policy, and it's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketPolicy(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["policy"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+
+ out := string(body)
+ return out, err
+}
+
+// SetBucketPolicy API operation for Object Storage Service.
+//
+// Set the policy for the bucket.
+//
+// bucketName the bucket name.
+//
+// policy the bucket policy.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketPolicy(bucketName string, policy string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["policy"] = nil
+
+ buffer := strings.NewReader(policy)
+
+ resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketPolicy API operation for Object Storage Service.
+//
+// Deletes the policy from the bucket.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketPolicy(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["policy"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
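Bucket policy round trip; the policy JSON is only illustrative and the names are placeholders.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Illustrative policy: allow anonymous reads of every object in the bucket.
	policy := `{
  "Version": "1",
  "Statement": [{
    "Effect": "Allow",
    "Action": ["oss:GetObject"],
    "Principal": ["*"],
    "Resource": ["acs:oss:*:*:my-bucket/*"]
  }]
}`
	if err := client.SetBucketPolicy(bucket, policy); err != nil {
		panic(err)
	}

	got, err := client.GetBucketPolicy(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Println("policy:", got)

	if err := client.DeleteBucketPolicy(bucket); err != nil {
		panic(err)
	}
}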
+// SetBucketRequestPayment API operation for Object Storage Service.
+//
+// Set the requestPayment of bucket
+//
+// bucketName the bucket name.
+//
+// paymentConfig the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketRequestPayment(bucketName string, paymentConfig RequestPaymentConfiguration, options ...Option) error {
+ params := map[string]interface{}{}
+ params["requestPayment"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(paymentConfig)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketRequestPayment API operation for Object Storage Service.
+//
+// Get bucket requestPayment
+//
+// bucketName the bucket name.
+//
+// RequestPaymentConfiguration the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketRequestPayment(bucketName string, options ...Option) (RequestPaymentConfiguration, error) {
+ var out RequestPaymentConfiguration
+ params := map[string]interface{}{}
+ params["requestPayment"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
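Requester-pays, sketched with placeholders and assuming RequestPaymentConfiguration carries a Payer field as in the upstream SDK.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Make requesters pay for downloads; use "BucketOwner" to switch back.
	conf := oss.RequestPaymentConfiguration{Payer: "Requester"}
	if err := client.SetBucketRequestPayment(bucket, conf); err != nil {
		panic(err)
	}

	got, err := client.GetBucketRequestPayment(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payer: %s\n", got.Payer)
}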
+// GetUserQoSInfo API operation for Object Storage Service.
+//
+// Get user qos.
+//
+// UserQoSConfiguration the User Qos and range Information.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetUserQoSInfo(options ...Option) (UserQoSConfiguration, error) {
+ var out UserQoSConfiguration
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ resp, err := client.do("GET", "", params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketQoSInfo API operation for Object Storage Service.
+//
+// Set Bucket Qos information.
+//
+// bucketName the bucket name.
+//
+// qosConf the qos configuration.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketQoSInfo(bucketName string, qosConf BucketQoSConfiguration, options ...Option) error {
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(qosConf)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketQosInfo API operation for Object Storage Service.
+//
+// Get Bucket Qos information.
+//
+// bucketName the bucket name.
+//
+// BucketQoSConfiguration the return qos configuration.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketQosInfo(bucketName string, options ...Option) (BucketQoSConfiguration, error) {
+ var out BucketQoSConfiguration
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteBucketQosInfo API operation for Object Storage Service.
+//
+// Delete Bucket QoS information.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketQosInfo(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketInventory API operation for Object Storage Service
+//
+// Set the Bucket inventory.
+//
+// bucketName the bucket name.
+//
+// inventoryConfig the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) SetBucketInventory(bucketName string, inventoryConfig InventoryConfiguration, options ...Option) error {
+ params := map[string]interface{}{}
+ params["inventoryId"] = inventoryConfig.Id
+ params["inventory"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(inventoryConfig)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketInventoryXml API operation for Object Storage Service
+//
+// Set the Bucket inventory
+//
+// bucketName the bucket name.
+//
+// xmlBody the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) SetBucketInventoryXml(bucketName string, xmlBody string, options ...Option) error {
+ var inventoryConfig InventoryConfiguration
+ err := xml.Unmarshal([]byte(xmlBody), &inventoryConfig)
+ if err != nil {
+ return err
+ }
+
+ if inventoryConfig.Id == "" {
+ return fmt.Errorf("inventory id is empty in xml")
+ }
+
+ params := map[string]interface{}{}
+ params["inventoryId"] = inventoryConfig.Id
+ params["inventory"] = nil
+
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketInventory API operation for Object Storage Service
+//
+// Get the Bucket inventory.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// InventoryConfiguration the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) GetBucketInventory(bucketName string, strInventoryId string, options ...Option) (InventoryConfiguration, error) {
+ var out InventoryConfiguration
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ params["inventoryId"] = strInventoryId
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketInventoryXml API operation for Object Storage Service
+//
+// Get the Bucket inventory.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// InventoryConfiguration the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) GetBucketInventoryXml(bucketName string, strInventoryId string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ params["inventoryId"] = strInventoryId
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// ListBucketInventory API operation for Object Storage Service
+//
+// List the Bucket inventory.
+//
+// bucketName the bucket name.
+//
+// continuationToken the continuation token used to page through the inventory configurations.
+//
+// ListInventoryConfigurationsResult the list of inventory configurations.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) ListBucketInventory(bucketName, continuationToken string, options ...Option) (ListInventoryConfigurationsResult, error) {
+ var out ListInventoryConfigurationsResult
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ if continuationToken == "" {
+ params["continuation-token"] = nil
+ } else {
+ params["continuation-token"] = continuationToken
+ }
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// ListBucketInventoryXml API operation for Object Storage Service
+//
+// List the Bucket inventory.
+//
+// bucketName the bucket name.
+//
+// continuationToken the continuation token used to page through the inventory configurations.
+//
+// string the inventory configurations in xml format.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) ListBucketInventoryXml(bucketName, continuationToken string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ if continuationToken == "" {
+ params["continuation-token"] = nil
+ } else {
+ params["continuation-token"] = continuationToken
+ }
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// DeleteBucketInventory API operation for Object Storage Service.
+//
+// Delete Bucket inventory information.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) DeleteBucketInventory(bucketName, strInventoryId string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ params["inventoryId"] = strInventoryId
+
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
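Listing and deleting inventory configurations, with a placeholder inventory id.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// List the inventory configurations; an empty continuation token starts from the beginning.
	list, err := client.ListBucketInventory(bucket, "")
	if err != nil {
		panic(err)
	}
	fmt.Printf("inventories: %+v\n", list)

	// Delete a configuration by its id (placeholder id).
	if err := client.DeleteBucketInventory(bucket, "report-daily"); err != nil {
		panic(err)
	}
}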
+// SetBucketAsyncTask API operation for set async fetch task
+//
+// bucketName the bucket name.
+//
+// asynConf the async fetch task configuration
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) SetBucketAsyncTask(bucketName string, asynConf AsyncFetchTaskConfiguration, options ...Option) (AsyncFetchTaskResult, error) {
+ var out AsyncFetchTaskResult
+ params := map[string]interface{}{}
+ params["asyncFetch"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(asynConf)
+
+ if err != nil {
+ return out, err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return out, err
+ }
+
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketAsyncTask API operation for set async fetch task
+//
+// bucketName the bucket name.
+//
+// taskID the task id returned by SetBucketAsyncTask
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) GetBucketAsyncTask(bucketName string, taskID string, options ...Option) (AsynFetchTaskInfo, error) {
+ var out AsynFetchTaskInfo
+ params := map[string]interface{}{}
+ params["asyncFetch"] = nil
+
+ headers := make(map[string]string)
+ headers[HTTPHeaderOssTaskID] = taskID
+ resp, err := client.do("GET", bucketName, params, headers, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// InitiateBucketWorm creates bucket worm Configuration
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) InitiateBucketWorm(bucketName string, retentionDays int, options ...Option) (string, error) {
+ var initiateWormConf InitiateWormConfiguration
+ initiateWormConf.RetentionPeriodInDays = retentionDays
+
+ var respHeader http.Header
+ isOptSet, _, _ := IsOptionSet(options, responseHeader)
+ if !isOptSet {
+ options = append(options, GetResponseHeader(&respHeader))
+ }
+
+ bs, err := xml.Marshal(initiateWormConf)
+ if err != nil {
+ return "", err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["worm"] = nil
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ respOpt, _ := FindOption(options, responseHeader, nil)
+ wormID := ""
+ err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+ if err == nil && respOpt != nil {
+ wormID = (respOpt.(*http.Header)).Get("x-oss-worm-id")
+ }
+ return wormID, err
+}
+
+// AbortBucketWorm delete bucket worm Configuration
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) AbortBucketWorm(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["worm"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// CompleteBucketWorm complete bucket worm Configuration
+// bucketName the bucket name.
+// wormID the worm id
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) CompleteBucketWorm(bucketName string, wormID string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["wormId"] = wormID
+ resp, err := client.do("POST", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// ExtendBucketWorm extends the bucket worm configuration
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// wormID the worm id
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) ExtendBucketWorm(bucketName string, retentionDays int, wormID string, options ...Option) error {
+ var extendWormConf ExtendWormConfiguration
+ extendWormConf.RetentionPeriodInDays = retentionDays
+
+ bs, err := xml.Marshal(extendWormConf)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["wormId"] = wormID
+ params["wormExtend"] = nil
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketWorm get bucket worm Configuration
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketWorm(bucketName string, options ...Option) (WormConfiguration, error) {
+ var out WormConfiguration
+ params := map[string]interface{}{}
+ params["worm"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
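The WORM (retention) lifecycle, sketched end to end with placeholder names and a deliberately short retention period.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Start a retention policy of 1 day; the worm id comes back in the x-oss-worm-id header.
	wormID, err := client.InitiateBucketWorm(bucket, 1)
	if err != nil {
		panic(err)
	}

	// Lock the policy in, then extend the retention period to 2 days.
	if err := client.CompleteBucketWorm(bucket, wormID); err != nil {
		panic(err)
	}
	if err := client.ExtendBucketWorm(bucket, 2, wormID); err != nil {
		panic(err)
	}

	conf, err := client.GetBucketWorm(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("worm configuration: %+v\n", conf)
}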
+// SetBucketTransferAcc set bucket transfer acceleration configuration
+// bucketName the bucket name.
+// accConf bucket transfer acceleration configuration
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketTransferAcc(bucketName string, accConf TransferAccConfiguration, options ...Option) error {
+ bs, err := xml.Marshal(accConf)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["transferAcceleration"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketTransferAcc get bucket transfer acceleration configuration
+// bucketName the bucket name.
+// accConf bucket transfer acceleration configuration
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketTransferAcc(bucketName string, options ...Option) (TransferAccConfiguration, error) {
+ var out TransferAccConfiguration
+ params := map[string]interface{}{}
+ params["transferAcceleration"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteBucketTransferAcc delete bucket transfer acceleration configuration
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketTransferAcc(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["transferAcceleration"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// PutBucketReplication put bucket replication configuration
+// bucketName the bucket name.
+// xmlBody the replication configuration.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) PutBucketReplication(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["replication"] = nil
+ params["comp"] = "add"
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// PutBucketRTC put bucket replication rtc
+// bucketName the bucket name.
+// rtc the bucket rtc config.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) PutBucketRTC(bucketName string, rtc PutBucketRTC, options ...Option) error {
+ bs, err := xml.Marshal(rtc)
+ if err != nil {
+ return err
+ }
+ err = client.PutBucketRTCXml(bucketName, string(bs), options...)
+ return err
+}
+
+// PutBucketRTCXml put bucket rtc configuration
+// bucketName the bucket name.
+// xmlBody the rtc configuration in xml format.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) PutBucketRTCXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["rtc"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketReplication get bucket replication configuration
+// bucketName the bucket name.
+// string the replication configuration.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReplication(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["replication"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(data), err
+}
+
+// DeleteBucketReplication delete bucket replication configuration
+// bucketName the bucket name.
+// ruleId the ID of the replication configuration.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketReplication(bucketName string, ruleId string, options ...Option) error {
+ replicationxml := ReplicationXML{}
+ replicationxml.ID = ruleId
+
+ bs, err := xml.Marshal(replicationxml)
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["replication"] = nil
+ params["comp"] = "delete"
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketReplicationLocation get the locations of the target bucket that can be copied to
+// bucketName the bucket name.
+// string the locations of the target bucket that can be copied to.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReplicationLocation(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["replicationLocation"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(data), err
+}
+
+// GetBucketReplicationProgress get the replication progress of bucket
+// bucketName the bucket name.
+// ruleId the ID of the replication configuration.
+// string the replication progress of bucket.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReplicationProgress(bucketName string, ruleId string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["replicationProgress"] = nil
+ if ruleId != "" {
+ params["rule-id"] = ruleId
+ }
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(data), err
+}
+
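Reading the replication state of a bucket; all three calls return raw XML, and an empty rule id simply omits the rule-id parameter.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Regions that this bucket can replicate to, as raw XML.
	locations, err := client.GetBucketReplicationLocation(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Println("replication locations:", locations)

	// Current replication configuration, as raw XML.
	config, err := client.GetBucketReplication(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Println("replication config:", config)

	// Replication progress; an empty rule id leaves the rule-id parameter off the request.
	progress, err := client.GetBucketReplicationProgress(bucket, "")
	if err != nil {
		panic(err)
	}
	fmt.Println("replication progress:", progress)
}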
+// GetBucketAccessMonitor get bucket's access monitor config
+// bucketName the bucket name.
+// GetBucketAccessMonitorResult the access monitor configuration result of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketAccessMonitor(bucketName string, options ...Option) (GetBucketAccessMonitorResult, error) {
+ var out GetBucketAccessMonitorResult
+ body, err := client.GetBucketAccessMonitorXml(bucketName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// GetBucketAccessMonitorXml get bucket's access monitor config
+// bucketName the bucket name.
+// string the access monitor configuration of the bucket in xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketAccessMonitorXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["accessmonitor"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// PutBucketAccessMonitor sets bucket's access monitor config
+// bucketName the bucket name.
+// accessMonitor the access monitor configuration of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketAccessMonitor(bucketName string, accessMonitor PutBucketAccessMonitor, options ...Option) error {
+ bs, err := xml.Marshal(accessMonitor)
+ if err != nil {
+ return err
+ }
+ err = client.PutBucketAccessMonitorXml(bucketName, string(bs), options...)
+ return err
+}
+
+// PutBucketAccessMonitorXml sets bucket's access monitor config
+// bucketName the bucket name.
+// xmlData the access monitor configuration in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketAccessMonitorXml(bucketName string, xmlData string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlData))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+ params := map[string]interface{}{}
+ params["accessmonitor"] = nil
+ resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// ListBucketCname list bucket's binding cname
+// bucketName the bucket name.
+// ListBucketCnameResult the list of cname records bound to the bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) ListBucketCname(bucketName string, options ...Option) (ListBucketCnameResult, error) {
+ var out ListBucketCnameResult
+ body, err := client.GetBucketCname(bucketName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// GetBucketCname get bucket's binding cname
+// bucketName the bucket name.
+// string the xml configuration of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketCname(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["cname"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(data), err
+}
+
+// CreateBucketCnameToken create a token for the cname.
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) CreateBucketCnameToken(bucketName string, cname string, options ...Option) (CreateBucketCnameTokenResult, error) {
+ var out CreateBucketCnameTokenResult
+ params := map[string]interface{}{}
+ params["cname"] = nil
+ params["comp"] = "token"
+
+ rxml := CnameConfigurationXML{}
+ rxml.Domain = cname
+
+ bs, err := xml.Marshal(rxml)
+ if err != nil {
+ return out, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketCnameToken get a token for the cname
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketCnameToken(bucketName string, cname string, options ...Option) (GetBucketCnameTokenResult, error) {
+ var out GetBucketCnameTokenResult
+ params := map[string]interface{}{}
+ params["cname"] = cname
+ params["comp"] = "token"
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// PutBucketCnameXml map a custom domain name to a bucket
+// bucketName the bucket name.
+// xmlBody the cname configuration in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketCnameXml(bucketName string, xmlBody string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["cname"] = nil
+ params["comp"] = "add"
+
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// PutBucketCname map a custom domain name to a bucket
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketCname(bucketName string, cname string, options ...Option) error {
+ rxml := CnameConfigurationXML{}
+ rxml.Domain = cname
+ bs, err := xml.Marshal(rxml)
+ if err != nil {
+ return err
+ }
+ return client.PutBucketCnameXml(bucketName, string(bs), options...)
+}
+
+// PutBucketCnameWithCertificate map a custom domain name to a bucket
+// bucketName the bucket name.
+// PutBucketCname the bucket cname config in struct format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketCnameWithCertificate(bucketName string, putBucketCname PutBucketCname, options ...Option) error {
+ bs, err := xml.Marshal(putBucketCname)
+ if err != nil {
+ return err
+ }
+ return client.PutBucketCnameXml(bucketName, string(bs), options...)
+}
+
+// DeleteBucketCname remove the mapping of the custom domain name from a bucket.
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketCname(bucketName string, cname string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["cname"] = nil
+ params["comp"] = "delete"
+
+ rxml := CnameConfigurationXML{}
+ rxml.Domain = cname
+
+ bs, err := xml.Marshal(rxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
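A cname binding sketch: the custom domain is a placeholder, and the token returned by CreateBucketCnameToken is assumed to be published (typically as a DNS TXT record) before PutBucketCname is called.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket"         // placeholder
	cname := "static.example.com" // placeholder custom domain

	// Ask OSS for the ownership-verification token for the domain.
	token, err := client.CreateBucketCnameToken(bucket, cname)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cname token: %+v\n", token)

	// Once domain ownership is verified, bind the domain to the bucket.
	if err := client.PutBucketCname(bucket, cname); err != nil {
		panic(err)
	}

	// List the bindings, then remove the mapping again.
	bindings, err := client.ListBucketCname(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cname bindings: %+v\n", bindings)

	if err := client.DeleteBucketCname(bucket, cname); err != nil {
		panic(err)
	}
}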
+// PutBucketResourceGroup set bucket's resource group
+// bucketName the bucket name.
+// resourceGroup the resource group configuration of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketResourceGroup(bucketName string, resourceGroup PutBucketResourceGroup, options ...Option) error {
+ bs, err := xml.Marshal(resourceGroup)
+ if err != nil {
+ return err
+ }
+ err = client.PutBucketResourceGroupXml(bucketName, string(bs), options...)
+ return err
+}
+
+// PutBucketResourceGroupXml set bucket's resource group
+// bucketName the bucket name.
+// xmlData the resource group in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketResourceGroupXml(bucketName string, xmlData string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlData))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+ params := map[string]interface{}{}
+ params["resourceGroup"] = nil
+ resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketResourceGroup get bucket's resource group
+// bucketName the bucket name.
+// GetBucketResourceGroupResult the resource group configuration result of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketResourceGroup(bucketName string, options ...Option) (GetBucketResourceGroupResult, error) {
+ var out GetBucketResourceGroupResult
+ body, err := client.GetBucketResourceGroupXml(bucketName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// GetBucketResourceGroupXml get bucket's resource group
+// bucketName the bucket name.
+// string the resource group result of bucket xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketResourceGroupXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["resourceGroup"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// PutBucketStyle set bucket's style
+// bucketName the bucket name.
+// styleContent the style content.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketStyle(bucketName, styleName string, styleContent string, options ...Option) error {
+ bs := fmt.Sprintf("", styleContent)
+ err := client.PutBucketStyleXml(bucketName, styleName, bs, options...)
+ return err
+}
+
+// PutBucketStyleXml set bucket's style
+// bucketName the bucket name.
+// styleName the style name.
+// xmlData the style in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketStyleXml(bucketName, styleName, xmlData string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlData))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+ params := map[string]interface{}{}
+ params["style"] = nil
+ params["styleName"] = styleName
+ resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketStyle get bucket's style
+// bucketName the bucket name.
+// styleName the bucket style name.
+// GetBucketStyleResult the style result of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketStyle(bucketName, styleName string, options ...Option) (GetBucketStyleResult, error) {
+ var out GetBucketStyleResult
+ body, err := client.GetBucketStyleXml(bucketName, styleName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// GetBucketStyleXml get bucket's style
+// bucketName the bucket name.
+// styleName the bucket style name.
+// string the style result of bucket in xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketStyleXml(bucketName, styleName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["style"] = nil
+ params["styleName"] = styleName
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// ListBucketStyle get bucket's styles
+// bucketName the bucket name.
+// GetBucketListStyleResult the list style result of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) ListBucketStyle(bucketName string, options ...Option) (GetBucketListStyleResult, error) {
+ var out GetBucketListStyleResult
+ body, err := client.ListBucketStyleXml(bucketName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// ListBucketStyleXml get bucket's list style
+// bucketName the bucket name.
+// string the style result of bucket in xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) ListBucketStyleXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["style"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// DeleteBucketStyle delete bucket's style
+// bucketName the bucket name.
+// styleName the bucket style name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketStyle(bucketName, styleName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["style"] = bucketName
+ params["styleName"] = styleName
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
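Bucket styles, sketched with an illustrative image-resize rule as the style content and placeholder names.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket := "my-bucket" // placeholder

	// Save an image-processing style; the content is an illustrative resize rule.
	if err := client.PutBucketStyle(bucket, "small", "image/resize,w_200"); err != nil {
		panic(err)
	}

	// Read the style back, list all styles, then delete it.
	style, err := client.GetBucketStyle(bucket, "small")
	if err != nil {
		panic(err)
	}
	fmt.Printf("style: %+v\n", style)

	styles, err := client.ListBucketStyle(bucket)
	if err != nil {
		panic(err)
	}
	fmt.Printf("all styles: %+v\n", styles)

	if err := client.DeleteBucketStyle(bucket, "small"); err != nil {
		panic(err)
	}
}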
+// PutBucketResponseHeader set bucket response header
+// bucketName the bucket name.
+// responseHeader the response header configuration of the bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketResponseHeader(bucketName string, responseHeader PutBucketResponseHeader, options ...Option) error {
+ bs, err := xml.Marshal(responseHeader)
+ if err != nil {
+ return err
+ }
+ err = client.PutBucketResponseHeaderXml(bucketName, string(bs), options...)
+ return err
+}
+
+// PutBucketResponseHeaderXml set bucket response header
+// bucketName the bucket name.
+// xmlData the bucket response header in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketResponseHeaderXml(bucketName, xmlData string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlData))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+ params := map[string]interface{}{}
+ params["responseHeader"] = nil
+ resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketResponseHeader gets the bucket's response header configuration.
+// bucketName the bucket name.
+// GetBucketResponseHeaderResult the response header result of bucket.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketResponseHeader(bucketName string, options ...Option) (GetBucketResponseHeaderResult, error) {
+ var out GetBucketResponseHeaderResult
+ body, err := client.GetBucketResponseHeaderXml(bucketName, options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// GetBucketResponseHeaderXml gets the bucket's response header configuration in XML format
+// bucketName the bucket name.
+// string the response header result of the bucket in xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketResponseHeaderXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["responseHeader"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// DeleteBucketResponseHeader deletes the response header configuration from a bucket.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketResponseHeader(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["responseHeader"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// DescribeRegions gets the region and endpoint information supported by OSS
+// DescribeRegionsResult the region information.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DescribeRegions(options ...Option) (DescribeRegionsResult, error) {
+ var out DescribeRegionsResult
+ body, err := client.DescribeRegionsXml(options...)
+ if err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(strings.NewReader(body), &out)
+ return out, err
+}
+
+// DescribeRegionsXml gets the region and endpoint information supported by OSS
+// string the region information in xml format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DescribeRegionsXml(options ...Option) (string, error) {
+ params, err := GetRawParams(options)
+ if err != nil {
+ return "", err
+ }
+ if params["regions"] == nil {
+ params["regions"] = nil
+ }
+ resp, err := client.do("GET", "", params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
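DescribeRegions and DescribeRegionsXml are client-level calls that take no bucket name. A hedged sketch, reusing a client built as in the style example above and the same imports:

    // Sketch only.
    regions, err := client.DescribeRegions()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", regions)

    // The Xml variant returns the raw response body instead of a typed result.
    rawRegions, err := client.DescribeRegionsXml()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(rawRegions)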
+// LimitUploadSpeed sets the upload bandwidth limit; the default is 0 (unlimited)
+// upSpeed KB/s, 0 is unlimited, default is 0
+// error it's nil if success, otherwise failure
+func (client Client) LimitUploadSpeed(upSpeed int) error {
+ if client.Config == nil {
+ return fmt.Errorf("client config is nil")
+ }
return client.Config.LimitUploadSpeed(upSpeed)
}
+// LimitDownloadSpeed sets the download bandwidth limit; the default is 0 (unlimited)
+// downSpeed KB/s, 0 is unlimited, default is 0
+// error it's nil if success, otherwise failure
+func (client Client) LimitDownloadSpeed(downSpeed int) error {
+ if client.Config == nil {
+ return fmt.Errorf("client config is nil")
+ }
+ return client.Config.LimitDownloadSpeed(downSpeed)
+}
+
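Both limiters delegate to Config.LimitUploadSpeed and Config.LimitDownloadSpeed (added in conf.go later in this diff). A short sketch on an existing client; the values are illustrative and 0 restores unlimited transfer:

    // Sketch only: cap uploads at 1 MB/s and downloads at 2 MB/s.
    if err := client.LimitUploadSpeed(1024); err != nil {
        log.Fatal(err)
    }
    if err := client.LimitDownloadSpeed(2048); err != nil {
        log.Fatal(err)
    }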
// UseCname sets the flag of using CName. By default it's false.
//
// isUseCname true: the endpoint has the CName, false: the endpoint does not have cname. Default is false.
@@ -667,7 +2693,16 @@ func (client Client) LimitUploadSpeed(upSpeed int) error {
func UseCname(isUseCname bool) ClientOption {
return func(client *Client) {
client.Config.IsCname = isUseCname
- client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+ }
+}
+
+// ForcePathStyle sets the flag of using path-style access. By default it's false.
+//
+// isPathStyle true: address the bucket in the URL path (endpoint/bucket/object), false: use virtual-hosted style. Default is false.
+//
+func ForcePathStyle(isPathStyle bool) ClientOption {
+ return func(client *Client) {
+ client.Config.IsPathStyle = isPathStyle
}
}
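ForcePathStyle only takes effect when the client is constructed, where the URL maker is initialized (see the urlTypePathStyle handling in conn.go below). A hedged sketch with a placeholder endpoint and keys:

    // Sketch only: requests address the bucket as <endpoint>/<bucket>/<object>
    // instead of the default virtual-hosted style.
    pathClient, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>",
        oss.ForcePathStyle(true))
    if err != nil {
        log.Fatal(err)
    }
    _ = pathClient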
@@ -691,6 +2726,20 @@ func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
}
}
+// MaxConns sets the HTTP max connections for a client.
+//
+// maxIdleConns controls the maximum number of idle (keep-alive) connections across all hosts. Default is 100.
+// maxIdleConnsPerHost controls the maximum idle (keep-alive) connections to keep per-host. Default is 100.
+// maxConnsPerHost limits the total number of connections per host. Default is no limit.
+//
+func MaxConns(maxIdleConns, maxIdleConnsPerHost, maxConnsPerHost int) ClientOption {
+ return func(client *Client) {
+ client.Config.HTTPMaxConns.MaxIdleConns = maxIdleConns
+ client.Config.HTTPMaxConns.MaxIdleConnsPerHost = maxIdleConnsPerHost
+ client.Config.HTTPMaxConns.MaxConnsPerHost = maxConnsPerHost
+ }
+}
+
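MaxConns maps one-to-one onto the three HTTPMaxConns fields added in conf.go. A short sketch with illustrative limits; passing 0 for maxConnsPerHost keeps the default of no per-host cap:

    // Sketch only: a client with a larger keep-alive pool for high request rates.
    pooledClient, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>",
        oss.MaxConns(256, 64, 0))
    if err != nil {
        log.Fatal(err)
    }
    _ = pooledClient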
// SecurityToken sets the temporary user's SecurityToken.
//
// token STS token
@@ -738,6 +2787,7 @@ func EnableCRC(isEnableCRC bool) ClientOption {
func UserAgent(userAgent string) ClientOption {
return func(client *Client) {
client.Config.UserAgent = userAgent
+ client.Config.UserSetUa = true
}
}
@@ -749,7 +2799,6 @@ func Proxy(proxyHost string) ClientOption {
return func(client *Client) {
client.Config.IsUseProxy = true
client.Config.ProxyHost = proxyHost
- client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
}
}
@@ -766,7 +2815,6 @@ func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
client.Config.IsAuthProxy = true
client.Config.ProxyUser = proxyUser
client.Config.ProxyPassword = proxyPassword
- client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
}
}
@@ -789,7 +2837,7 @@ func SetLogLevel(LogLevel int) ClientOption {
}
//
-// SetLogLevel sets the oss sdk log level
+// SetLogger sets the oss sdk logger
//
func SetLogger(Logger *log.Logger) ClientOption {
return func(client *Client) {
@@ -797,9 +2845,105 @@ func SetLogger(Logger *log.Logger) ClientOption {
}
}
+// SetCredentialsProvider sets the provider used to obtain the user's access credentials
+func SetCredentialsProvider(provider CredentialsProvider) ClientOption {
+ return func(client *Client) {
+ client.Config.CredentialsProvider = provider
+ }
+}
+
+// SetLocalAddr sets the local address used by the client
+func SetLocalAddr(localAddr net.Addr) ClientOption {
+ return func(client *Client) {
+ client.Config.LocalAddr = localAddr
+ }
+}
+
+// AuthVersion sets the signature version (v1, v2 or v4) required by the OSS server
+func AuthVersion(authVersion AuthVersionType) ClientOption {
+ return func(client *Client) {
+ client.Config.AuthVersion = authVersion
+ }
+}
+
+// AdditionalHeaders sets the additional HTTP headers that must be included in the signature
+func AdditionalHeaders(headers []string) ClientOption {
+ return func(client *Client) {
+ client.Config.AdditionalHeaders = headers
+ }
+}
+
+// RedirectEnabled enables or disables HTTP redirects; only effective from go1.7 onward
+func RedirectEnabled(enabled bool) ClientOption {
+ return func(client *Client) {
+ client.Config.RedirectEnabled = enabled
+ }
+}
+
+// InsecureSkipVerify controls whether the client skips verification of the server's TLS certificate
+func InsecureSkipVerify(enabled bool) ClientOption {
+ return func(client *Client) {
+ client.Config.InsecureSkipVerify = enabled
+ }
+}
+
+// Region sets the region, e.g. cn-hangzhou
+func Region(region string) ClientOption {
+ return func(client *Client) {
+ client.Config.Region = region
+ }
+}
+
+// CloudBoxId sets the cloud box ID
+func CloudBoxId(cloudBoxId string) ClientOption {
+ return func(client *Client) {
+ client.Config.CloudBoxId = cloudBoxId
+ }
+}
+
+// Product sets the product type (oss or oss-cloudbox)
+func Product(product string) ClientOption {
+ return func(client *Client) {
+ client.Config.Product = product
+ }
+}
+
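Several of these options are normally combined. The sketch below wires the environment-variable credentials provider (defined in conf.go later in this diff) into a client that signs with v4 against an explicit region; the endpoint and region are placeholders and the empty key arguments assume the provider supplies the credentials:

    // Sketch only.
    provider, err := oss.NewEnvironmentVariableCredentialsProvider()
    if err != nil {
        log.Fatal(err)
    }
    v4Client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "", "",
        oss.SetCredentialsProvider(&provider),
        oss.AuthVersion(oss.AuthV4),
        oss.Region("cn-hangzhou"))
    if err != nil {
        log.Fatal(err)
    }
    _ = v4Client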
// Private
func (client Client) do(method, bucketName string, params map[string]interface{},
- headers map[string]string, data io.Reader) (*Response, error) {
- return client.Conn.Do(method, bucketName, "", params,
- headers, data, 0, nil)
+ headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
+ err := CheckBucketName(bucketName)
+ if len(bucketName) > 0 && err != nil {
+ return nil, err
+ }
+
+ // option headers
+ addHeaders := make(map[string]string)
+ err = handleOptions(addHeaders, options)
+ if err != nil {
+ return nil, err
+ }
+
+ // merge header
+ if headers == nil {
+ headers = make(map[string]string)
+ }
+
+ for k, v := range addHeaders {
+ if _, ok := headers[k]; !ok {
+ headers[k] = v
+ }
+ }
+
+ resp, err := client.Conn.Do(method, bucketName, "", params, headers, data, 0, nil)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ pRespHeader := respHeader.(*http.Header)
+ if resp != nil {
+ *pRespHeader = resp.Headers
+ }
+ }
+
+ return resp, err
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
index 8886102d..e5695411 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
@@ -4,10 +4,12 @@ import (
"bytes"
"fmt"
"log"
+ "net"
"os"
"time"
)
+// Log levels of the SDK output
const (
LogOff = iota
Error
@@ -16,6 +18,7 @@ const (
Debug
)
+// LogTag is the tag for each log level
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
// HTTPTimeout defines HTTP timeout.
@@ -27,42 +30,155 @@ type HTTPTimeout struct {
IdleConnTimeout time.Duration
}
+// HTTPMaxConns defines the max idle connections, max idle connections per host and max connections per host
type HTTPMaxConns struct {
MaxIdleConns int
MaxIdleConnsPerHost int
+ MaxConnsPerHost int
+}
+
+// Credentials is the interface for retrieving AccessKeyID, AccessKeySecret and SecurityToken
+type Credentials interface {
+ GetAccessKeyID() string
+ GetAccessKeySecret() string
+ GetSecurityToken() string
+}
+
+// CredentialsProvider is the interface for retrieving Credentials
+type CredentialsProvider interface {
+ GetCredentials() Credentials
+}
+
+type defaultCredentials struct {
+ config *Config
+}
+
+func (defCre *defaultCredentials) GetAccessKeyID() string {
+ return defCre.config.AccessKeyID
+}
+
+func (defCre *defaultCredentials) GetAccessKeySecret() string {
+ return defCre.config.AccessKeySecret
+}
+
+func (defCre *defaultCredentials) GetSecurityToken() string {
+ return defCre.config.SecurityToken
+}
+
+type defaultCredentialsProvider struct {
+ config *Config
+}
+
+func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
+ return &defaultCredentials{config: defBuild.config}
+}
+
+type envCredentials struct {
+ AccessKeyId string
+ AccessKeySecret string
+ SecurityToken string
+}
+
+type EnvironmentVariableCredentialsProvider struct {
+ cred Credentials
+}
+
+func (credentials *envCredentials) GetAccessKeyID() string {
+ return credentials.AccessKeyId
+}
+
+func (credentials *envCredentials) GetAccessKeySecret() string {
+ return credentials.AccessKeySecret
+}
+
+func (credentials *envCredentials) GetSecurityToken() string {
+ return credentials.SecurityToken
+}
+
+func (defBuild *EnvironmentVariableCredentialsProvider) GetCredentials() Credentials {
+ var accessID, accessKey, token string
+ if defBuild.cred == nil {
+ accessID = os.Getenv("OSS_ACCESS_KEY_ID")
+ accessKey = os.Getenv("OSS_ACCESS_KEY_SECRET")
+ token = os.Getenv("OSS_SESSION_TOKEN")
+ } else {
+ accessID = defBuild.cred.GetAccessKeyID()
+ accessKey = defBuild.cred.GetAccessKeySecret()
+ token = defBuild.cred.GetSecurityToken()
+ }
+
+ return &envCredentials{
+ AccessKeyId: accessID,
+ AccessKeySecret: accessKey,
+ SecurityToken: token,
+ }
+}
+
+func NewEnvironmentVariableCredentialsProvider() (EnvironmentVariableCredentialsProvider, error) {
+ var provider EnvironmentVariableCredentialsProvider
+ accessID := os.Getenv("OSS_ACCESS_KEY_ID")
+ if accessID == "" {
+ return provider, fmt.Errorf("access key id is empty!")
+ }
+ accessKey := os.Getenv("OSS_ACCESS_KEY_SECRET")
+ if accessKey == "" {
+ return provider, fmt.Errorf("access key secret is empty!")
+ }
+ token := os.Getenv("OSS_SESSION_TOKEN")
+ envCredential := &envCredentials{
+ AccessKeyId: accessID,
+ AccessKeySecret: accessKey,
+ SecurityToken: token,
+ }
+ return EnvironmentVariableCredentialsProvider{
+ cred: envCredential,
+ }, nil
}
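Anything that satisfies the two small interfaces above can be supplied through the SetCredentialsProvider option in client.go. A minimal static provider, sketched with placeholder values:

    // Sketch only: a fixed-credential provider, e.g. for tests.
    type staticCredentials struct{ id, secret, token string }

    func (c *staticCredentials) GetAccessKeyID() string     { return c.id }
    func (c *staticCredentials) GetAccessKeySecret() string { return c.secret }
    func (c *staticCredentials) GetSecurityToken() string   { return c.token }

    type staticProvider struct{ cred staticCredentials }

    func (p *staticProvider) GetCredentials() oss.Credentials { return &p.cred }

    // Usage: oss.New(endpoint, "", "", oss.SetCredentialsProvider(&staticProvider{
    //     cred: staticCredentials{id: "<accessKeyID>", secret: "<accessKeySecret>"},
    // }))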
// Config defines oss configuration
type Config struct {
- Endpoint string // OSS endpoint
- AccessKeyID string // AccessId
- AccessKeySecret string // AccessKey
- RetryTimes uint // Retry count by default it's 5.
- UserAgent string // SDK name/version/system information
- IsDebug bool // Enable debug mode. Default is false.
- Timeout uint // Timeout in seconds. By default it's 60.
- SecurityToken string // STS Token
- IsCname bool // If cname is in the endpoint.
- HTTPTimeout HTTPTimeout // HTTP timeout
- HTTPMaxConns HTTPMaxConns // Http max connections
- IsUseProxy bool // Flag of using proxy.
- ProxyHost string // Flag of using proxy host.
- IsAuthProxy bool // Flag of needing authentication.
- ProxyUser string // Proxy user
- ProxyPassword string // Proxy password
- IsEnableMD5 bool // Flag of enabling MD5 for upload.
- MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
- IsEnableCRC bool // Flag of enabling CRC for upload.
- LogLevel int // Log level
- Logger *log.Logger // For write log
- UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited
- UploadLimiter *OssLimiter // Bandwidth limit reader for upload
-}
-
-// LimitUploadSpeed, uploadSpeed:KB/s, 0 is unlimited,default is 0
+ Endpoint string // OSS endpoint
+ AccessKeyID string // AccessId
+ AccessKeySecret string // AccessKey
+ RetryTimes uint // Retry count by default it's 5.
+ UserAgent string // SDK name/version/system information
+ IsDebug bool // Enable debug mode. Default is false.
+ Timeout uint // Timeout in seconds. By default it's 60.
+ SecurityToken string // STS Token
+ IsCname bool // If cname is in the endpoint.
+ IsPathStyle bool // If Path Style is in the endpoint.
+ HTTPTimeout HTTPTimeout // HTTP timeout
+ HTTPMaxConns HTTPMaxConns // Http max connections
+ IsUseProxy bool // Flag of using proxy.
+ ProxyHost string // Flag of using proxy host.
+ IsAuthProxy bool // Flag of needing authentication.
+ ProxyUser string // Proxy user
+ ProxyPassword string // Proxy password
+ IsEnableMD5 bool // Flag of enabling MD5 for upload.
+ MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
+ IsEnableCRC bool // Flag of enabling CRC for upload.
+ LogLevel int // Log level
+ Logger *log.Logger // For write log
+ UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited
+ UploadLimiter *OssLimiter // Bandwidth limit reader for upload
+ DownloadLimitSpeed int // Download limit speed:KB/s, 0 is unlimited
+ DownloadLimiter *OssLimiter // Bandwidth limit reader for download
+ CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
+ LocalAddr net.Addr // local client host info
+ UserSetUa bool // UserAgent is set by user or not
+ AuthVersion AuthVersionType // v1 or v2, v4 signature,default is v1
+ AdditionalHeaders []string // Additional HTTP headers that need to be signed
+ RedirectEnabled bool // Only effective from go1.7 onward, enable http redirect or not
+ InsecureSkipVerify bool // For https, whether to skip verifying the server certificate file
+ Region string // Such as cn-hangzhou
+ CloudBoxId string // Cloud box ID
+ Product string // oss or oss-cloudbox, default is oss
+}
+
+// LimitUploadSpeed uploadSpeed: KB/s, 0 is unlimited, default is 0
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
if uploadSpeed < 0 {
- return fmt.Errorf("erro,speed is less than 0")
+ return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
} else if uploadSpeed == 0 {
config.UploadLimitSpeed = 0
config.UploadLimiter = nil
@@ -77,7 +193,25 @@ func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
return err
}
-// WriteLog
+// LimitDownloadSpeed downloadSpeed: KB/s, 0 is unlimited, default is 0
+func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
+ if downloadSpeed < 0 {
+ return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
+ } else if downloadSpeed == 0 {
+ config.DownloadLimitSpeed = 0
+ config.DownloadLimiter = nil
+ return nil
+ }
+
+ var err error
+ config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
+ if err == nil {
+ config.DownloadLimitSpeed = downloadSpeed
+ }
+ return err
+}
+
+// WriteLog output log function
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
if config.LogLevel < LogLevel || config.Logger == nil {
return
@@ -89,6 +223,27 @@ func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
config.Logger.Printf("%s", logBuffer.String())
}
+// GetCredentials gets the credentials from the configured provider
+func (config *Config) GetCredentials() Credentials {
+ return config.CredentialsProvider.GetCredentials()
+}
+
+// GetSignProduct gets the product name used for signing
+func (config *Config) GetSignProduct() string {
+ if config.CloudBoxId != "" {
+ return "oss-cloudbox"
+ }
+ return "oss"
+}
+
+// GetSignRegion gets the region used for signing
+func (config *Config) GetSignRegion() string {
+ if config.CloudBoxId != "" {
+ return config.CloudBoxId
+ }
+ return config.Region
+}
+
// getDefaultOssConfig gets the default configuration.
func getDefaultOssConfig() *Config {
config := Config{}
@@ -102,6 +257,7 @@ func getDefaultOssConfig() *Config {
config.Timeout = 60 // Seconds
config.SecurityToken = ""
config.IsCname = false
+ config.IsPathStyle = false
config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
@@ -124,5 +280,14 @@ func getDefaultOssConfig() *Config {
config.LogLevel = LogOff
config.Logger = log.New(os.Stdout, "", log.LstdFlags)
+ provider := &defaultCredentialsProvider{config: &config}
+ config.CredentialsProvider = provider
+
+ config.AuthVersion = AuthV1
+ config.RedirectEnabled = true
+ config.InsecureSkipVerify = false
+
+ config.Product = "oss"
+
return &config
}
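The new GetSignProduct and GetSignRegion helpers only consult CloudBoxId and Region, so their behaviour is easy to pin down; a small sketch with placeholder values:

    // Sketch only.
    cfg := oss.Config{Region: "cn-hangzhou"}
    fmt.Println(cfg.GetSignProduct(), cfg.GetSignRegion()) // oss cn-hangzhou
    cfg.CloudBoxId = "cb-example"
    fmt.Println(cfg.GetSignProduct(), cfg.GetSignRegion()) // oss-cloudbox cb-example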
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
index 896295c3..7d404759 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
@@ -2,6 +2,7 @@ package oss
import (
"bytes"
+ "context"
"crypto/md5"
"encoding/base64"
"encoding/json"
@@ -27,7 +28,30 @@ type Conn struct {
client *http.Client
}
-var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var"}
+var signKeyList = []string{"acl", "uploads", "location", "cors",
+ "logging", "website", "referer", "lifecycle",
+ "delete", "append", "tagging", "objectMeta",
+ "uploadId", "partNumber", "security-token",
+ "position", "img", "style", "styleName",
+ "replication", "replicationProgress",
+ "replicationLocation", "cname", "bucketInfo",
+ "comp", "qos", "live", "status", "vod",
+ "startTime", "endTime", "symlink",
+ "x-oss-process", "response-content-type", "x-oss-traffic-limit",
+ "response-content-language", "response-expires",
+ "response-cache-control", "response-content-disposition",
+ "response-content-encoding", "udf", "udfName", "udfImage",
+ "udfId", "udfImageDesc", "udfApplication", "comp",
+ "udfApplicationLog", "restore", "callback", "callback-var", "qosInfo",
+ "policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment",
+ "x-oss-request-payer", "sequential",
+ "inventory", "inventoryId", "continuation-token", "asyncFetch",
+ "worm", "wormId", "wormExtend", "withHashContext",
+ "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256",
+ "x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration",
+ "regionList", "cloudboxes", "x-oss-ac-source-ip", "x-oss-ac-subnet-mask", "x-oss-ac-vpc-id", "x-oss-ac-forward-allow",
+ "metaQuery", "resourceGroup", "rtc", "x-oss-async-process", "responseHeader",
+}
// init initializes Conn
func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
@@ -41,9 +65,19 @@ func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client)
if err != nil {
return err
}
+ if config.IsAuthProxy {
+ if config.ProxyPassword != "" {
+ proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword)
+ } else {
+ proxyURL.User = url.User(config.ProxyUser)
+ }
+ }
transport.Proxy = http.ProxyURL(proxyURL)
}
client = &http.Client{Transport: transport}
+ if !config.RedirectEnabled {
+ disableHTTPRedirect(client)
+ }
}
conn.config = config
@@ -55,16 +89,35 @@ func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client)
// Do sends request and returns the response
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
+ data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
+ return conn.DoWithContext(nil, method, bucketName, objectName, params, headers, data, initCRC, listener)
+}
+
+// DoWithContext sends request and returns the response with context
+func (conn Conn) DoWithContext(ctx context.Context, method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
urlParams := conn.getURLParams(params)
subResource := conn.getSubResource(params)
uri := conn.url.getURL(bucketName, objectName, urlParams)
- resource := conn.url.getResource(bucketName, objectName, subResource)
- return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
+
+ resource := ""
+ if conn.config.AuthVersion != AuthV4 {
+ resource = conn.getResource(bucketName, objectName, subResource)
+ } else {
+ resource = conn.getResourceV4(bucketName, objectName, subResource)
+ }
+
+ return conn.doRequest(ctx, method, uri, resource, headers, data, initCRC, listener)
}
// DoURL sends the request with signed URL and returns the response result.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
+ data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
+ return conn.DoURLWithContext(nil, method, signedURL, headers, data, initCRC, listener)
+}
+
+// DoURLWithContext sends the request with signed URL and context and returns the response result.
+func (conn Conn) DoURLWithContext(ctx context.Context, method HTTPMethod, signedURL string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
// Get URI from signedURL
uri, err := url.ParseRequestURI(signedURL)
@@ -83,6 +136,9 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
Host: uri.Host,
}
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
@@ -98,7 +154,7 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
req.Header.Set("Proxy-Authorization", basic)
}
- req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
+ req.Header.Set(HTTPHeaderHost, req.Host)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
@@ -108,28 +164,30 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
}
// Transfer started
- event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
+ event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
publishProgress(listener, event)
if conn.config.LogLevel >= Debug {
- conn.LoggerHttpReq(req)
+ conn.LoggerHTTPReq(req)
}
resp, err := conn.client.Do(req)
if err != nil {
// Transfer failed
- event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
+ conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
+ event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
+
return nil, err
}
if conn.config.LogLevel >= Debug {
//print out http resp
- conn.LoggerHttpResp(req, resp)
+ conn.LoggerHTTPResp(req, resp)
}
// Transfer completed
- event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
+ event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
@@ -150,8 +208,8 @@ func (conn Conn) getURLParams(params map[string]interface{}) string {
buf.WriteByte('&')
}
buf.WriteString(url.QueryEscape(k))
- if params[k] != nil {
- buf.WriteString("=" + url.QueryEscape(params[k].(string)))
+ if params[k] != nil && params[k].(string) != "" {
+ buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1))
}
}
@@ -161,9 +219,19 @@ func (conn Conn) getURLParams(params map[string]interface{}) string {
func (conn Conn) getSubResource(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
+ signParams := make(map[string]string)
for k := range params {
- if conn.isParamSign(k) {
+ if conn.config.AuthVersion == AuthV2 || conn.config.AuthVersion == AuthV4 {
+ encodedKey := url.QueryEscape(k)
+ keys = append(keys, encodedKey)
+ if params[k] != nil && params[k] != "" {
+ signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)
+ }
+ } else if conn.isParamSign(k) {
keys = append(keys, k)
+ if params[k] != nil {
+ signParams[k] = params[k].(string)
+ }
}
}
sort.Strings(keys)
@@ -175,11 +243,12 @@ func (conn Conn) getSubResource(params map[string]interface{}) string {
buf.WriteByte('&')
}
buf.WriteString(k)
- if params[k] != nil {
- buf.WriteString("=" + params[k].(string))
+ if _, ok := signParams[k]; ok {
+ if signParams[k] != "" {
+ buf.WriteString("=" + signParams[k])
+ }
}
}
-
return buf.String()
}
@@ -192,10 +261,48 @@ func (conn Conn) isParamSign(paramKey string) bool {
return false
}
-func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
+// getResource gets canonicalized resource
+func (conn Conn) getResource(bucketName, objectName, subResource string) string {
+ if subResource != "" {
+ subResource = "?" + subResource
+ }
+ if bucketName == "" {
+ if conn.config.AuthVersion == AuthV2 {
+ return url.QueryEscape("/") + subResource
+ }
+ return fmt.Sprintf("/%s%s", bucketName, subResource)
+ }
+ if conn.config.AuthVersion == AuthV2 {
+ return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource
+ }
+ return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
+}
+
+// getResourceV4 gets the canonicalized resource for the v4 signature
+func (conn Conn) getResourceV4(bucketName, objectName, subResource string) string {
+ if subResource != "" {
+ subResource = "?" + subResource
+ }
+
+ if bucketName == "" {
+ return fmt.Sprintf("/%s", subResource)
+ }
+
+ if objectName != "" {
+ objectName = url.QueryEscape(objectName)
+ objectName = strings.Replace(objectName, "+", "%20", -1)
+ objectName = strings.Replace(objectName, "%2F", "/", -1)
+ return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
+ }
+ return fmt.Sprintf("/%s/%s", bucketName, subResource)
+}
+
+func (conn Conn) doRequest(ctx context.Context, method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
method = strings.ToUpper(method)
- req := &http.Request{
+ var req *http.Request
+ var err error
+ req = &http.Request{
Method: method,
URL: uri,
Proto: "HTTP/1.1",
@@ -204,7 +311,9 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
Header: make(http.Header),
Host: uri.Host,
}
-
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
@@ -220,12 +329,18 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
req.Header.Set("Proxy-Authorization", basic)
}
- date := time.Now().UTC().Format(http.TimeFormat)
- req.Header.Set(HTTPHeaderDate, date)
- req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
+ stNow := time.Now().UTC()
+ req.Header.Set(HTTPHeaderDate, stNow.Format(http.TimeFormat))
+ req.Header.Set(HTTPHeaderHost, req.Host)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
- if conn.config.SecurityToken != "" {
- req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
+
+ if conn.config.AuthVersion == AuthV4 {
+ req.Header.Set(HttpHeaderOssContentSha256, DefaultContentSha256)
+ }
+
+ akIf := conn.config.GetCredentials()
+ if akIf.GetSecurityToken() != "" {
+ req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken())
}
if headers != nil {
@@ -237,40 +352,40 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
conn.signHeader(req, canonicalizedResource)
// Transfer started
- event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
+ event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
publishProgress(listener, event)
if conn.config.LogLevel >= Debug {
- conn.LoggerHttpReq(req)
+ conn.LoggerHTTPReq(req)
}
resp, err := conn.client.Do(req)
if err != nil {
+ conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
// Transfer failed
- event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
+ event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
return nil, err
}
if conn.config.LogLevel >= Debug {
//print out http resp
- conn.LoggerHttpResp(req, resp)
+ conn.LoggerHTTPResp(req, resp)
}
// Transfer completed
- event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
+ event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
- if conn.config.SecurityToken != "" {
- params[HTTPParamSecurityToken] = conn.config.SecurityToken
+ akIf := conn.config.GetCredentials()
+ if akIf.GetSecurityToken() != "" {
+ params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
}
- subResource := conn.getSubResource(params)
- canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)
m := strings.ToUpper(string(method))
req := &http.Request{
@@ -285,7 +400,6 @@ func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expir
}
req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
- req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
@@ -294,12 +408,27 @@ func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expir
}
}
- signedStr := conn.getSignedStr(req, canonicalizedResource)
+ if conn.config.AuthVersion == AuthV2 {
+ params[HTTPParamSignatureVersion] = "OSS2"
+ params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10)
+ params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID
+ additionalList, _ := conn.getAdditionalHeaderKeys(req)
+ if len(additionalList) > 0 {
+ params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";")
+ }
+ }
- params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
- params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
- params[HTTPParamSignature] = signedStr
+ subResource := conn.getSubResource(params)
+ canonicalizedResource := conn.getResource(bucketName, objectName, subResource)
+ signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
+ if conn.config.AuthVersion == AuthV1 {
+ params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
+ params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
+ params[HTTPParamSignature] = signedStr
+ } else if conn.config.AuthVersion == AuthV2 {
+ params[HTTPParamSignatureV2] = signedStr
+ }
urlParams := conn.getURLParams(params)
return conn.url.getSignURL(bucketName, objectName, urlParams)
}
@@ -312,12 +441,13 @@ func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expir
expireStr := strconv.FormatInt(expiration, 10)
params[HTTPParamExpires] = expireStr
- if conn.config.AccessKeyID != "" {
- params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
- if conn.config.SecurityToken != "" {
- params[HTTPParamSecurityToken] = conn.config.SecurityToken
+ akIf := conn.config.GetCredentials()
+ if akIf.GetAccessKeyID() != "" {
+ params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
+ if akIf.GetSecurityToken() != "" {
+ params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
}
- signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, params)
+ signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params)
params[HTTPParamSignature] = signedStr
}
@@ -331,19 +461,9 @@ func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
var file *os.File
var crc hash.Hash64
reader := body
-
- // Length
- switch v := body.(type) {
- case *bytes.Buffer:
- req.ContentLength = int64(v.Len())
- case *bytes.Reader:
- req.ContentLength = int64(v.Len())
- case *strings.Reader:
- req.ContentLength = int64(v.Len())
- case *os.File:
- req.ContentLength = tryGetFileSize(v)
- case *io.LimitedReader:
- req.ContentLength = int64(v.N)
+ readerLen, err := GetReaderLen(reader)
+ if err == nil {
+ req.ContentLength = readerLen
}
req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
@@ -356,7 +476,7 @@ func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
// CRC
if reader != nil && conn.config.IsEnableCRC {
- crc = NewCRC(crcTable(), initCRC)
+ crc = NewCRC(CrcTable(), initCRC)
reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
}
@@ -403,61 +523,135 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
var srvCRC uint64
statusCode := resp.StatusCode
- if statusCode >= 400 && statusCode <= 505 {
- // 4xx and 5xx indicate that the operation has error occurred
- var respBody []byte
- respBody, err := readResponseBody(resp)
- if err != nil {
- return nil, err
- }
-
- if len(respBody) == 0 {
- err = ServiceError{
- StatusCode: statusCode,
- RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
+ if statusCode/100 != 2 {
+ if statusCode >= 400 && statusCode <= 505 {
+ // 4xx and 5xx indicate that the operation has error occurred
+ var respBody []byte
+ var errorXml []byte
+ respBody, err := readResponseBody(resp)
+ if err != nil {
+ return nil, err
+ }
+ errorXml = respBody
+ if len(respBody) == 0 && len(resp.Header.Get(HTTPHeaderOssErr)) > 0 {
+ errorXml, err = base64.StdEncoding.DecodeString(resp.Header.Get(HTTPHeaderOssErr))
+ if err != nil {
+ errorXml = respBody
+ }
}
+ if len(errorXml) == 0 {
+ err = ServiceError{
+ StatusCode: statusCode,
+ RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
+ Ec: resp.Header.Get(HTTPHeaderOssEc),
+ }
+ } else {
+ srvErr, errIn := serviceErrFromXML(errorXml, resp.StatusCode,
+ resp.Header.Get(HTTPHeaderOssRequestID))
+ if errIn != nil { // failed to unmarshal the error response
+ if len(resp.Header.Get(HTTPHeaderOssEc)) > 0 {
+ err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s, ec = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID), resp.Header.Get(HTTPHeaderOssEc))
+ } else {
+ err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+ }
+ } else {
+ err = srvErr
+ }
+ }
+ return &Response{
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+ }, err
+ } else if statusCode >= 300 && statusCode <= 307 {
+ // OSS use 3xx, but response has no body
+ err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
+ return &Response{
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: resp.Body,
+ }, err
} else {
- // Response contains storage service error object, unmarshal
- srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
- resp.Header.Get(HTTPHeaderOssRequestID))
- if errIn != nil { // error unmarshaling the error response
- err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+ // (0,300) [308,400) [506,)
+ // Other extended http StatusCode
+ var respBody []byte
+ var errorXml []byte
+ respBody, err := readResponseBody(resp)
+ if err != nil {
+ return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err
+ }
+ errorXml = respBody
+ if len(respBody) == 0 && len(resp.Header.Get(HTTPHeaderOssErr)) > 0 {
+ errorXml, err = base64.StdEncoding.DecodeString(resp.Header.Get(HTTPHeaderOssErr))
+ if err != nil {
+ errorXml = respBody
+ }
+ }
+ if len(errorXml) == 0 {
+ err = ServiceError{
+ StatusCode: statusCode,
+ RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
+ Ec: resp.Header.Get(HTTPHeaderOssEc),
+ }
} else {
- err = srvErr
+ srvErr, errIn := serviceErrFromXML(errorXml, resp.StatusCode,
+ resp.Header.Get(HTTPHeaderOssRequestID))
+ if errIn != nil { // failed to unmarshal the error response
+ if len(resp.Header.Get(HTTPHeaderOssEc)) > 0 {
+ err = fmt.Errorf("unknown response body, status = %s, RequestId = %s, ec = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID), resp.Header.Get(HTTPHeaderOssEc))
+ } else {
+ err = fmt.Errorf("unknown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+ }
+ } else {
+ err = srvErr
+ }
}
+ return &Response{
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+ }, err
}
+ } else {
+ if conn.config.IsEnableCRC && crc != nil {
+ cliCRC = crc.Sum64()
+ }
+ srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
+ realBody := resp.Body
+ if conn.isDownloadLimitResponse(resp) {
+ limitReader := &LimitSpeedReader{
+ reader: realBody,
+ ossLimiter: conn.config.DownloadLimiter,
+ }
+ realBody = limitReader
+ }
+
+ // 2xx, successful
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
- Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
- }, err
- } else if statusCode >= 300 && statusCode <= 307 {
- // OSS use 3xx, but response has no body
- err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
- return &Response{
- StatusCode: resp.StatusCode,
- Headers: resp.Header,
- Body: resp.Body,
- }, err
+ Body: realBody,
+ ClientCRC: cliCRC,
+ ServerCRC: srvCRC,
+ }, nil
}
+}
- if conn.config.IsEnableCRC && crc != nil {
- cliCRC = crc.Sum64()
+// isDownloadLimitResponse: determine whether to limit the download speed of the response
+func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool {
+ if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil {
+ return false
}
- srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
- // 2xx, successful
- return &Response{
- StatusCode: resp.StatusCode,
- Headers: resp.Header,
- Body: resp.Body,
- ClientCRC: cliCRC,
- ServerCRC: srvCRC,
- }, nil
+ if strings.EqualFold(resp.Request.Method, "GET") {
+ return true
+ }
+ return false
}
-func (conn Conn) LoggerHttpReq(req *http.Request) {
+// LoggerHTTPReq prints the header information of the http request
+func (conn Conn) LoggerHTTPReq(req *http.Request) {
var logBuffer bytes.Buffer
logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
@@ -478,7 +672,8 @@ func (conn Conn) LoggerHttpReq(req *http.Request) {
conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
}
-func (conn Conn) LoggerHttpResp(req *http.Request, resp *http.Response) {
+// LoggerHTTPResp prints the status and header information of the http response
+func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) {
var logBuffer bytes.Buffer
logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode))
logBuffer.WriteString(fmt.Sprintf("Header info:"))
@@ -613,9 +808,10 @@ func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
// UrlMaker builds URL and resource
const (
- urlTypeCname = 1
- urlTypeIP = 2
- urlTypeAliyun = 3
+ urlTypeCname = 1
+ urlTypeIP = 2
+ urlTypeAliyun = 3
+ urlTypePathStyle = 4
)
type urlMaker struct {
@@ -626,7 +822,12 @@ type urlMaker struct {
}
// Init parses endpoint
-func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
+func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error {
+ return um.InitExt(endpoint, isCname, isProxy, false)
+}
+
+// InitExt parses endpoint
+func (um *urlMaker) InitExt(endpoint string, isCname bool, isProxy bool, isPathStyle bool) error {
if strings.HasPrefix(endpoint, "http://") {
um.Scheme = "http"
um.NetLoc = endpoint[len("http://"):]
@@ -638,10 +839,18 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
um.NetLoc = endpoint
}
+ //use url.Parse() to get real host
+ strUrl := um.Scheme + "://" + um.NetLoc
+ url, err := url.Parse(strUrl)
+ if err != nil {
+ return err
+ }
+
+ um.NetLoc = url.Host
host, _, err := net.SplitHostPort(um.NetLoc)
if err != nil {
host = um.NetLoc
- if host[0] == '[' && host[len(host)-1] == ']' {
+ if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
}
@@ -651,10 +860,14 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
um.Type = urlTypeIP
} else if isCname {
um.Type = urlTypeCname
+ } else if isPathStyle {
+ um.Type = urlTypePathStyle
} else {
um.Type = urlTypeAliyun
}
um.IsProxy = isProxy
+
+ return nil
}
// getURL gets URL
@@ -697,7 +910,7 @@ func (um urlMaker) buildURL(bucket, object string) (string, string) {
if um.Type == urlTypeCname {
host = um.NetLoc
path = "/" + object
- } else if um.Type == urlTypeIP {
+ } else if um.Type == urlTypeIP || um.Type == urlTypePathStyle {
if bucket == "" {
host = um.NetLoc
path = "/"
@@ -718,13 +931,36 @@ func (um urlMaker) buildURL(bucket, object string) (string, string) {
return host, path
}
-// getResource gets canonicalized resource
-func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
- if subResource != "" {
- subResource = "?" + subResource
- }
- if bucketName == "" {
- return fmt.Sprintf("/%s%s", bucketName, subResource)
+// buildURLV4 builds the URL for the v4 signature
+func (um urlMaker) buildURLV4(bucket, object string) (string, string) {
+ var host = ""
+ var path = ""
+
+ object = url.QueryEscape(object)
+ object = strings.Replace(object, "+", "%20", -1)
+
+ // no escape /
+ object = strings.Replace(object, "%2F", "/", -1)
+
+ if um.Type == urlTypeCname {
+ host = um.NetLoc
+ path = "/" + object
+ } else if um.Type == urlTypeIP || um.Type == urlTypePathStyle {
+ if bucket == "" {
+ host = um.NetLoc
+ path = "/"
+ } else {
+ host = um.NetLoc
+ path = fmt.Sprintf("/%s/%s", bucket, object)
+ }
+ } else {
+ if bucket == "" {
+ host = um.NetLoc
+ path = "/"
+ } else {
+ host = bucket + "." + um.NetLoc
+ path = fmt.Sprintf("/%s/%s", bucket, object)
+ }
}
- return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
+ return host, path
}
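The init changes above (proxy credentials attached to the proxy URL, redirects optionally disabled) are driven entirely by existing client options. A hedged construction sketch with placeholder proxy details:

    // Sketch only.
    proxiedClient, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>",
        oss.AuthProxy("http://proxy.example.com:3128", "<proxyUser>", "<proxyPassword>"),
        oss.RedirectEnabled(false))
    if err != nil {
        log.Fatal(err)
    }
    _ = proxiedClient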
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
index d1eb4b5f..cd89c70e 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
@@ -19,6 +19,17 @@ const (
ACLDefault ACLType = "default"
)
+// VersioningStatus defines the bucket versioning status
+type VersioningStatus string
+
+const (
+ // Versioning Status definition: Enabled
+ VersionEnabled VersioningStatus = "Enabled"
+
+ // Versioning Status definition: Suspended
+ VersionSuspended VersioningStatus = "Suspended"
+)
+
// MetadataDirectiveType specifying whether use the metadata of source object when copying object.
type MetadataDirectiveType string
@@ -30,6 +41,26 @@ const (
MetaReplace MetadataDirectiveType = "REPLACE"
)
+// TaggingDirectiveType specifies whether to use the tagging of the source object when copying an object.
+type TaggingDirectiveType string
+
+const (
+ // TaggingCopy the target object's tagging is copied from the source one
+ TaggingCopy TaggingDirectiveType = "COPY"
+
+ // TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
+ TaggingReplace TaggingDirectiveType = "REPLACE"
+)
+
+// AlgorithmType specifies the server-side encryption algorithm name
+type AlgorithmType string
+
+const (
+ KMSAlgorithm AlgorithmType = "KMS"
+ AESAlgorithm AlgorithmType = "AES256"
+ SM4Algorithm AlgorithmType = "SM4"
+)
+
// StorageClassType bucket storage type
type StorageClassType string
@@ -42,6 +73,31 @@ const (
// StorageArchive archive
StorageArchive StorageClassType = "Archive"
+
+ // StorageColdArchive cold archive
+ StorageColdArchive StorageClassType = "ColdArchive"
+
+ // StorageDeepColdArchive deep cold archive
+ StorageDeepColdArchive StorageClassType = "DeepColdArchive"
+)
+
+// DataRedundancyType defines the bucket data redundancy type
+type DataRedundancyType string
+
+const (
+ // RedundancyLRS Local redundancy, default value
+ RedundancyLRS DataRedundancyType = "LRS"
+
+ // RedundancyZRS Same city redundancy
+ RedundancyZRS DataRedundancyType = "ZRS"
+)
+
+// ObjecthashFuncType defines the hash function used to compute the object hash
+type ObjecthashFuncType string
+
+const (
+ HashFuncSha1 ObjecthashFuncType = "SHA-1"
+ HashFuncSha256 ObjecthashFuncType = "SHA-256"
)
// PayerType the type of request payer
@@ -49,7 +105,24 @@ type PayerType string
const (
// Requester the requester who send the request
- Requester PayerType = "requester"
+ Requester PayerType = "Requester"
+
+ // BucketOwner the bucket owner pays for the request
+ BucketOwner PayerType = "BucketOwner"
+)
+
+// RestoreMode the restore mode for cold archive objects
+type RestoreMode string
+
+const (
+ //RestoreExpedited object will be restored in 1 hour
+ RestoreExpedited RestoreMode = "Expedited"
+
+ //RestoreStandard object will be restored in 2-5 hours
+ RestoreStandard RestoreMode = "Standard"
+
+ //RestoreBulk object will be restored in 5-10 hours
+ RestoreBulk RestoreMode = "Bulk"
)
// HTTPMethod HTTP request method
@@ -97,6 +170,8 @@ const (
HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since"
HTTPHeaderIfMatch = "If-Match"
HTTPHeaderIfNoneMatch = "If-None-Match"
+ HTTPHeaderACReqMethod = "Access-Control-Request-Method"
+ HTTPHeaderACReqHeaders = "Access-Control-Request-Headers"
HTTPHeaderOssACL = "X-Oss-Acl"
HTTPHeaderOssMetaPrefix = "X-Oss-Meta-"
@@ -104,6 +179,10 @@ const (
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id"
+ HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption"
+ HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
+ HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key"
+ HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
@@ -118,7 +197,21 @@ const (
HTTPHeaderOssStorageClass = "X-Oss-Storage-Class"
HTTPHeaderOssCallback = "X-Oss-Callback"
HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var"
- HTTPHeaderOSSRequester = "X-Oss-Request-Payer"
+ HTTPHeaderOssRequester = "X-Oss-Request-Payer"
+ HTTPHeaderOssTagging = "X-Oss-Tagging"
+ HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive"
+ HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit"
+ HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite"
+ HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior"
+ HTTPHeaderOssTaskID = "X-Oss-Task-Id"
+ HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx"
+ HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx"
+ HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap"
+ HttpHeaderOssDate = "X-Oss-Date"
+ HttpHeaderOssContentSha256 = "X-Oss-Content-Sha256"
+ HttpHeaderOssNotification = "X-Oss-Notification"
+ HTTPHeaderOssEc = "X-Oss-Ec"
+ HTTPHeaderOssErr = "X-Oss-Err"
)
// HTTP Param
@@ -128,6 +221,12 @@ const (
HTTPParamSignature = "Signature"
HTTPParamSecurityToken = "security-token"
HTTPParamPlaylistName = "playlistName"
+
+ HTTPParamSignatureVersion = "x-oss-signature-version"
+ HTTPParamExpiresV2 = "x-oss-expires"
+ HTTPParamAccessKeyIDV2 = "x-oss-access-key-id"
+ HTTPParamSignatureV2 = "x-oss-signature"
+ HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
)
// Other constants
@@ -142,5 +241,30 @@ const (
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
- Version = "1.9.5" // Go SDK version
+ NullVersion = "null"
+
+ DefaultContentSha256 = "UNSIGNED-PAYLOAD" // for v4 signature
+
+ Version = "v2.2.10" // Go SDK version
+)
+
+// FrameType
+const (
+ DataFrameType = 8388609
+ ContinuousFrameType = 8388612
+ EndFrameType = 8388613
+ MetaEndFrameCSVType = 8388614
+ MetaEndFrameJSONType = 8388615
+)
+
+// AuthVersion the version of auth
+type AuthVersionType string
+
+const (
+ // AuthV1 v1
+ AuthV1 AuthVersionType = "v1"
+ // AuthV2 v2
+ AuthV2 AuthVersionType = "v2"
+ // AuthV4 v4
+ AuthV4 AuthVersionType = "v4"
)
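The corrected PayerType values are consumed by the existing RequestPayer option (used in download.go below). A short sketch, assuming a bucket handle obtained from the client in the usual way; bucket and object names are placeholders:

    // Sketch only: read an object from a requester-pays bucket.
    bucket, err := client.Bucket("examplebucket")
    if err != nil {
        log.Fatal(err)
    }
    body, err := bucket.GetObject("example/object.txt", oss.RequestPayer(oss.Requester))
    if err != nil {
        log.Fatal(err)
    }
    defer body.Close()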
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
index f0f0857b..90c1b633 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
@@ -14,6 +14,7 @@ import (
"os"
"path/filepath"
"strconv"
+ "time"
)
// DownloadFile downloads files with multipart download.
@@ -30,7 +31,7 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
return errors.New("oss: part size smaller than 1")
}
- uRange, err := getRangeConfig(options)
+ uRange, err := GetRangeConfig(options)
if err != nil {
return err
}
@@ -38,8 +39,14 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
cpConf := getCpConfig(options)
routines := getRoutines(options)
+ var strVersionId string
+ versionId, _ := FindOption(options, "versionId", nil)
+ if versionId != nil {
+ strVersionId = versionId.(string)
+ }
+
if cpConf != nil && cpConf.IsEnable {
- cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath)
+ cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
if cpFilePath != "" {
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
}
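The checkpoint path above now keys the checkpoint file on the object's version as well. A hedged sketch of a resumable, versioned download; the paths, part size and version ID are placeholders, and Routines, Checkpoint and VersionId are assumed to be the SDK's existing options:

    // Sketch only: 3 concurrent workers, 1 MiB parts, explicit checkpoint file.
    if err := bucket.DownloadFile("example/object.txt", "/tmp/object.txt", 1024*1024,
        oss.Routines(3),
        oss.Checkpoint(true, "/tmp/object.txt.cp"),
        oss.VersionId("<versionId>")); err != nil {
        log.Fatal(err)
    }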
@@ -48,27 +55,16 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
-func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string {
+func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
absPath, _ := filepath.Abs(destFile)
- cpFileName := getCpFileName(src, absPath)
+ cpFileName := getCpFileName(src, absPath, versionId)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
}
-// getRangeConfig gets the download range from the options.
-func getRangeConfig(options []Option) (*unpackedRange, error) {
- rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
- if err != nil || rangeOpt == nil {
- return nil, err
- }
- return parseRange(rangeOpt.(string))
-}
-
-// ----- concurrent download without checkpoint -----
-
// downloadWorkerArg is download worker's parameters
type downloadWorkerArg struct {
bucket *Bucket
@@ -107,10 +103,12 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
// Resolve options
r := Range(part.Start, part.End)
p := Progress(&defaultDownloadProgressListener{})
- opts := make([]Option, len(arg.options)+2)
+
+ var respHeader http.Header
+ opts := make([]Option, len(arg.options)+3)
// Append orderly, can not be reversed!
opts = append(opts, arg.options...)
- opts = append(opts, r, p)
+ opts = append(opts, r, p, GetResponseHeader(&respHeader))
rd, err := arg.bucket.GetObject(arg.key, opts...)
if err != nil {
@@ -121,7 +119,7 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
var crcCalc hash.Hash64
if arg.enableCRC {
- crcCalc = crc64.New(crcTable())
+ crcCalc = crc64.New(CrcTable())
contentLen := part.End - part.Start + 1
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
}
@@ -146,8 +144,11 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
break
}
+ startT := time.Now().UnixNano() / 1000 / 1000 / 1000
_, err = io.Copy(fd, rd)
+ endT := time.Now().UnixNano() / 1000 / 1000 / 1000
if err != nil {
+ arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
fd.Close()
failed <- err
break
@@ -180,11 +181,11 @@ type downloadPart struct {
}
// getDownloadParts gets download parts
-func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart {
+func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
parts := []downloadPart{}
part := downloadPart{}
i := 0
- start, end := adjustRange(uRange, objectSize)
+ start, end := AdjustRange(uRange, objectSize)
for offset := start; offset < end; offset += partSize {
part.Index = i
part.Start = offset
@@ -221,15 +222,9 @@ func combineCRCInParts(dps []downloadPart) uint64 {
}
// downloadFile downloads file concurrently without checkpoint.
-func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
+func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
tempFilePath := filePath + TempFileSuffix
- listener := getProgressListener(options)
-
- payerOptions := []Option{}
- payer := getPayer(options)
- if payer != "" {
- payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
- }
+ listener := GetProgressListener(options)
// If the file does not exist, create one. If exists, the download will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
@@ -238,22 +233,25 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
}
fd.Close()
- meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+ // Get the object detailed meta for object whole size
+ // must delete header:range to get whole object size
+ skipOptions := DeleteOption(options, HTTPHeaderRange)
+ meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
if err != nil {
return err
}
- objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
enableCRC := false
expectedCRC := (uint64)(0)
- if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
- if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+ if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+ if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
enableCRC = true
- expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+ expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
}
}
@@ -266,7 +264,7 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
var completedBytes int64
totalBytes := getObjectBytes(parts)
- event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+ event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start the download workers
@@ -284,13 +282,14 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
select {
case part := <-results:
completed++
- completedBytes += (part.End - part.Start + 1)
+ downBytes := (part.End - part.Start + 1)
+ completedBytes += downBytes
parts[part.Index].CRC64 = part.CRC64
- event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+ event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
- event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
return err
}
@@ -300,12 +299,12 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
}
}
- event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
if enableCRC {
actualCRC := combineCRCInParts(parts)
- err = checkDownloadCRC(actualCRC, expectedCRC)
+ err = CheckDownloadCRC(actualCRC, expectedCRC)
if err != nil {
return err
}
@@ -339,7 +338,7 @@ type objectStat struct {
}
// isValid flags of checkpoint data is valid. It returns true when the data is valid and the checkpoint is valid and the object is not updated.
-func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
// Compare the CP's Magic and the MD5
cpb := cp
cpb.MD5 = ""
@@ -351,7 +350,7 @@ func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (b
return false, nil
}
- objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return false, err
}
@@ -365,7 +364,7 @@ func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (b
// Check the download range
if uRange != nil {
- start, end := adjustRange(uRange, objectSize)
+ start, end := AdjustRange(uRange, objectSize)
if start != cp.Start || end != cp.End {
return false, nil
}
@@ -432,13 +431,13 @@ func (cp downloadCheckpoint) getCompletedBytes() int64 {
}
// prepare initiates download tasks
-func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
+func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
// CP
cp.Magic = downloadCpMagic
cp.FilePath = filePath
cp.Object = objectKey
- objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
@@ -447,10 +446,10 @@ func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKe
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
- if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
- if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+ if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+ if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
cp.enableCRC = true
- cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+ cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
}
}
@@ -465,20 +464,17 @@ func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKe
}
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
- os.Remove(cpFilePath)
- return os.Rename(downFilepath, cp.FilePath)
+ err := os.Rename(downFilepath, cp.FilePath)
+ if err != nil {
+ return err
+ }
+ return os.Remove(cpFilePath)
}
// downloadFileWithCp downloads files with checkpoint.
-func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
+func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
tempFilePath := filePath + TempFileSuffix
- listener := getProgressListener(options)
-
- payerOptions := []Option{}
- payer := getPayer(options)
- if payer != "" {
- payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
- }
+ listener := GetProgressListener(options)
// Load checkpoint data.
dcp := downloadCheckpoint{}
@@ -487,8 +483,10 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
os.Remove(cpFilePath)
}
- // Get the object detailed meta.
- meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+ // Get the object's detailed meta to learn the whole object size;
+ // the Range header must be removed so the full size is returned
+ skipOptions := DeleteOption(options, HTTPHeaderRange)
+ meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
if err != nil {
return err
}
@@ -517,7 +515,7 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
die := make(chan bool)
completedBytes := dcp.getCompletedBytes()
- event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
+ event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
// Start the download workers routine
@@ -538,12 +536,13 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
dcp.PartStat[part.Index] = true
dcp.Parts[part.Index].CRC64 = part.CRC64
dcp.dump(cpFilePath)
- completedBytes += (part.End - part.Start + 1)
- event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
+ downBytes := (part.End - part.Start + 1)
+ completedBytes += downBytes
+ event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
- event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
return err
}
@@ -553,12 +552,12 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
}
}
- event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
if dcp.enableCRC {
actualCRC := combineCRCInParts(dcp.Parts)
- err = checkDownloadCRC(actualCRC, dcp.CRC)
+ err = CheckDownloadCRC(actualCRC, dcp.CRC)
if err != nil {
return err
}
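The reordered `complete()` above renames the temp file first and removes the checkpoint only after the rename succeeds, so a failed rename leaves the checkpoint available for a resumed download. A minimal standalone sketch of that ordering (paths are hypothetical, not the SDK's):

```go
package main

import (
	"fmt"
	"os"
)

// finalize renames the temporary download file into place and only then
// removes the checkpoint file, mirroring the ordering in complete() above:
// if the rename fails, the checkpoint survives and a retry can resume.
func finalize(cpFilePath, tempPath, finalPath string) error {
	if err := os.Rename(tempPath, finalPath); err != nil {
		return err // checkpoint is kept for the next attempt
	}
	return os.Remove(cpFilePath)
}

func main() {
	// hypothetical paths, for illustration only
	if err := finalize("object.cp", "object.dat.temp", "object.dat"); err != nil {
		fmt.Println("finalize failed:", err)
	}
}
```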
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
index 6d7b4e0f..aaa24f28 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
@@ -3,7 +3,9 @@ package oss
import (
"encoding/xml"
"fmt"
+ "io/ioutil"
"net/http"
+ "strconv"
"strings"
)
@@ -15,18 +17,22 @@ type ServiceError struct {
RequestID string `xml:"RequestId"` // The UUID used to uniquely identify the request
HostID string `xml:"HostId"` // The OSS server cluster's Id
Endpoint string `xml:"Endpoint"`
+ Ec string `xml:"EC"`
RawMessage string // The raw messages from OSS
StatusCode int // HTTP status code
+
}
// Error implements interface error
func (e ServiceError) Error() string {
- if e.Endpoint == "" {
- return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
- e.StatusCode, e.Code, e.Message, e.RequestID)
+ errorStr := fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
+ if len(e.Endpoint) > 0 {
+ errorStr = fmt.Sprintf("%s, Endpoint=%s", errorStr, e.Endpoint)
+ }
+ if len(e.Ec) > 0 {
+ errorStr = fmt.Sprintf("%s, Ec=%s", errorStr, e.Ec)
}
- return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
- e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
+ return errorStr
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
@@ -54,9 +60,9 @@ func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
-// checkRespCode returns UnexpectedStatusError if the given response code is not
+// CheckRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
-func checkRespCode(respCode int, allowed []int) error {
+func CheckRespCode(respCode int, allowed []int) error {
for _, v := range allowed {
if respCode == v {
return nil
@@ -65,6 +71,42 @@ func checkRespCode(respCode int, allowed []int) error {
return UnexpectedStatusCodeError{allowed, respCode}
}
+// CheckCallbackResp returns an error if the given response code is not 200
+func CheckCallbackResp(resp *Response) error {
+ var err error
+ contentLengthStr := resp.Headers.Get("Content-Length")
+ contentLength, _ := strconv.Atoi(contentLengthStr)
+ var bodyBytes []byte
+ if contentLength > 0 {
+ bodyBytes, _ = ioutil.ReadAll(resp.Body)
+ }
+ if len(bodyBytes) > 0 {
+ srvErr, errIn := serviceErrFromXML(bodyBytes, resp.StatusCode,
+ resp.Headers.Get(HTTPHeaderOssRequestID))
+ if errIn != nil {
+ if len(resp.Headers.Get(HTTPHeaderOssEc)) > 0 {
+ err = fmt.Errorf("unknown response body, status code = %d, RequestId = %s, ec = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID), resp.Headers.Get(HTTPHeaderOssEc))
+ } else {
+ err = fmt.Errorf("unknown response body, status code= %d, RequestId = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
+ }
+ } else {
+ err = srvErr
+ }
+ }
+ return err
+}
+
+func tryConvertServiceError(data []byte, resp *Response, def error) (err error) {
+ err = def
+ if len(data) > 0 {
+ srvErr, errIn := serviceErrFromXML(data, resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
+ if errIn == nil {
+ err = srvErr
+ }
+ }
+ return err
+}
+
// CRCCheckError is returned when crc check is inconsistent between client and server
type CRCCheckError struct {
clientCRC uint64 // Calculated CRC64 in client
@@ -79,14 +121,14 @@ func (e CRCCheckError) Error() string {
e.operation, e.clientCRC, e.serverCRC, e.requestID)
}
-func checkDownloadCRC(clientCRC, serverCRC uint64) error {
+func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
if clientCRC == serverCRC {
return nil
}
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
}
-func checkCRC(resp *Response, operation string) error {
+func CheckCRC(resp *Response, operation string) error {
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
return nil
}
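The rewritten `Error()` method builds the base message once and appends `Endpoint` and the new `EC` field only when they are present. A self-contained sketch of the same conditional-append pattern (the type and sample values below are illustrative, not the SDK's):

```go
package main

import "fmt"

// svcError mirrors only the fields the rewritten Error() method cares about.
type svcError struct {
	StatusCode int
	Code       string
	Message    string
	RequestID  string
	Endpoint   string
	Ec         string
}

// Error builds the base string and appends Endpoint/Ec only when set, the
// same shape as ServiceError.Error in the hunk above.
func (e svcError) Error() string {
	s := fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%q, RequestId=%s",
		e.StatusCode, e.Code, e.Message, e.RequestID)
	if e.Endpoint != "" {
		s = fmt.Sprintf("%s, Endpoint=%s", s, e.Endpoint)
	}
	if e.Ec != "" {
		s = fmt.Sprintf("%s, Ec=%s", s, e.Ec)
	}
	return s
}

func main() {
	// sample values for illustration only
	fmt.Println(svcError{StatusCode: 404, Code: "NoSuchKey", Message: "not found", RequestID: "req-1", Ec: "0026-00000001"})
}
```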
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
index 943dc8fd..2293f990 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
// "golang.org/x/time/rate" is depended on golang context package go1.7 onward
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
index 012f9896..ea0826ce 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
@@ -1,3 +1,4 @@
+//go:build go1.7
// +build go1.7
package oss
@@ -15,13 +16,13 @@ const (
perTokenBandwidthSize int = 1024
)
-// OssLimiter: wrapper rate.Limiter
+// OssLimiter wraps rate.Limiter
type OssLimiter struct {
limiter *rate.Limiter
}
-// GetOssLimiter:create OssLimiter
-// uploadSpeed:KB/s
+// GetOssLimiter creates an OssLimiter
+// uploadSpeed is in KB/s
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
@@ -33,7 +34,7 @@ func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
}, nil
}
-// LimitSpeedReader: for limit bandwidth upload
+// LimitSpeedReader limits the bandwidth used for uploads
type LimitSpeedReader struct {
io.ReadCloser
reader io.Reader
@@ -73,10 +74,9 @@ func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
start, end, burst, perTokenBandwidthSize)
return
- } else {
- timeDelay := re.Delay()
- time.Sleep(timeDelay)
}
+ timeDelay := re.Delay()
+ time.Sleep(timeDelay)
}
return
}
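The restructured branch in `LimitSpeedReader.Read` keeps the same throttling pattern: reserve tokens, sleep for the delay the limiter reports, then proceed. A small standalone sketch of that `ReserveN`/`Delay` pattern using `golang.org/x/time/rate` (rates and chunk counts here are made up):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Allow roughly 4 tokens per second with a burst of 4; values are illustrative.
	lim := rate.NewLimiter(4, 4)

	for i := 0; i < 8; i++ {
		re := lim.ReserveN(time.Now(), 1)
		if !re.OK() {
			// The request exceeds the burst size and can never be satisfied.
			fmt.Println("reservation not possible")
			return
		}
		// Sleep for the delay the limiter asks for, then do the work -- the same
		// ReserveN/Delay/Sleep sequence used by LimitSpeedReader.Read above.
		time.Sleep(re.Delay())
		fmt.Println("chunk", i, "at", time.Now().Format("15:04:05.000"))
	}
}
```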
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
index 00393061..1fd30fdf 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
@@ -61,7 +61,7 @@ func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime
@@ -86,7 +86,7 @@ func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime
@@ -196,14 +196,14 @@ func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHisto
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
var out ListLiveChannelResult
- params, err := getRawParams(options)
+ params, err := GetRawParams(options)
if err != nil {
return out, err
}
params["live"] = nil
- resp, err := bucket.do("GET", "", params, nil, nil, nil)
+ resp, err := bucket.doInner("GET", "", params, nil, nil, nil)
if err != nil {
return out, err
}
@@ -234,7 +234,7 @@ func (bucket Bucket) DeleteLiveChannel(channelName string) error {
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
//
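The live-channel helpers above now call the exported `CheckRespCode`; its contract is unchanged: nil when the status code is in the allowed list, an error otherwise. A standalone sketch of that check, with a simplified error value in place of the SDK's `UnexpectedStatusCodeError`:

```go
package main

import (
	"fmt"
	"net/http"
)

// checkRespCode restates the now-exported CheckRespCode helper: nil when the
// status is in the allowed list, an error otherwise.
func checkRespCode(got int, allowed []int) error {
	for _, v := range allowed {
		if got == v {
			return nil
		}
	}
	return fmt.Errorf("unexpected status code %d, allowed %v", got, allowed)
}

func main() {
	fmt.Println(checkRespCode(http.StatusNoContent, []int{http.StatusOK, http.StatusNoContent})) // <nil>
	fmt.Println(checkRespCode(http.StatusForbidden, []int{http.StatusOK}))                       // error
}
```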
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
index 11485973..96a9ee7a 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
@@ -7,231 +7,558 @@ import (
)
var extToMimeType = map[string]string{
- ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
- ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
- ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
- ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
- ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
- ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
- ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
- ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
- ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
- ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
- ".apk": "application/vnd.android.package-archive",
- ".hqx": "application/mac-binhex40",
- ".cpt": "application/mac-compactpro",
- ".doc": "application/msword",
- ".ogg": "application/ogg",
- ".pdf": "application/pdf",
- ".rtf": "text/rtf",
- ".mif": "application/vnd.mif",
- ".xls": "application/vnd.ms-excel",
- ".ppt": "application/vnd.ms-powerpoint",
- ".odc": "application/vnd.oasis.opendocument.chart",
- ".odb": "application/vnd.oasis.opendocument.database",
- ".odf": "application/vnd.oasis.opendocument.formula",
- ".odg": "application/vnd.oasis.opendocument.graphics",
- ".otg": "application/vnd.oasis.opendocument.graphics-template",
- ".odi": "application/vnd.oasis.opendocument.image",
- ".odp": "application/vnd.oasis.opendocument.presentation",
- ".otp": "application/vnd.oasis.opendocument.presentation-template",
- ".ods": "application/vnd.oasis.opendocument.spreadsheet",
- ".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
- ".odt": "application/vnd.oasis.opendocument.text",
- ".odm": "application/vnd.oasis.opendocument.text-master",
- ".ott": "application/vnd.oasis.opendocument.text-template",
- ".oth": "application/vnd.oasis.opendocument.text-web",
- ".sxw": "application/vnd.sun.xml.writer",
- ".stw": "application/vnd.sun.xml.writer.template",
- ".sxc": "application/vnd.sun.xml.calc",
- ".stc": "application/vnd.sun.xml.calc.template",
- ".sxd": "application/vnd.sun.xml.draw",
- ".std": "application/vnd.sun.xml.draw.template",
- ".sxi": "application/vnd.sun.xml.impress",
- ".sti": "application/vnd.sun.xml.impress.template",
- ".sxg": "application/vnd.sun.xml.writer.global",
- ".sxm": "application/vnd.sun.xml.math",
- ".sis": "application/vnd.symbian.install",
- ".wbxml": "application/vnd.wap.wbxml",
- ".wmlc": "application/vnd.wap.wmlc",
- ".wmlsc": "application/vnd.wap.wmlscriptc",
- ".bcpio": "application/x-bcpio",
- ".torrent": "application/x-bittorrent",
- ".bz2": "application/x-bzip2",
- ".vcd": "application/x-cdlink",
- ".pgn": "application/x-chess-pgn",
- ".cpio": "application/x-cpio",
- ".csh": "application/x-csh",
- ".dvi": "application/x-dvi",
- ".spl": "application/x-futuresplash",
- ".gtar": "application/x-gtar",
- ".hdf": "application/x-hdf",
- ".jar": "application/x-java-archive",
- ".jnlp": "application/x-java-jnlp-file",
- ".js": "application/x-javascript",
- ".ksp": "application/x-kspread",
- ".chrt": "application/x-kchart",
- ".kil": "application/x-killustrator",
- ".latex": "application/x-latex",
- ".rpm": "application/x-rpm",
- ".sh": "application/x-sh",
- ".shar": "application/x-shar",
- ".swf": "application/x-shockwave-flash",
- ".sit": "application/x-stuffit",
- ".sv4cpio": "application/x-sv4cpio",
- ".sv4crc": "application/x-sv4crc",
- ".tar": "application/x-tar",
- ".tcl": "application/x-tcl",
- ".tex": "application/x-tex",
- ".man": "application/x-troff-man",
- ".me": "application/x-troff-me",
- ".ms": "application/x-troff-ms",
- ".ustar": "application/x-ustar",
- ".src": "application/x-wais-source",
- ".zip": "application/zip",
- ".m3u": "audio/x-mpegurl",
- ".ra": "audio/x-pn-realaudio",
- ".wav": "audio/x-wav",
- ".wma": "audio/x-ms-wma",
- ".wax": "audio/x-ms-wax",
- ".pdb": "chemical/x-pdb",
- ".xyz": "chemical/x-xyz",
- ".bmp": "image/bmp",
- ".gif": "image/gif",
- ".ief": "image/ief",
- ".png": "image/png",
- ".wbmp": "image/vnd.wap.wbmp",
- ".ras": "image/x-cmu-raster",
- ".pnm": "image/x-portable-anymap",
- ".pbm": "image/x-portable-bitmap",
- ".pgm": "image/x-portable-graymap",
- ".ppm": "image/x-portable-pixmap",
- ".rgb": "image/x-rgb",
- ".xbm": "image/x-xbitmap",
- ".xpm": "image/x-xpixmap",
- ".xwd": "image/x-xwindowdump",
- ".css": "text/css",
- ".rtx": "text/richtext",
- ".tsv": "text/tab-separated-values",
- ".jad": "text/vnd.sun.j2me.app-descriptor",
- ".wml": "text/vnd.wap.wml",
- ".wmls": "text/vnd.wap.wmlscript",
- ".etx": "text/x-setext",
- ".mxu": "video/vnd.mpegurl",
- ".flv": "video/x-flv",
- ".wm": "video/x-ms-wm",
- ".wmv": "video/x-ms-wmv",
- ".wmx": "video/x-ms-wmx",
- ".wvx": "video/x-ms-wvx",
- ".avi": "video/x-msvideo",
- ".movie": "video/x-sgi-movie",
- ".ice": "x-conference/x-cooltalk",
- ".3gp": "video/3gpp",
- ".ai": "application/postscript",
- ".aif": "audio/x-aiff",
- ".aifc": "audio/x-aiff",
- ".aiff": "audio/x-aiff",
- ".asc": "text/plain",
- ".atom": "application/atom+xml",
- ".au": "audio/basic",
- ".bin": "application/octet-stream",
- ".cdf": "application/x-netcdf",
- ".cgm": "image/cgm",
- ".class": "application/octet-stream",
- ".dcr": "application/x-director",
- ".dif": "video/x-dv",
- ".dir": "application/x-director",
- ".djv": "image/vnd.djvu",
- ".djvu": "image/vnd.djvu",
- ".dll": "application/octet-stream",
- ".dmg": "application/octet-stream",
- ".dms": "application/octet-stream",
- ".dtd": "application/xml-dtd",
- ".dv": "video/x-dv",
- ".dxr": "application/x-director",
- ".eps": "application/postscript",
- ".exe": "application/octet-stream",
- ".ez": "application/andrew-inset",
- ".gram": "application/srgs",
- ".grxml": "application/srgs+xml",
- ".gz": "application/x-gzip",
- ".htm": "text/html",
- ".html": "text/html",
- ".ico": "image/x-icon",
- ".ics": "text/calendar",
- ".ifb": "text/calendar",
- ".iges": "model/iges",
- ".igs": "model/iges",
- ".jp2": "image/jp2",
- ".jpe": "image/jpeg",
- ".jpeg": "image/jpeg",
- ".jpg": "image/jpeg",
- ".kar": "audio/midi",
- ".lha": "application/octet-stream",
- ".lzh": "application/octet-stream",
- ".m4a": "audio/mp4a-latm",
- ".m4p": "audio/mp4a-latm",
- ".m4u": "video/vnd.mpegurl",
- ".m4v": "video/x-m4v",
- ".mac": "image/x-macpaint",
- ".mathml": "application/mathml+xml",
- ".mesh": "model/mesh",
- ".mid": "audio/midi",
- ".midi": "audio/midi",
- ".mov": "video/quicktime",
- ".mp2": "audio/mpeg",
- ".mp3": "audio/mpeg",
- ".mp4": "video/mp4",
- ".mpe": "video/mpeg",
- ".mpeg": "video/mpeg",
- ".mpg": "video/mpeg",
- ".mpga": "audio/mpeg",
- ".msh": "model/mesh",
- ".nc": "application/x-netcdf",
- ".oda": "application/oda",
- ".ogv": "video/ogv",
- ".pct": "image/pict",
- ".pic": "image/pict",
- ".pict": "image/pict",
- ".pnt": "image/x-macpaint",
- ".pntg": "image/x-macpaint",
- ".ps": "application/postscript",
- ".qt": "video/quicktime",
- ".qti": "image/x-quicktime",
- ".qtif": "image/x-quicktime",
- ".ram": "audio/x-pn-realaudio",
- ".rdf": "application/rdf+xml",
- ".rm": "application/vnd.rn-realmedia",
- ".roff": "application/x-troff",
- ".sgm": "text/sgml",
- ".sgml": "text/sgml",
- ".silo": "model/mesh",
- ".skd": "application/x-koan",
- ".skm": "application/x-koan",
- ".skp": "application/x-koan",
- ".skt": "application/x-koan",
- ".smi": "application/smil",
- ".smil": "application/smil",
- ".snd": "audio/basic",
- ".so": "application/octet-stream",
- ".svg": "image/svg+xml",
- ".t": "application/x-troff",
- ".texi": "application/x-texinfo",
- ".texinfo": "application/x-texinfo",
- ".tif": "image/tiff",
- ".tiff": "image/tiff",
- ".tr": "application/x-troff",
- ".txt": "text/plain",
- ".vrml": "model/vrml",
- ".vxml": "application/voicexml+xml",
- ".webm": "video/webm",
- ".wrl": "model/vrml",
- ".xht": "application/xhtml+xml",
- ".xhtml": "application/xhtml+xml",
- ".xml": "application/xml",
- ".xsl": "application/xml",
- ".xslt": "application/xslt+xml",
- ".xul": "application/vnd.mozilla.xul+xml",
+ ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
+ ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
+ ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
+ ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
+ ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
+ ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
+ ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
+ ".apk": "application/vnd.android.package-archive",
+ ".hqx": "application/mac-binhex40",
+ ".cpt": "application/mac-compactpro",
+ ".doc": "application/msword",
+ ".ogg": "application/ogg",
+ ".pdf": "application/pdf",
+ ".rtf": "text/rtf",
+ ".mif": "application/vnd.mif",
+ ".xls": "application/vnd.ms-excel",
+ ".ppt": "application/vnd.ms-powerpoint",
+ ".odc": "application/vnd.oasis.opendocument.chart",
+ ".odb": "application/vnd.oasis.opendocument.database",
+ ".odf": "application/vnd.oasis.opendocument.formula",
+ ".odg": "application/vnd.oasis.opendocument.graphics",
+ ".otg": "application/vnd.oasis.opendocument.graphics-template",
+ ".odi": "application/vnd.oasis.opendocument.image",
+ ".odp": "application/vnd.oasis.opendocument.presentation",
+ ".otp": "application/vnd.oasis.opendocument.presentation-template",
+ ".ods": "application/vnd.oasis.opendocument.spreadsheet",
+ ".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
+ ".odt": "application/vnd.oasis.opendocument.text",
+ ".odm": "application/vnd.oasis.opendocument.text-master",
+ ".ott": "application/vnd.oasis.opendocument.text-template",
+ ".oth": "application/vnd.oasis.opendocument.text-web",
+ ".sxw": "application/vnd.sun.xml.writer",
+ ".stw": "application/vnd.sun.xml.writer.template",
+ ".sxc": "application/vnd.sun.xml.calc",
+ ".stc": "application/vnd.sun.xml.calc.template",
+ ".sxd": "application/vnd.sun.xml.draw",
+ ".std": "application/vnd.sun.xml.draw.template",
+ ".sxi": "application/vnd.sun.xml.impress",
+ ".sti": "application/vnd.sun.xml.impress.template",
+ ".sxg": "application/vnd.sun.xml.writer.global",
+ ".sxm": "application/vnd.sun.xml.math",
+ ".sis": "application/vnd.symbian.install",
+ ".wbxml": "application/vnd.wap.wbxml",
+ ".wmlc": "application/vnd.wap.wmlc",
+ ".wmlsc": "application/vnd.wap.wmlscriptc",
+ ".bcpio": "application/x-bcpio",
+ ".torrent": "application/x-bittorrent",
+ ".bz2": "application/x-bzip2",
+ ".vcd": "application/x-cdlink",
+ ".pgn": "application/x-chess-pgn",
+ ".cpio": "application/x-cpio",
+ ".csh": "application/x-csh",
+ ".dvi": "application/x-dvi",
+ ".spl": "application/x-futuresplash",
+ ".gtar": "application/x-gtar",
+ ".hdf": "application/x-hdf",
+ ".jar": "application/x-java-archive",
+ ".jnlp": "application/x-java-jnlp-file",
+ ".js": "application/x-javascript",
+ ".ksp": "application/x-kspread",
+ ".chrt": "application/x-kchart",
+ ".kil": "application/x-killustrator",
+ ".latex": "application/x-latex",
+ ".rpm": "application/x-rpm",
+ ".sh": "application/x-sh",
+ ".shar": "application/x-shar",
+ ".swf": "application/x-shockwave-flash",
+ ".sit": "application/x-stuffit",
+ ".sv4cpio": "application/x-sv4cpio",
+ ".sv4crc": "application/x-sv4crc",
+ ".tar": "application/x-tar",
+ ".tcl": "application/x-tcl",
+ ".tex": "application/x-tex",
+ ".man": "application/x-troff-man",
+ ".me": "application/x-troff-me",
+ ".ms": "application/x-troff-ms",
+ ".ustar": "application/x-ustar",
+ ".src": "application/x-wais-source",
+ ".zip": "application/zip",
+ ".m3u": "audio/x-mpegurl",
+ ".ra": "audio/x-pn-realaudio",
+ ".wav": "audio/x-wav",
+ ".wma": "audio/x-ms-wma",
+ ".wax": "audio/x-ms-wax",
+ ".pdb": "chemical/x-pdb",
+ ".xyz": "chemical/x-xyz",
+ ".bmp": "image/bmp",
+ ".gif": "image/gif",
+ ".ief": "image/ief",
+ ".png": "image/png",
+ ".wbmp": "image/vnd.wap.wbmp",
+ ".ras": "image/x-cmu-raster",
+ ".pnm": "image/x-portable-anymap",
+ ".pbm": "image/x-portable-bitmap",
+ ".pgm": "image/x-portable-graymap",
+ ".ppm": "image/x-portable-pixmap",
+ ".rgb": "image/x-rgb",
+ ".xbm": "image/x-xbitmap",
+ ".xpm": "image/x-xpixmap",
+ ".xwd": "image/x-xwindowdump",
+ ".css": "text/css",
+ ".rtx": "text/richtext",
+ ".tsv": "text/tab-separated-values",
+ ".jad": "text/vnd.sun.j2me.app-descriptor",
+ ".wml": "text/vnd.wap.wml",
+ ".wmls": "text/vnd.wap.wmlscript",
+ ".etx": "text/x-setext",
+ ".mxu": "video/vnd.mpegurl",
+ ".flv": "video/x-flv",
+ ".wm": "video/x-ms-wm",
+ ".wmv": "video/x-ms-wmv",
+ ".wmx": "video/x-ms-wmx",
+ ".wvx": "video/x-ms-wvx",
+ ".avi": "video/x-msvideo",
+ ".movie": "video/x-sgi-movie",
+ ".ice": "x-conference/x-cooltalk",
+ ".3gp": "video/3gpp",
+ ".ai": "application/postscript",
+ ".aif": "audio/x-aiff",
+ ".aifc": "audio/x-aiff",
+ ".aiff": "audio/x-aiff",
+ ".asc": "text/plain",
+ ".atom": "application/atom+xml",
+ ".au": "audio/basic",
+ ".bin": "application/octet-stream",
+ ".cdf": "application/x-netcdf",
+ ".cgm": "image/cgm",
+ ".class": "application/octet-stream",
+ ".dcr": "application/x-director",
+ ".dif": "video/x-dv",
+ ".dir": "application/x-director",
+ ".djv": "image/vnd.djvu",
+ ".djvu": "image/vnd.djvu",
+ ".dll": "application/octet-stream",
+ ".dmg": "application/octet-stream",
+ ".dms": "application/octet-stream",
+ ".dtd": "application/xml-dtd",
+ ".dv": "video/x-dv",
+ ".dxr": "application/x-director",
+ ".eps": "application/postscript",
+ ".exe": "application/octet-stream",
+ ".ez": "application/andrew-inset",
+ ".gram": "application/srgs",
+ ".grxml": "application/srgs+xml",
+ ".gz": "application/x-gzip",
+ ".htm": "text/html",
+ ".html": "text/html",
+ ".ico": "image/x-icon",
+ ".ics": "text/calendar",
+ ".ifb": "text/calendar",
+ ".iges": "model/iges",
+ ".igs": "model/iges",
+ ".jp2": "image/jp2",
+ ".jpe": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".jpg": "image/jpeg",
+ ".kar": "audio/midi",
+ ".lha": "application/octet-stream",
+ ".lzh": "application/octet-stream",
+ ".m4a": "audio/mp4a-latm",
+ ".m4p": "audio/mp4a-latm",
+ ".m4u": "video/vnd.mpegurl",
+ ".m4v": "video/x-m4v",
+ ".mac": "image/x-macpaint",
+ ".mathml": "application/mathml+xml",
+ ".mesh": "model/mesh",
+ ".mid": "audio/midi",
+ ".midi": "audio/midi",
+ ".mov": "video/quicktime",
+ ".mp2": "audio/mpeg",
+ ".mp3": "audio/mpeg",
+ ".mp4": "video/mp4",
+ ".mpe": "video/mpeg",
+ ".mpeg": "video/mpeg",
+ ".mpg": "video/mpeg",
+ ".mpga": "audio/mpeg",
+ ".msh": "model/mesh",
+ ".nc": "application/x-netcdf",
+ ".oda": "application/oda",
+ ".ogv": "video/ogv",
+ ".pct": "image/pict",
+ ".pic": "image/pict",
+ ".pict": "image/pict",
+ ".pnt": "image/x-macpaint",
+ ".pntg": "image/x-macpaint",
+ ".ps": "application/postscript",
+ ".qt": "video/quicktime",
+ ".qti": "image/x-quicktime",
+ ".qtif": "image/x-quicktime",
+ ".ram": "audio/x-pn-realaudio",
+ ".rdf": "application/rdf+xml",
+ ".rm": "application/vnd.rn-realmedia",
+ ".roff": "application/x-troff",
+ ".sgm": "text/sgml",
+ ".sgml": "text/sgml",
+ ".silo": "model/mesh",
+ ".skd": "application/x-koan",
+ ".skm": "application/x-koan",
+ ".skp": "application/x-koan",
+ ".skt": "application/x-koan",
+ ".smi": "application/smil",
+ ".smil": "application/smil",
+ ".snd": "audio/basic",
+ ".so": "application/octet-stream",
+ ".svg": "image/svg+xml",
+ ".t": "application/x-troff",
+ ".texi": "application/x-texinfo",
+ ".texinfo": "application/x-texinfo",
+ ".tif": "image/tiff",
+ ".tiff": "image/tiff",
+ ".tr": "application/x-troff",
+ ".txt": "text/plain",
+ ".vrml": "model/vrml",
+ ".vxml": "application/voicexml+xml",
+ ".webm": "video/webm",
+ ".wrl": "model/vrml",
+ ".xht": "application/xhtml+xml",
+ ".xhtml": "application/xhtml+xml",
+ ".xml": "application/xml",
+ ".xsl": "application/xml",
+ ".xslt": "application/xslt+xml",
+ ".xul": "application/vnd.mozilla.xul+xml",
+ ".webp": "image/webp",
+ ".323": "text/h323",
+ ".aab": "application/x-authoware-bin",
+ ".aam": "application/x-authoware-map",
+ ".aas": "application/x-authoware-seg",
+ ".acx": "application/internet-property-stream",
+ ".als": "audio/X-Alpha5",
+ ".amc": "application/x-mpeg",
+ ".ani": "application/octet-stream",
+ ".asd": "application/astound",
+ ".asf": "video/x-ms-asf",
+ ".asn": "application/astound",
+ ".asp": "application/x-asap",
+ ".asr": "video/x-ms-asf",
+ ".asx": "video/x-ms-asf",
+ ".avb": "application/octet-stream",
+ ".awb": "audio/amr-wb",
+ ".axs": "application/olescript",
+ ".bas": "text/plain",
+ ".bin ": "application/octet-stream",
+ ".bld": "application/bld",
+ ".bld2": "application/bld2",
+ ".bpk": "application/octet-stream",
+ ".c": "text/plain",
+ ".cal": "image/x-cals",
+ ".cat": "application/vnd.ms-pkiseccat",
+ ".ccn": "application/x-cnc",
+ ".cco": "application/x-cocoa",
+ ".cer": "application/x-x509-ca-cert",
+ ".cgi": "magnus-internal/cgi",
+ ".chat": "application/x-chat",
+ ".clp": "application/x-msclip",
+ ".cmx": "image/x-cmx",
+ ".co": "application/x-cult3d-object",
+ ".cod": "image/cis-cod",
+ ".conf": "text/plain",
+ ".cpp": "text/plain",
+ ".crd": "application/x-mscardfile",
+ ".crl": "application/pkix-crl",
+ ".crt": "application/x-x509-ca-cert",
+ ".csm": "chemical/x-csml",
+ ".csml": "chemical/x-csml",
+ ".cur": "application/octet-stream",
+ ".dcm": "x-lml/x-evm",
+ ".dcx": "image/x-dcx",
+ ".der": "application/x-x509-ca-cert",
+ ".dhtml": "text/html",
+ ".dot": "application/msword",
+ ".dwf": "drawing/x-dwf",
+ ".dwg": "application/x-autocad",
+ ".dxf": "application/x-autocad",
+ ".ebk": "application/x-expandedbook",
+ ".emb": "chemical/x-embl-dl-nucleotide",
+ ".embl": "chemical/x-embl-dl-nucleotide",
+ ".epub": "application/epub+zip",
+ ".eri": "image/x-eri",
+ ".es": "audio/echospeech",
+ ".esl": "audio/echospeech",
+ ".etc": "application/x-earthtime",
+ ".evm": "x-lml/x-evm",
+ ".evy": "application/envoy",
+ ".fh4": "image/x-freehand",
+ ".fh5": "image/x-freehand",
+ ".fhc": "image/x-freehand",
+ ".fif": "application/fractals",
+ ".flr": "x-world/x-vrml",
+ ".fm": "application/x-maker",
+ ".fpx": "image/x-fpx",
+ ".fvi": "video/isivideo",
+ ".gau": "chemical/x-gaussian-input",
+ ".gca": "application/x-gca-compressed",
+ ".gdb": "x-lml/x-gdb",
+ ".gps": "application/x-gps",
+ ".h": "text/plain",
+ ".hdm": "text/x-hdml",
+ ".hdml": "text/x-hdml",
+ ".hlp": "application/winhlp",
+ ".hta": "application/hta",
+ ".htc": "text/x-component",
+ ".hts": "text/html",
+ ".htt": "text/webviewhtml",
+ ".ifm": "image/gif",
+ ".ifs": "image/ifs",
+ ".iii": "application/x-iphone",
+ ".imy": "audio/melody",
+ ".ins": "application/x-internet-signup",
+ ".ips": "application/x-ipscript",
+ ".ipx": "application/x-ipix",
+ ".isp": "application/x-internet-signup",
+ ".it": "audio/x-mod",
+ ".itz": "audio/x-mod",
+ ".ivr": "i-world/i-vrml",
+ ".j2k": "image/j2k",
+ ".jam": "application/x-jam",
+ ".java": "text/plain",
+ ".jfif": "image/pipeg",
+ ".jpz": "image/jpeg",
+ ".jwc": "application/jwc",
+ ".kjx": "application/x-kjx",
+ ".lak": "x-lml/x-lak",
+ ".lcc": "application/fastman",
+ ".lcl": "application/x-digitalloca",
+ ".lcr": "application/x-digitalloca",
+ ".lgh": "application/lgh",
+ ".lml": "x-lml/x-lml",
+ ".lmlpack": "x-lml/x-lmlpack",
+ ".log": "text/plain",
+ ".lsf": "video/x-la-asf",
+ ".lsx": "video/x-la-asf",
+ ".m13": "application/x-msmediaview",
+ ".m14": "application/x-msmediaview",
+ ".m15": "audio/x-mod",
+ ".m3url": "audio/x-mpegurl",
+ ".m4b": "audio/mp4a-latm",
+ ".ma1": "audio/ma1",
+ ".ma2": "audio/ma2",
+ ".ma3": "audio/ma3",
+ ".ma5": "audio/ma5",
+ ".map": "magnus-internal/imagemap",
+ ".mbd": "application/mbedlet",
+ ".mct": "application/x-mascot",
+ ".mdb": "application/x-msaccess",
+ ".mdz": "audio/x-mod",
+ ".mel": "text/x-vmel",
+ ".mht": "message/rfc822",
+ ".mhtml": "message/rfc822",
+ ".mi": "application/x-mif",
+ ".mil": "image/x-cals",
+ ".mio": "audio/x-mio",
+ ".mmf": "application/x-skt-lbs",
+ ".mng": "video/x-mng",
+ ".mny": "application/x-msmoney",
+ ".moc": "application/x-mocha",
+ ".mocha": "application/x-mocha",
+ ".mod": "audio/x-mod",
+ ".mof": "application/x-yumekara",
+ ".mol": "chemical/x-mdl-molfile",
+ ".mop": "chemical/x-mopac-input",
+ ".mpa": "video/mpeg",
+ ".mpc": "application/vnd.mpohun.certificate",
+ ".mpg4": "video/mp4",
+ ".mpn": "application/vnd.mophun.application",
+ ".mpp": "application/vnd.ms-project",
+ ".mps": "application/x-mapserver",
+ ".mpv2": "video/mpeg",
+ ".mrl": "text/x-mrml",
+ ".mrm": "application/x-mrm",
+ ".msg": "application/vnd.ms-outlook",
+ ".mts": "application/metastream",
+ ".mtx": "application/metastream",
+ ".mtz": "application/metastream",
+ ".mvb": "application/x-msmediaview",
+ ".mzv": "application/metastream",
+ ".nar": "application/zip",
+ ".nbmp": "image/nbmp",
+ ".ndb": "x-lml/x-ndb",
+ ".ndwn": "application/ndwn",
+ ".nif": "application/x-nif",
+ ".nmz": "application/x-scream",
+ ".nokia-op-logo": "image/vnd.nok-oplogo-color",
+ ".npx": "application/x-netfpx",
+ ".nsnd": "audio/nsnd",
+ ".nva": "application/x-neva1",
+ ".nws": "message/rfc822",
+ ".oom": "application/x-AtlasMate-Plugin",
+ ".p10": "application/pkcs10",
+ ".p12": "application/x-pkcs12",
+ ".p7b": "application/x-pkcs7-certificates",
+ ".p7c": "application/x-pkcs7-mime",
+ ".p7m": "application/x-pkcs7-mime",
+ ".p7r": "application/x-pkcs7-certreqresp",
+ ".p7s": "application/x-pkcs7-signature",
+ ".pac": "audio/x-pac",
+ ".pae": "audio/x-epac",
+ ".pan": "application/x-pan",
+ ".pcx": "image/x-pcx",
+ ".pda": "image/x-pda",
+ ".pfr": "application/font-tdpfr",
+ ".pfx": "application/x-pkcs12",
+ ".pko": "application/ynd.ms-pkipko",
+ ".pm": "application/x-perl",
+ ".pma": "application/x-perfmon",
+ ".pmc": "application/x-perfmon",
+ ".pmd": "application/x-pmd",
+ ".pml": "application/x-perfmon",
+ ".pmr": "application/x-perfmon",
+ ".pmw": "application/x-perfmon",
+ ".pnz": "image/png",
+ ".pot,": "application/vnd.ms-powerpoint",
+ ".pps": "application/vnd.ms-powerpoint",
+ ".pqf": "application/x-cprplayer",
+ ".pqi": "application/cprplayer",
+ ".prc": "application/x-prc",
+ ".prf": "application/pics-rules",
+ ".prop": "text/plain",
+ ".proxy": "application/x-ns-proxy-autoconfig",
+ ".ptlk": "application/listenup",
+ ".pub": "application/x-mspublisher",
+ ".pvx": "video/x-pv-pvx",
+ ".qcp": "audio/vnd.qcelp",
+ ".r3t": "text/vnd.rn-realtext3d",
+ ".rar": "application/octet-stream",
+ ".rc": "text/plain",
+ ".rf": "image/vnd.rn-realflash",
+ ".rlf": "application/x-richlink",
+ ".rmf": "audio/x-rmf",
+ ".rmi": "audio/mid",
+ ".rmm": "audio/x-pn-realaudio",
+ ".rmvb": "audio/x-pn-realaudio",
+ ".rnx": "application/vnd.rn-realplayer",
+ ".rp": "image/vnd.rn-realpix",
+ ".rt": "text/vnd.rn-realtext",
+ ".rte": "x-lml/x-gps",
+ ".rtg": "application/metastream",
+ ".rv": "video/vnd.rn-realvideo",
+ ".rwc": "application/x-rogerwilco",
+ ".s3m": "audio/x-mod",
+ ".s3z": "audio/x-mod",
+ ".sca": "application/x-supercard",
+ ".scd": "application/x-msschedule",
+ ".sct": "text/scriptlet",
+ ".sdf": "application/e-score",
+ ".sea": "application/x-stuffit",
+ ".setpay": "application/set-payment-initiation",
+ ".setreg": "application/set-registration-initiation",
+ ".shtml": "text/html",
+ ".shtm": "text/html",
+ ".shw": "application/presentations",
+ ".si6": "image/si6",
+ ".si7": "image/vnd.stiwap.sis",
+ ".si9": "image/vnd.lgtwap.sis",
+ ".slc": "application/x-salsa",
+ ".smd": "audio/x-smd",
+ ".smp": "application/studiom",
+ ".smz": "audio/x-smd",
+ ".spc": "application/x-pkcs7-certificates",
+ ".spr": "application/x-sprite",
+ ".sprite": "application/x-sprite",
+ ".sdp": "application/sdp",
+ ".spt": "application/x-spt",
+ ".sst": "application/vnd.ms-pkicertstore",
+ ".stk": "application/hyperstudio",
+ ".stl": "application/vnd.ms-pkistl",
+ ".stm": "text/html",
+ ".svf": "image/vnd",
+ ".svh": "image/svh",
+ ".svr": "x-world/x-svr",
+ ".swfl": "application/x-shockwave-flash",
+ ".tad": "application/octet-stream",
+ ".talk": "text/x-speech",
+ ".taz": "application/x-tar",
+ ".tbp": "application/x-timbuktu",
+ ".tbt": "application/x-timbuktu",
+ ".tgz": "application/x-compressed",
+ ".thm": "application/vnd.eri.thm",
+ ".tki": "application/x-tkined",
+ ".tkined": "application/x-tkined",
+ ".toc": "application/toc",
+ ".toy": "image/toy",
+ ".trk": "x-lml/x-gps",
+ ".trm": "application/x-msterminal",
+ ".tsi": "audio/tsplayer",
+ ".tsp": "application/dsptype",
+ ".ttf": "application/octet-stream",
+ ".ttz": "application/t-time",
+ ".uls": "text/iuls",
+ ".ult": "audio/x-mod",
+ ".uu": "application/x-uuencode",
+ ".uue": "application/x-uuencode",
+ ".vcf": "text/x-vcard",
+ ".vdo": "video/vdo",
+ ".vib": "audio/vib",
+ ".viv": "video/vivo",
+ ".vivo": "video/vivo",
+ ".vmd": "application/vocaltec-media-desc",
+ ".vmf": "application/vocaltec-media-file",
+ ".vmi": "application/x-dreamcast-vms-info",
+ ".vms": "application/x-dreamcast-vms",
+ ".vox": "audio/voxware",
+ ".vqe": "audio/x-twinvq-plugin",
+ ".vqf": "audio/x-twinvq",
+ ".vql": "audio/x-twinvq",
+ ".vre": "x-world/x-vream",
+ ".vrt": "x-world/x-vrt",
+ ".vrw": "x-world/x-vream",
+ ".vts": "workbook/formulaone",
+ ".wcm": "application/vnd.ms-works",
+ ".wdb": "application/vnd.ms-works",
+ ".web": "application/vnd.xara",
+ ".wi": "image/wavelet",
+ ".wis": "application/x-InstallShield",
+ ".wks": "application/vnd.ms-works",
+ ".wmd": "application/x-ms-wmd",
+ ".wmf": "application/x-msmetafile",
+ ".wmlscript": "text/vnd.wap.wmlscript",
+ ".wmz": "application/x-ms-wmz",
+ ".wpng": "image/x-up-wpng",
+ ".wps": "application/vnd.ms-works",
+ ".wpt": "x-lml/x-gps",
+ ".wri": "application/x-mswrite",
+ ".wrz": "x-world/x-vrml",
+ ".ws": "text/vnd.wap.wmlscript",
+ ".wsc": "application/vnd.wap.wmlscriptc",
+ ".wv": "video/wavelet",
+ ".wxl": "application/x-wxl",
+ ".x-gzip": "application/x-gzip",
+ ".xaf": "x-world/x-vrml",
+ ".xar": "application/vnd.xara",
+ ".xdm": "application/x-xdma",
+ ".xdma": "application/x-xdma",
+ ".xdw": "application/vnd.fujixerox.docuworks",
+ ".xhtm": "application/xhtml+xml",
+ ".xla": "application/vnd.ms-excel",
+ ".xlc": "application/vnd.ms-excel",
+ ".xll": "application/x-excel",
+ ".xlm": "application/vnd.ms-excel",
+ ".xlt": "application/vnd.ms-excel",
+ ".xlw": "application/vnd.ms-excel",
+ ".xm": "audio/x-mod",
+ ".xmz": "audio/x-mod",
+ ".xof": "x-world/x-vrml",
+ ".xpi": "application/x-xpinstall",
+ ".xsit": "text/xml",
+ ".yz1": "application/x-yz1",
+ ".z": "application/x-compress",
+ ".zac": "application/x-zaurus-zac",
+ ".json": "application/json",
}
// TypeByExtension returns the MIME type associated with the file extension ext.
@@ -240,6 +567,28 @@ func TypeByExtension(filePath string) string {
typ := mime.TypeByExtension(path.Ext(filePath))
if typ == "" {
typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+ } else {
+ if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") {
+ typ = removeCharsetInMimeType(typ)
+ }
}
return typ
}
+
+// removeCharsetInMimeType removes the charset parameter from a mime type
+func removeCharsetInMimeType(typ string) (str string) {
+ temArr := strings.Split(typ, ";")
+ var builder strings.Builder
+ for i, s := range temArr {
+ tmpStr := strings.Trim(s, " ")
+ if strings.Contains(tmpStr, "charset=") {
+ continue
+ }
+ if i == 0 {
+ builder.WriteString(s)
+ } else {
+ builder.WriteString("; " + s)
+ }
+ }
+ return builder.String()
+}
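The new `else` branch in `TypeByExtension` strips any `charset=` parameter that `mime.TypeByExtension` may return, so `text/html; charset=utf-8` becomes `text/html`. The standard library's `mime.ParseMediaType` can be used to sanity-check that expectation; the snippet below is illustrative only:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// On some platforms mime.TypeByExtension returns e.g. "text/html; charset=utf-8";
	// the new removeCharsetInMimeType trims that to "text/html". ParseMediaType shows
	// the same split of media type versus parameters.
	mediaType, params, err := mime.ParseMediaType("text/html; charset=utf-8")
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType) // text/html
	fmt.Println(params)    // map[charset:utf-8]
}
```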
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
index 51f1c31e..b0b4a502 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -19,6 +19,7 @@ func (r *Response) Read(p []byte) (n int, err error) {
return r.Body.Read(p)
}
+// Close closes the http response body
func (r *Response) Close() error {
return r.Body.Close()
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
index e2597c24..56ed8cad 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -31,8 +31,14 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
cpConf := getCpConfig(options)
routines := getRoutines(options)
+ var strVersionId string
+ versionId, _ := FindOption(options, "versionId", nil)
+ if versionId != nil {
+ strVersionId = versionId.(string)
+ }
+
if cpConf != nil && cpConf.IsEnable {
- cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
+ cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId)
if cpFilePath != "" {
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
}
@@ -42,11 +48,11 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
partSize, options, routines)
}
-func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
+func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
- cpFileName := getCpFileName(src, dest)
+ cpFileName := getCpFileName(src, dest, versionId)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
@@ -140,15 +146,15 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
partSize int64, options []Option, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
srcBucket, err := bucket.Client.Bucket(srcBucketName)
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
- payerOptions := []Option{}
- payer := getPayer(options)
- if payer != "" {
- payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
- }
+ // choose the valid options for each request
+ headerOptions := ChoiceHeadObjectOption(options)
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+ abortOptions := ChoiceAbortPartOption(options)
- meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
+ meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
if err != nil {
return err
}
@@ -173,11 +179,11 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
var completedBytes int64
totalBytes := getSrcObjectBytes(parts)
- event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+ event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start to copy workers
- arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
+ arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
@@ -193,13 +199,14 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
case part := <-results:
completed++
ups[part.PartNumber-1] = part
- completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
- event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+ copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+ completedBytes += copyBytes
+ event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
- descBucket.AbortMultipartUpload(imur, payerOptions...)
- event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+ descBucket.AbortMultipartUpload(imur, abortOptions...)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
return err
}
@@ -209,13 +216,13 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
}
}
- event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
// Complete the multipart upload
- _, err = descBucket.CompleteMultipartUpload(imur, ups, payerOptions...)
+ _, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
if err != nil {
- bucket.AbortMultipartUpload(imur, payerOptions...)
+ bucket.AbortMultipartUpload(imur, abortOptions...)
return err
}
return nil
@@ -252,7 +259,7 @@ func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
return false, nil
}
- objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return false, err
}
@@ -340,7 +347,7 @@ func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObject
cp.DestBucketName = destBucket.BucketName
cp.DestObjectKey = destObjectKey
- objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
@@ -383,13 +390,7 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
partSize int64, options []Option, cpFilePath string, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
srcBucket, err := bucket.Client.Bucket(srcBucketName)
- listener := getProgressListener(options)
-
- payerOptions := []Option{}
- payer := getPayer(options)
- if payer != "" {
- payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
- }
+ listener := GetProgressListener(options)
// Load CP data
ccp := copyCheckpoint{}
@@ -398,8 +399,12 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
os.Remove(cpFilePath)
}
- // Make sure the object is not updated.
- meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
+ // choose the valid options for each request
+ headerOptions := ChoiceHeadObjectOption(options)
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+
+ meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
if err != nil {
return err
}
@@ -426,11 +431,11 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
die := make(chan bool)
completedBytes := ccp.getCompletedBytes()
- event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
+ event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
publishProgress(listener, event)
// Start the worker coroutines
- arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
+ arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
@@ -446,12 +451,13 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
completed++
ccp.update(part)
ccp.dump(cpFilePath)
- completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
- event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size)
+ copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+ completedBytes += copyBytes
+ event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
- event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
publishProgress(listener, event)
return err
}
@@ -461,8 +467,8 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
}
}
- event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
publishProgress(listener, event)
- return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, payerOptions)
+ return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
}
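Throughout the copy paths above, `newProgressEvent` now takes a fourth argument carrying the bytes moved by the individual part, alongside the running total. A standalone sketch of events with that shape (field names here are hypothetical; only the constructor's argument order is visible in the patch):

```go
package main

import "fmt"

// progressEvent mirrors the four values now passed to newProgressEvent: event
// kind, cumulative bytes, total bytes, and the bytes moved by this event.
type progressEvent struct {
	Kind          string
	ConsumedBytes int64
	TotalBytes    int64
	DeltaBytes    int64
}

func main() {
	parts := []int64{1024, 2048, 512} // illustrative part sizes
	var total, consumed int64
	for _, p := range parts {
		total += p
	}

	events := []progressEvent{{"started", 0, total, 0}}
	for _, p := range parts {
		consumed += p
		// Each completed part reports both the running total and its own size,
		// matching the completedBytes/copyBytes pair in copyFile above.
		events = append(events, progressEvent{"data", consumed, total, p})
	}
	events = append(events, progressEvent{"completed", consumed, total, 0})

	for _, e := range events {
		fmt.Printf("%-9s %d/%d (+%d)\n", e.Kind, e.ConsumedBytes, e.TotalBytes, e.DeltaBytes)
	}
}
```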
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
index b5a3a05b..aea7fafb 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/xml"
"io"
+ "io/ioutil"
"net/http"
"net/url"
"os"
@@ -15,17 +16,20 @@ import (
//
// objectKey object name
// options the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
-// ServerSideEncryption, Meta, check out the following link:
-// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
+//
+// ServerSideEncryption, Meta, check out the following link:
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/initiatemultipartupload
//
// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
var imur InitiateMultipartUploadResult
- opts := addContentType(options, objectKey)
- params := map[string]interface{}{}
+ opts := AddContentType(options, objectKey)
+ params, _ := GetRawParams(options)
+ paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"}
+ ConvertEmptyValueToNil(params, paramKeys)
params["uploads"] = nil
+
resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
if err != nil {
return imur, err
@@ -50,7 +54,6 @@ func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option
//
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
request := &UploadPartRequest{
@@ -75,7 +78,6 @@ func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Re
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var part = UploadPart{}
@@ -104,9 +106,8 @@ func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, file
//
// UploadPartResult the result of uploading part.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
options = append(options, ContentLength(request.PartSize))
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(request.PartNumber)
@@ -123,8 +124,8 @@ func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option)
PartNumber: request.PartNumber,
}
- if bucket.getConfig().IsEnableCRC {
- err = checkCRC(resp, "DoUploadPart")
+ if bucket.GetConfig().IsEnableCRC {
+ err = CheckCRC(resp, "DoUploadPart")
if err != nil {
return &UploadPartResult{part}, err
}
@@ -141,20 +142,32 @@ func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option)
// partSize the part size
// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
// options the constraints of source object for the copy. The copy happens only when these constraints are met. Otherwise it returns error.
-// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
-// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
+//
+// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
+// https://www.alibabacloud.com/help/en/object-storage-service/latest/uploadpartcopy
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var out UploadPartCopyResult
var part UploadPart
+ var opts []Option
+
+	// first, find the version id
+ versionIdKey := "versionId"
+ versionId, _ := FindOption(options, versionIdKey, nil)
+ if versionId == nil {
+ opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
+ CopySourceRange(startPosition, partSize)}
+ } else {
+ opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)),
+ CopySourceRange(startPosition, partSize)}
+ options = DeleteOption(options, versionIdKey)
+ }
- opts := []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
- CopySourceRange(startPosition, partSize)}
opts = append(opts, options...)
+
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(partNumber)
params["uploadId"] = imur.UploadID
@@ -181,12 +194,11 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
//
// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
var out CompleteMultipartUploadResult
- sort.Sort(uploadParts(parts))
+ sort.Sort(UploadParts(parts))
cxml := completeMultipartUploadXML{}
cxml.Part = parts
bs, err := xml.Marshal(cxml)
@@ -203,8 +215,28 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
return out, err
}
defer resp.Body.Close()
-
- err = xmlUnmarshal(resp.Body, &out)
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return out, err
+ }
+ err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+ if len(body) > 0 {
+ if err != nil {
+ err = tryConvertServiceError(body, resp, err)
+ } else {
+ callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
+ if callback == nil {
+ err = xml.Unmarshal(body, &out)
+ } else {
+ rb, _ := FindOption(options, responseBody, nil)
+ if rb != nil {
+ if rbody, ok := rb.(*[]byte); ok {
+ *rbody = body
+ }
+ }
+ }
+ }
+ }
return out, err
}
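
The reworked CompleteMultipartUpload above no longer unconditionally XML-unmarshals the response: when an OSS callback header is present, the raw callback response body is handed back through the responseBody option instead. A minimal caller-side sketch, assuming the SDK's existing Callback option and a bucket handle obtained via oss.New/client.Bucket:

package example

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// completeWithCallback registers a buffer via CallbackResult so the raw callback
// response body can be read once CompleteMultipartUpload returns.
func completeWithCallback(bucket *oss.Bucket, imur oss.InitiateMultipartUploadResult, parts []oss.UploadPart) {
	// Hypothetical callback definition; OSS expects it base64-encoded.
	cb := base64.StdEncoding.EncodeToString([]byte(`{"callbackUrl":"https://example.com/notify","callbackBody":"bucket=${bucket}&object=${object}"}`))

	var cbBody []byte
	_, err := bucket.CompleteMultipartUpload(imur, parts,
		oss.Callback(cb),            // takes the callback branch in the code above
		oss.CallbackResult(&cbBody), // receives the raw callback response body
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("callback response: %s\n", cbBody)
}
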
@@ -213,7 +245,6 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
// imur the return value of InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
@@ -222,7 +253,7 @@ func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, op
return err
}
defer resp.Body.Close()
- return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// ListUploadedParts lists the uploaded parts.
@@ -231,19 +262,18 @@ func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, op
//
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
var out ListUploadedPartsResult
options = append(options, EncodingType("url"))
params := map[string]interface{}{}
- params, err := getRawParams(options)
+ params, err := GetRawParams(options)
if err != nil {
return out, err
}
params["uploadId"] = imur.UploadID
- resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil)
+ resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
if err != nil {
return out, err
}
@@ -260,22 +290,22 @@ func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, optio
// ListMultipartUploads lists all ongoing multipart upload tasks
//
// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
-// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
+//
+// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
//
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
-//
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
var out ListMultipartUploadResult
options = append(options, EncodingType("url"))
- params, err := getRawParams(options)
+ params, err := GetRawParams(options)
if err != nil {
return out, err
}
params["uploads"] = nil
- resp, err := bucket.do("GET", "", params, options, nil, nil)
+ resp, err := bucket.doInner("GET", "", params, options, nil, nil)
if err != nil {
return out, err
}
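
This closes the multipart.go changes: UploadPartCopy now honors a versionId option, switching the x-oss-copy-source header to its version-aware form and stripping the option from the query parameters. A sketch of copying a part from a specific version of the source object (bucket, object and version id are placeholders):

package example

import (
	"log"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// copyPartFromVersion copies part 1 from a specific version of the source object.
func copyPartFromVersion(bucket *oss.Bucket, imur oss.InitiateMultipartUploadResult) (oss.UploadPart, error) {
	part, err := bucket.UploadPartCopy(imur, "src-bucket", "src-object",
		0, 5*1024*1024, 1, // start position, part size (5 MiB), part number
		oss.VersionId("your-source-version-id"), // routed into CopySourceVersion above
	)
	if err != nil {
		log.Printf("UploadPartCopy failed: %v", err)
	}
	return part, err
}
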
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
index 5952f8ae..7e517ddb 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
@@ -1,8 +1,11 @@
package oss
import (
+ "context"
"fmt"
+ "io/ioutil"
"net/http"
+ "net/url"
"strconv"
"strings"
"time"
@@ -11,9 +14,11 @@ import (
type optionType string
const (
- optionParam optionType = "HTTPParameter" // URL parameter
- optionHTTP optionType = "HTTPHeader" // HTTP header
- optionArg optionType = "FuncArgument" // Function argument
+ optionParam optionType = "HTTPParameter" // URL parameter
+ optionHTTP optionType = "HTTPHeader" // HTTP header
+ optionContext optionType = "HTTPContext" // context
+ optionArg optionType = "FuncArgument" // Function argument
+
)
const (
@@ -23,6 +28,11 @@ const (
initCRC64 = "init-crc64"
progressListener = "x-progress-listener"
storageClass = "storage-class"
+ responseHeader = "x-response-header"
+ redundancyType = "redundancy-type"
+ objectHashFunc = "object-hash-func"
+ responseBody = "x-response-body"
+ contextArg = "x-context-arg"
)
type (
@@ -125,6 +135,11 @@ func CopySource(sourceBucket, sourceObject string) Option {
return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}
+// CopySourceVersion is an option to set the X-Oss-Copy-Source header, including the versionId
+func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option {
+ return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId)
+}
+
// CopySourceRange is an option to set X-Oss-Copy-Source header
func CopySourceRange(startPosition, partSize int64) Option {
val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
@@ -167,6 +182,26 @@ func ServerSideEncryptionKeyID(value string) Option {
return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
}
+// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header
+func ServerSideDataEncryption(value string) Option {
+ return setHeader(HTTPHeaderOssServerSideDataEncryption, value)
+}
+
+// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header
+func SSECAlgorithm(value string) Option {
+ return setHeader(HTTPHeaderSSECAlgorithm, value)
+}
+
+// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header
+func SSECKey(value string) Option {
+ return setHeader(HTTPHeaderSSECKey, value)
+}
+
+// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header
+func SSECKeyMd5(value string) Option {
+ return setHeader(HTTPHeaderSSECKeyMd5, value)
+}
+
// ObjectACL is an option to set X-Oss-Object-Acl header
func ObjectACL(acl ACLType) Option {
return setHeader(HTTPHeaderOssObjectACL, string(acl))
@@ -199,7 +234,83 @@ func CallbackVar(callbackVar string) Option {
// RequestPayer is an option to set the payer who pays for the request
func RequestPayer(payerType PayerType) Option {
- return setHeader(HTTPHeaderOSSRequester, string(payerType))
+ return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
+}
+
+// RequestPayerParam is an option to set the payer who pays for the request
+func RequestPayerParam(payerType PayerType) Option {
+ return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType)))
+}
+
+// SetTagging is an option to set object tagging
+func SetTagging(tagging Tagging) Option {
+ if len(tagging.Tags) == 0 {
+ return nil
+ }
+
+ taggingValue := ""
+ for index, tag := range tagging.Tags {
+ if index != 0 {
+ taggingValue += "&"
+ }
+ taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
+ }
+ return setHeader(HTTPHeaderOssTagging, taggingValue)
+}
+
+// TaggingDirective is an option to set X-Oss-Tagging-Directive header
+func TaggingDirective(directive TaggingDirectiveType) Option {
+ return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
+}
+
+// ACReqMethod is an option to set Access-Control-Request-Method header
+func ACReqMethod(value string) Option {
+ return setHeader(HTTPHeaderACReqMethod, value)
+}
+
+// ACReqHeaders is an option to set Access-Control-Request-Headers header
+func ACReqHeaders(value string) Option {
+ return setHeader(HTTPHeaderACReqHeaders, value)
+}
+
+// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit
+func TrafficLimitHeader(value int64) Option {
+ return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
+}
+
+// UserAgentHeader is an option to set HTTPHeaderUserAgent
+func UserAgentHeader(ua string) Option {
+ return setHeader(HTTPHeaderUserAgent, ua)
+}
+
+// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
+func ForbidOverWrite(forbidWrite bool) Option {
+ if forbidWrite {
+ return setHeader(HTTPHeaderOssForbidOverWrite, "true")
+ } else {
+ return setHeader(HTTPHeaderOssForbidOverWrite, "false")
+ }
+}
+
+// RangeBehavior is an option to set Range value, such as "standard"
+func RangeBehavior(value string) Option {
+ return setHeader(HTTPHeaderOssRangeBehavior, value)
+}
+
+func PartHashCtxHeader(value string) Option {
+ return setHeader(HTTPHeaderOssHashCtx, value)
+}
+
+func PartMd5CtxHeader(value string) Option {
+ return setHeader(HTTPHeaderOssMd5Ctx, value)
+}
+
+func PartHashCtxParam(value string) Option {
+ return addParam("x-oss-hash-ctx", value)
+}
+
+func PartMd5CtxParam(value string) Option {
+ return addParam("x-oss-md5-ctx", value)
}
// Delimiter is an option to set delimiter parameter
@@ -237,6 +348,26 @@ func KeyMarker(value string) Option {
return addParam("key-marker", value)
}
+// VersionIdMarker is an option to set version-id-marker parameter
+func VersionIdMarker(value string) Option {
+ return addParam("version-id-marker", value)
+}
+
+// VersionId is an option to set versionId parameter
+func VersionId(value string) Option {
+ return addParam("versionId", value)
+}
+
+// TagKey is an option to set tag key parameter
+func TagKey(value string) Option {
+ return addParam("tag-key", value)
+}
+
+// TagValue is an option to set tag value parameter
+func TagValue(value string) Option {
+ return addParam("tag-value", value)
+}
+
// UploadIDMarker is an option to set upload-id-marker parameter
func UploadIDMarker(value string) Option {
return addParam("upload-id-marker", value)
@@ -252,6 +383,57 @@ func PartNumberMarker(value int) Option {
return addParam("part-number-marker", strconv.Itoa(value))
}
+// Sequential is an option to set sequential parameter for InitiateMultipartUpload
+func Sequential() Option {
+ return addParam("sequential", "")
+}
+
+// WithHashContext is an option to set withHashContext parameter for InitiateMultipartUpload
+func WithHashContext() Option {
+ return addParam("withHashContext", "")
+}
+
+// EnableMd5 is an option to set x-oss-enable-md5 parameter for InitiateMultipartUpload
+func EnableMd5() Option {
+ return addParam("x-oss-enable-md5", "")
+}
+
+// EnableSha1 is an option to set x-oss-enable-sha1 parameter for InitiateMultipartUpload
+func EnableSha1() Option {
+ return addParam("x-oss-enable-sha1", "")
+}
+
+// EnableSha256 is an option to set x-oss-enable-sha256 parameter for InitiateMultipartUpload
+func EnableSha256() Option {
+ return addParam("x-oss-enable-sha256", "")
+}
+
+// ListType is an option to set List-type parameter for ListObjectsV2
+func ListType(value int) Option {
+ return addParam("list-type", strconv.Itoa(value))
+}
+
+// StartAfter is an option to set start-after parameter for ListObjectsV2
+func StartAfter(value string) Option {
+ return addParam("start-after", value)
+}
+
+// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2
+func ContinuationToken(value string) Option {
+ if value == "" {
+ return addParam("continuation-token", nil)
+ }
+ return addParam("continuation-token", value)
+}
+
+// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2
+func FetchOwner(value bool) Option {
+ if value {
+ return addParam("fetch-owner", "true")
+ }
+ return addParam("fetch-owner", "false")
+}
+
// DeleteObjectsQuiet false:DeleteObjects in verbose mode; true:DeleteObjects in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
return addArg(deleteObjectsQuiet, isQuiet)
@@ -262,6 +444,21 @@ func StorageClass(value StorageClassType) Option {
return addArg(storageClass, value)
}
+// RedundancyType bucket data redundancy type
+func RedundancyType(value DataRedundancyType) Option {
+ return addArg(redundancyType, value)
+}
+
+// ObjectHashFunc is an option to set the object hash function
+func ObjectHashFunc(value ObjecthashFuncType) Option {
+ return addArg(objectHashFunc, value)
+}
+
+// WithContext returns an option that sets the context for requests.
+func WithContext(ctx context.Context) Option {
+ return addArg(contextArg, ctx)
+}
+
// Checkpoint configuration
type cpConfig struct {
IsEnable bool
@@ -294,6 +491,16 @@ func Progress(listener ProgressListener) Option {
return addArg(progressListener, listener)
}
+// GetResponseHeader for get response http header
+func GetResponseHeader(respHeader *http.Header) Option {
+ return addArg(responseHeader, respHeader)
+}
+
+// CallbackResult for get response of call back
+func CallbackResult(body *[]byte) Option {
+ return addArg(responseBody, body)
+}
+
// ResponseContentType is an option to set response-content-type param
func ResponseContentType(value string) Option {
return addParam("response-content-type", value)
@@ -329,6 +536,21 @@ func Process(value string) Option {
return addParam("x-oss-process", value)
}
+// TrafficLimitParam is an option to set x-oss-traffic-limit
+func TrafficLimitParam(value int64) Option {
+ return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
+}
+
+// SetHeader allows users to set custom HTTP headers
+func SetHeader(key string, value interface{}) Option {
+ return setHeader(key, value)
+}
+
+// AddParam allows users to set custom HTTP params
+func AddParam(key string, value interface{}) Option {
+ return addParam(key, value)
+}
+
func setHeader(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
@@ -377,7 +599,7 @@ func handleOptions(headers map[string]string, options []Option) error {
return nil
}
-func getRawParams(options []Option) (map[string]interface{}, error) {
+func GetRawParams(options []Option) (map[string]interface{}, error) {
// Option
params := map[string]optionValue{}
for _, option := range options {
@@ -400,7 +622,7 @@ func getRawParams(options []Option) (map[string]interface{}, error) {
return paramsm, nil
}
-func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
+func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
@@ -416,7 +638,7 @@ func findOption(options []Option, param string, defaultVal interface{}) (interfa
return defaultVal, nil
}
-func isOptionSet(options []Option, option string) (bool, interface{}, error) {
+func IsOptionSet(options []Option, option string) (bool, interface{}, error) {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
@@ -431,3 +653,83 @@ func isOptionSet(options []Option, option string) (bool, interface{}, error) {
}
return false, nil, nil
}
+
+func DeleteOption(options []Option, strKey string) []Option {
+ var outOption []Option
+ params := map[string]optionValue{}
+ for _, option := range options {
+ if option != nil {
+ option(params)
+ _, exist := params[strKey]
+ if !exist {
+ outOption = append(outOption, option)
+ } else {
+ delete(params, strKey)
+ }
+ }
+ }
+ return outOption
+}
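+
+// DeleteOption complements the newly exported FindOption: one looks a value up by
+// its option key, the other returns the slice with that option removed, which is
+// exactly how UploadPartCopy handles "versionId". A small sketch of the two helpers
+// in isolation:
+//
+//	package example
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/aliyun/aliyun-oss-go-sdk/oss"
+//	)
+//
+//	// inspectOptions shows FindOption returning a value by key and DeleteOption
+//	// dropping that option from the slice.
+//	func inspectOptions() {
+//		opts := []oss.Option{oss.VersionId("v1"), oss.PartNumberMarker(3)}
+//
+//		v, _ := oss.FindOption(opts, "versionId", nil)
+//		fmt.Println("versionId =", v) // -> v1
+//
+//		opts = oss.DeleteOption(opts, "versionId")
+//		v, _ = oss.FindOption(opts, "versionId", nil)
+//		fmt.Println("after delete:", v) // -> <nil>
+//	}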
+
+func GetRequestId(header http.Header) string {
+ return header.Get("x-oss-request-id")
+}
+
+func GetVersionId(header http.Header) string {
+ return header.Get("x-oss-version-id")
+}
+
+func GetCopySrcVersionId(header http.Header) string {
+ return header.Get("x-oss-copy-source-version-id")
+}
+
+func GetDeleteMark(header http.Header) bool {
+ value := header.Get("x-oss-delete-marker")
+ if strings.ToUpper(value) == "TRUE" {
+ return true
+ }
+ return false
+}
+
+func GetQosDelayTime(header http.Header) string {
+ return header.Get("x-oss-qos-delay-time")
+}
+
+// AllowSameActionOverLap is an option to set the x-oss-allow-same-action-overlap header
+func AllowSameActionOverLap(enabled bool) Option {
+ if enabled {
+ return setHeader(HTTPHeaderAllowSameActionOverLap, "true")
+ } else {
+ return setHeader(HTTPHeaderAllowSameActionOverLap, "false")
+ }
+}
+
+func GetCallbackBody(options []Option, resp *Response, callbackSet bool) error {
+ var err error
+
+ // get response body
+ if callbackSet {
+ err = setBody(options, resp)
+ } else {
+ callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
+ if callback != nil {
+ err = setBody(options, resp)
+ }
+ }
+ return err
+}
+
+func setBody(options []Option, resp *Response) error {
+ respBody, _ := FindOption(options, responseBody, nil)
+ if respBody != nil && resp != nil {
+ pRespBody := respBody.(*[]byte)
+ pBody, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ if pBody != nil {
+ *pRespBody = pBody
+ }
+ }
+ return nil
+}
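
The option.go additions also let callers capture response headers. A sketch using GetResponseHeader with the GetRequestId/GetVersionId helpers, assuming the existing PutObject API wires the responseHeader option through as in the upstream SDK:

package example

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// putWithRequestID uploads a small object and prints OSS diagnostic headers.
func putWithRequestID(bucket *oss.Bucket) error {
	var respHeader http.Header
	err := bucket.PutObject("example-object.txt", strings.NewReader("hello"),
		oss.GetResponseHeader(&respHeader))
	if err != nil {
		return err
	}
	fmt.Println("request id:", oss.GetRequestId(respHeader))
	fmt.Println("version id:", oss.GetVersionId(respHeader)) // empty unless versioning is enabled
	return nil
}
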
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
index b38d803f..1b6535ba 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
@@ -1,6 +1,8 @@
package oss
-import "io"
+import (
+ "io"
+)
// ProgressEventType defines transfer progress event type
type ProgressEventType int
@@ -8,7 +10,7 @@ type ProgressEventType int
const (
// TransferStartedEvent transfer started, set TotalBytes
TransferStartedEvent ProgressEventType = 1 + iota
- // TransferDataEvent transfer data, set ConsumedBytes anmd TotalBytes
+ // TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
TransferDataEvent
// TransferCompletedEvent transfer completed
TransferCompletedEvent
@@ -20,6 +22,7 @@ const (
type ProgressEvent struct {
ConsumedBytes int64
TotalBytes int64
+ RwBytes int64
EventType ProgressEventType
}
@@ -30,10 +33,11 @@ type ProgressListener interface {
// -------------------- Private --------------------
-func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
+func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
return &ProgressEvent{
ConsumedBytes: consumed,
TotalBytes: total,
+ RwBytes: rwBytes,
EventType: eventType}
}
@@ -78,7 +82,7 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
// Read encountered error
if err != nil && err != io.EOF {
- event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
+ event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
publishProgress(t.listener, event)
}
@@ -92,7 +96,7 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
}
// Progress
if t.listener != nil {
- event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
+ event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
publishProgress(t.listener, event)
}
// Track
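
With RwBytes added to ProgressEvent, a listener can report per-event throughput alongside the cumulative counters. A minimal ProgressListener sketch, assuming the interface's single ProgressChanged method; it would be attached to uploads or downloads with oss.Progress(&printListener{}):

package example

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// printListener prints how many bytes each event moved and the running totals.
type printListener struct{}

func (l *printListener) ProgressChanged(event *oss.ProgressEvent) {
	switch event.EventType {
	case oss.TransferDataEvent:
		fmt.Printf("+%d bytes (%d/%d)\n", event.RwBytes, event.ConsumedBytes, event.TotalBytes)
	case oss.TransferCompletedEvent:
		fmt.Println("transfer completed")
	case oss.TransferFailedEvent:
		fmt.Println("transfer failed")
	}
}
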
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
new file mode 100644
index 00000000..ea17d5bd
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
@@ -0,0 +1,12 @@
+//go:build !go1.7
+// +build !go1.7
+
+package oss
+
+import "net/http"
+
+// http.ErrUseLastResponse is only defined from go1.7 onward
+
+func disableHTTPRedirect(client *http.Client) {
+
+}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
new file mode 100644
index 00000000..5d1442dd
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
@@ -0,0 +1,13 @@
+//go:build go1.7
+// +build go1.7
+
+package oss
+
+import "net/http"
+
+// http.ErrUseLastResponse is only defined from go1.7 onward
+func disableHTTPRedirect(client *http.Client) {
+ client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ }
+}
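
For reference, this is the standard-library behavior the go1.7 variant of disableHTTPRedirect relies on: returning http.ErrUseLastResponse from CheckRedirect makes the client hand back the 3xx response itself instead of following it (the URL below is only a placeholder):

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	resp, err := client.Get("http://example.com/redirect")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// With redirects disabled, a 3xx status and its Location header are visible here.
	fmt.Println(resp.StatusCode, resp.Header.Get("Location"))
}
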
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
new file mode 100644
index 00000000..2e0da463
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
@@ -0,0 +1,197 @@
+package oss
+
+import (
+ "bytes"
+ "encoding/xml"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+)
+
+// CreateSelectCsvObjectMeta creates the csv object meta
+//
+// key the object key.
+// csvMeta the csv file meta
+// options the options for create csv Meta of the object.
+//
+// MetaEndFrameCSV the csv file meta info
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
+ var endFrame MetaEndFrameCSV
+ params := map[string]interface{}{}
+ params["x-oss-process"] = "csv/meta"
+
+ csvMeta.encodeBase64()
+ bs, err := xml.Marshal(csvMeta)
+ if err != nil {
+ return endFrame, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return endFrame, err
+ }
+ defer resp.Body.Close()
+
+ _, err = ioutil.ReadAll(resp)
+
+ return resp.Frame.MetaEndFrameCSV, err
+}
+
+// CreateSelectJsonObjectMeta creates the json object meta
+//
+// key the object key.
+// jsonMeta the json file meta
+// options the options for create json Meta of the object.
+//
+// MetaEndFrameJSON the json file meta info
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
+ var endFrame MetaEndFrameJSON
+ params := map[string]interface{}{}
+ params["x-oss-process"] = "json/meta"
+
+ bs, err := xml.Marshal(jsonMeta)
+ if err != nil {
+ return endFrame, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return endFrame, err
+ }
+ defer resp.Body.Close()
+
+ _, err = ioutil.ReadAll(resp)
+
+ return resp.Frame.MetaEndFrameJSON, err
+}
+
+// SelectObject is the select object api; it supports csv and json files.
+//
+// key the object key.
+// selectReq the request data for select object
+// options the options for select file of the object.
+//
+// io.ReadCloser reader instance for reading data from the response. Close() must be called after use, and it is only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
+ params := map[string]interface{}{}
+ if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
+ params["x-oss-process"] = "csv/select" // default select csv file
+ } else {
+ params["x-oss-process"] = "json/select"
+ }
+ selectReq.encodeBase64()
+ bs, err := xml.Marshal(selectReq)
+ if err != nil {
+ return nil, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return nil, err
+ }
+ if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
+ resp.Frame.EnablePayloadCrc = true
+ }
+ resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"
+
+ return resp, err
+}
+
+// DoPostSelectObject is the SelectObject/CreateMeta api; it supports csv and json files.
+//
+// key the object key.
+// params the oss process params: csv/meta, json/meta, csv/select, json/select.
+// buf the request data trans to buffer.
+// options the options for select file of the object.
+//
+// SelectObjectResponse the response of select object.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) {
+ resp, err := bucket.do("POST", key, params, options, buf, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &SelectObjectResponse{
+ Body: resp.Body,
+ StatusCode: resp.StatusCode,
+ Frame: SelectObjectResult{},
+ }
+ result.Headers = resp.Headers
+ // result.Frame = SelectObjectResult{}
+ result.ReadTimeOut = bucket.GetConfig().Timeout
+
+ // Progress
+ listener := GetProgressListener(options)
+
+ // CRC32
+ crcCalc := crc32.NewIEEE()
+ result.WriterForCheckCrc32 = crcCalc
+ result.Body = TeeReader(resp.Body, nil, 0, listener, nil)
+
+ err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK})
+
+ return result, err
+}
+
+// SelectObjectIntoFile is the selectObject to file api
+//
+// key the object key.
+// fileName the name of the local file to save the selected result to.
+// selectReq the request data for select object
+// options the options for select file of the object.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error {
+ tempFilePath := fileName + TempFileSuffix
+
+ params := map[string]interface{}{}
+ if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
+ params["x-oss-process"] = "csv/select" // default select csv file
+ } else {
+ params["x-oss-process"] = "json/select"
+ }
+ selectReq.encodeBase64()
+ bs, err := xml.Marshal(selectReq)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Close()
+
+ // If the local file does not exist, create a new one. If it exists, overwrite it.
+ fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+ if err != nil {
+ return err
+ }
+
+ // Copy the data to the local file path.
+ _, err = io.Copy(fd, resp)
+ fd.Close()
+ if err != nil {
+ return err
+ }
+
+ return os.Rename(tempFilePath, fileName)
+}
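
A caller-side sketch of SelectObjectIntoFile, assuming SelectRequest carries an Expression field for the SQL text (that field is defined elsewhere in the SDK, not in this diff). Leaving the JSON input empty keeps the request on the default csv/select path chosen above, and the result is written via the temp-file-then-rename logic:

package example

import (
	"log"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// selectCsvToFile runs a (hypothetical) SQL expression against a CSV object and
// saves the selected rows to a local file.
func selectCsvToFile(bucket *oss.Bucket) {
	req := oss.SelectRequest{}
	req.Expression = "select * from ossobject limit 10" // assumed field, see note above

	if err := bucket.SelectObjectIntoFile("sample.csv", "sample-selected.csv", req); err != nil {
		log.Fatal(err)
	}
}
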
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
new file mode 100644
index 00000000..75a45455
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
@@ -0,0 +1,365 @@
+package oss
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "net/http"
+ "time"
+)
+
+// The adapter class for Select object's response.
+// The response consists of frames. Each frame has the following format:
+
+// Type | Payload Length | Header Checksum | Payload | Payload Checksum
+
+// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes--------->
+// And we have three kind of frames.
+// Data Frame:
+// Type:8388609
+// Payload: Offset | Data
+// <-8 bytes>
+
+// Continuous Frame
+// Type:8388612
+// Payload: Offset (8-bytes)
+
+// End Frame
+// Type:8388613
+// Payload: Offset | total scanned bytes | http status code | error message
+// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe--->
+
+// SelectObjectResponse defines HTTP response from OSS SelectObject
+type SelectObjectResponse struct {
+ StatusCode int
+ Headers http.Header
+ Body io.ReadCloser
+ Frame SelectObjectResult
+ ReadTimeOut uint
+ ClientCRC32 uint32
+ ServerCRC32 uint32
+ WriterForCheckCrc32 hash.Hash32
+ Finish bool
+}
+
+func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) {
+ n, err = sr.readFrames(p)
+ return
+}
+
+// Close closes the http response body
+func (sr *SelectObjectResponse) Close() error {
+ return sr.Body.Close()
+}
+
+// PostSelectResult wraps the response of SelectObject
+type PostSelectResult struct {
+ Response *SelectObjectResponse
+}
+
+// readFrames reads the response frames into p
+func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) {
+ var nn int
+ var err error
+ var checkValid bool
+ if sr.Frame.OutputRawData == true {
+ nn, err = sr.Body.Read(p)
+ return nn, err
+ }
+
+ if sr.Finish {
+ return 0, io.EOF
+ }
+
+ for {
+		// if this frame's header has already been read, do not read it again
+ if sr.Frame.OpenLine != true {
+ err = sr.analysisHeader()
+ if err != nil {
+ return nn, err
+ }
+ }
+
+ if sr.Frame.FrameType == DataFrameType {
+ n, err := sr.analysisData(p[nn:])
+ if err != nil {
+ return nn, err
+ }
+ nn += n
+
+			// once all of this frame's data has been read, reset the frame so the next frame can be read
+ if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 {
+ checkValid, err = sr.checkPayloadSum()
+ if err != nil || !checkValid {
+ return nn, fmt.Errorf("%s", err.Error())
+ }
+ sr.emptyFrame()
+ }
+
+ if nn == len(p) {
+ return nn, nil
+ }
+ } else if sr.Frame.FrameType == ContinuousFrameType {
+ checkValid, err = sr.checkPayloadSum()
+ if err != nil || !checkValid {
+ return nn, fmt.Errorf("%s", err.Error())
+ }
+ sr.Frame.OpenLine = false
+ } else if sr.Frame.FrameType == EndFrameType {
+ err = sr.analysisEndFrame()
+ if err != nil {
+ return nn, err
+ }
+ checkValid, err = sr.checkPayloadSum()
+ if checkValid {
+ sr.Finish = true
+ }
+ return nn, err
+ } else if sr.Frame.FrameType == MetaEndFrameCSVType {
+ err = sr.analysisMetaEndFrameCSV()
+ if err != nil {
+ return nn, err
+ }
+ checkValid, err = sr.checkPayloadSum()
+ if checkValid {
+ sr.Finish = true
+ }
+ return nn, err
+ } else if sr.Frame.FrameType == MetaEndFrameJSONType {
+ err = sr.analysisMetaEndFrameJSON()
+ if err != nil {
+ return nn, err
+ }
+ checkValid, err = sr.checkPayloadSum()
+ if checkValid {
+ sr.Finish = true
+ }
+ return nn, err
+ }
+ }
+ return nn, nil
+}
+
+type chanReadIO struct {
+ readLen int
+ err error
+}
+
+func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) {
+ r := sr.Body
+ ch := make(chan chanReadIO, 1)
+ defer close(ch)
+ go func(p []byte) {
+ var needReadLength int
+ readChan := chanReadIO{}
+ needReadLength = len(p)
+ for {
+ n, err := r.Read(p[readChan.readLen:needReadLength])
+ readChan.readLen += n
+ if err != nil {
+ readChan.err = err
+ ch <- readChan
+ return
+ }
+
+ if readChan.readLen == needReadLength {
+ break
+ }
+ }
+ ch <- readChan
+ }(p)
+
+ select {
+ case <-time.After(time.Second * timeOut):
+ return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p))
+ case result := <-ch:
+ return result.readLen, result.err
+ }
+}
+
+// analysisHeader reads the frame header of the selectObject response body
+func (sr *SelectObjectResponse) analysisHeader() error {
+ headFrameByte := make([]byte, 20)
+ _, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut))
+ if err != nil {
+ return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
+ }
+
+ frameTypeByte := headFrameByte[0:4]
+ sr.Frame.Version = frameTypeByte[0]
+ frameTypeByte[0] = 0
+ bytesToInt(frameTypeByte, &sr.Frame.FrameType)
+
+ if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType &&
+ sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType {
+ return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType)
+ }
+
+ payloadLengthByte := headFrameByte[4:8]
+ bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength)
+ headCheckSumByte := headFrameByte[8:12]
+ bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum)
+ byteOffset := headFrameByte[12:20]
+ bytesToInt(byteOffset, &sr.Frame.Offset)
+ sr.Frame.OpenLine = true
+
+ err = sr.writerCheckCrc32(byteOffset)
+ return err
+}
+
+// analysisData reads the DataFrameType payload of the selectObject response body
+func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) {
+ var needReadLength int32
+ lenP := int32(len(p))
+ restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength
+ if lenP <= restByteLength {
+ needReadLength = lenP
+ } else {
+ needReadLength = restByteLength
+ }
+ n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut))
+ if err != nil {
+ return n, fmt.Errorf("read frame data error,%s", err.Error())
+ }
+ sr.Frame.ConsumedBytesLength += int32(n)
+ err = sr.writerCheckCrc32(p[:n])
+ return n, err
+}
+
+// analysisEndFrame reads the EndFrameType payload of the selectObject response body
+func (sr *SelectObjectResponse) analysisEndFrame() error {
+ var eF EndFrame
+ payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
+ _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
+ if err != nil {
+ return fmt.Errorf("read end frame error:%s", err.Error())
+ }
+ bytesToInt(payLoadBytes[0:8], &eF.TotalScanned)
+ bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode)
+ errMsgLength := sr.Frame.PayloadLength - 20
+ eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12])
+ sr.Frame.EndFrame.TotalScanned = eF.TotalScanned
+ sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode
+ sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg
+ err = sr.writerCheckCrc32(payLoadBytes)
+ return err
+}
+
+// analysisMetaEndFrameCSV reads the MetaEndFrameCSVType payload of the selectObject response body
+func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error {
+ var mCF MetaEndFrameCSV
+ payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
+ _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
+ if err != nil {
+ return fmt.Errorf("read meta end csv frame error:%s", err.Error())
+ }
+
+ bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned)
+ bytesToInt(payLoadBytes[8:12], &mCF.Status)
+ bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount)
+ bytesToInt(payLoadBytes[16:24], &mCF.RowsCount)
+ bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount)
+ errMsgLength := sr.Frame.PayloadLength - 36
+ mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28])
+ sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg
+ sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
+ sr.Frame.MetaEndFrameCSV.Status = mCF.Status
+ sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
+ sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
+ sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
+ err = sr.writerCheckCrc32(payLoadBytes)
+ return err
+}
+
+// analysisMetaEndFrameJSON reads the MetaEndFrameJSONType payload of the selectObject response body
+func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
+ var mJF MetaEndFrameJSON
+ payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
+ _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
+ if err != nil {
+ return fmt.Errorf("read meta end json frame error:%s", err.Error())
+ }
+
+ bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
+ bytesToInt(payLoadBytes[8:12], &mJF.Status)
+ bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
+ bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
+ errMsgLength := sr.Frame.PayloadLength - 32
+ mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
+ sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
+ sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
+ sr.Frame.MetaEndFrameJSON.Status = mJF.Status
+ sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
+ sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount
+
+ err = sr.writerCheckCrc32(payLoadBytes)
+ return err
+}
+
+func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
+ payLoadChecksumByte := make([]byte, 4)
+ n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
+ if n == 4 {
+ bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
+ sr.ServerCRC32 = sr.Frame.PayloadChecksum
+ sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
+ if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
+ return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d",
+ sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
+ }
+ return true, err
+ }
+ return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
+}
+
+func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
+ err = nil
+ if sr.Frame.EnablePayloadCrc == true {
+ _, err = sr.WriterForCheckCrc32.Write(p)
+ }
+ return err
+}
+
+// emptyFrame resets the SelectObjectResponse frame information
+func (sr *SelectObjectResponse) emptyFrame() {
+ crcCalc := crc32.NewIEEE()
+ sr.WriterForCheckCrc32 = crcCalc
+ sr.Finish = false
+
+ sr.Frame.ConsumedBytesLength = 0
+ sr.Frame.OpenLine = false
+ sr.Frame.Version = byte(0)
+ sr.Frame.FrameType = 0
+ sr.Frame.PayloadLength = 0
+ sr.Frame.HeaderCheckSum = 0
+ sr.Frame.Offset = 0
+ sr.Frame.Data = ""
+
+ sr.Frame.EndFrame.TotalScanned = 0
+ sr.Frame.EndFrame.HTTPStatusCode = 0
+ sr.Frame.EndFrame.ErrorMsg = ""
+
+ sr.Frame.MetaEndFrameCSV.TotalScanned = 0
+ sr.Frame.MetaEndFrameCSV.Status = 0
+ sr.Frame.MetaEndFrameCSV.SplitsCount = 0
+ sr.Frame.MetaEndFrameCSV.RowsCount = 0
+ sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
+ sr.Frame.MetaEndFrameCSV.ErrorMsg = ""
+
+ sr.Frame.MetaEndFrameJSON.TotalScanned = 0
+ sr.Frame.MetaEndFrameJSON.Status = 0
+ sr.Frame.MetaEndFrameJSON.SplitsCount = 0
+ sr.Frame.MetaEndFrameJSON.RowsCount = 0
+ sr.Frame.MetaEndFrameJSON.ErrorMsg = ""
+
+ sr.Frame.PayloadChecksum = 0
+}
+
+// bytesToInt converts a big-endian byte slice into an integer value
+func bytesToInt(b []byte, ret interface{}) {
+ binBuf := bytes.NewBuffer(b)
+ binary.Read(binBuf, binary.BigEndian, ret)
+}
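
The 20-byte frame header described in the comment block at the top of this file decodes as version (1 byte), frame type (3 bytes, read as a zero-padded 4-byte integer), payload length (4 bytes), header checksum (4 bytes) and offset (8 bytes), all big-endian. A standalone illustration using only the standard library, mirroring what analysisHeader and bytesToInt do:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	head := []byte{
		0x01, 0x80, 0x00, 0x01, // version 1, frame type 0x800001 (data frame, 8388609)
		0x00, 0x00, 0x00, 0x10, // payload length 16
		0x00, 0x00, 0x00, 0x00, // header checksum (unused here)
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // offset 8
	}

	version := head[0]
	typeBytes := append([]byte{0}, head[1:4]...) // zero the version byte, as analysisHeader does
	var frameType, payloadLen, checksum int32
	var offset int64
	binary.Read(bytes.NewBuffer(typeBytes), binary.BigEndian, &frameType)
	binary.Read(bytes.NewBuffer(head[4:8]), binary.BigEndian, &payloadLen)
	binary.Read(bytes.NewBuffer(head[8:12]), binary.BigEndian, &checksum)
	binary.Read(bytes.NewBuffer(head[12:20]), binary.BigEndian, &offset)

	fmt.Println(version, frameType, payloadLen, checksum, offset) // 1 8388609 16 0 8
}
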
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
index e6de4cdd..08a83a00 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
@@ -1,10 +1,13 @@
+//go:build !go1.7
// +build !go1.7
package oss
import (
+ "crypto/tls"
"net"
"net/http"
+ "time"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
@@ -13,7 +16,14 @@ func newTransport(conn *Conn, config *Config) *http.Transport {
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
- conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+ d := net.Dialer{
+ Timeout: httpTimeOut.ConnectTimeout,
+ KeepAlive: 30 * time.Second,
+ }
+ if config.LocalAddr != nil {
+ d.LocalAddr = config.LocalAddr
+ }
+ conn, err := d.Dial(netw, addr)
if err != nil {
return nil, err
}
@@ -22,5 +32,11 @@ func newTransport(conn *Conn, config *Config) *http.Transport {
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
+
+ if config.InsecureSkipVerify {
+ transport.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+ }
return transport
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
index 006ea47a..1acb8415 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
@@ -1,10 +1,13 @@
+//go:build go1.7
// +build go1.7
package oss
import (
+ "crypto/tls"
"net"
"net/http"
+ "time"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
@@ -13,7 +16,14 @@ func newTransport(conn *Conn, config *Config) *http.Transport {
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
- conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+ d := net.Dialer{
+ Timeout: httpTimeOut.ConnectTimeout,
+ KeepAlive: 30 * time.Second,
+ }
+ if config.LocalAddr != nil {
+ d.LocalAddr = config.LocalAddr
+ }
+ conn, err := d.Dial(netw, addr)
if err != nil {
return nil, err
}
@@ -21,8 +31,15 @@ func newTransport(conn *Conn, config *Config) *http.Transport {
},
MaxIdleConns: httpMaxConns.MaxIdleConns,
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
+ MaxConnsPerHost: httpMaxConns.MaxConnsPerHost,
IdleConnTimeout: httpTimeOut.IdleConnTimeout,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
+
+ if config.InsecureSkipVerify {
+ transport.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+ }
return transport
}
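
Both transport variants now build the connection through a net.Dialer with a keep-alive and an optional local address, and can skip TLS verification. A standard-library-only sketch of the same construction (localAddr and insecureSkipVerify stand in for the SDK's config fields):

package example

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

// newExampleTransport mirrors the dialer and TLS handling added above.
func newExampleTransport(localAddr net.Addr, insecureSkipVerify bool) *http.Transport {
	d := net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		LocalAddr: localAddr, // nil lets the OS pick the source address
	}
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			return d.Dial(netw, addr)
		},
		MaxIdleConnsPerHost:   100,
		ResponseHeaderTimeout: 60 * time.Second,
	}
	if insecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return transport
}
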
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
index d205d9ac..a704e3ce 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
@@ -1,8 +1,12 @@
package oss
import (
+ "encoding/base64"
"encoding/xml"
+ "fmt"
"net/url"
+ "strconv"
+ "strings"
"time"
)
@@ -25,6 +29,29 @@ type BucketProperties struct {
Location string `xml:"Location"` // Bucket datacenter
CreationDate time.Time `xml:"CreationDate"` // Bucket create time
StorageClass string `xml:"StorageClass"` // Bucket storage class
+ Region string `xml:"Region"` // Bucket region
+}
+
+// ListCloudBoxResult defines the result object from ListCloudBoxes request
+type ListCloudBoxResult struct {
+ XMLName xml.Name `xml:"ListCloudBoxResult"`
+ Prefix string `xml:"Prefix"` // The prefix in this query
+ Marker string `xml:"Marker"` // The marker filter
+ MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true.
+ IsTruncated bool `xml:"IsTruncated"` // Flag true means there's remaining cloudboxes to return.
+ NextMarker string `xml:"NextMarker"` // The marker filter for the next list call
+ Owner string `xml:"Owner>DisplayName"` // The owner information
+ CloudBoxes []CloudBoxProperties `xml:"CloudBoxes>CloudBox"` // The cloudbox list
+}
+
+// CloudBoxProperties defines cloudbox properties
+type CloudBoxProperties struct {
+ XMLName xml.Name `xml:"CloudBox"`
+ ID string `xml:"ID"`
+ Name string `xml:"Name"`
+ Region string `xml:"Region"`
+ ControlEndpoint string `xml:"ControlEndpoint"`
+ DataEndpoint string `xml:"DataEndpoint"`
}
// GetBucketACLResult defines GetBucketACL request's result
@@ -42,77 +69,139 @@ type LifecycleConfiguration struct {
// LifecycleRule defines Lifecycle rules
type LifecycleRule struct {
- XMLName xml.Name `xml:"Rule"`
- ID string `xml:"ID"` // The rule ID
- Prefix string `xml:"Prefix"` // The object key prefix
- Status string `xml:"Status"` // The rule status (enabled or not)
- Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
+ XMLName xml.Name `xml:"Rule"`
+ ID string `xml:"ID,omitempty"` // The rule ID
+ Prefix string `xml:"Prefix"` // The object key prefix
+ Status string `xml:"Status"` // The rule status (enabled or not)
+ Tags []Tag `xml:"Tag,omitempty"` // the tags property
+ Expiration *LifecycleExpiration `xml:"Expiration,omitempty"` // The expiration property
+ Transitions []LifecycleTransition `xml:"Transition,omitempty"` // The transition property
+ AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property
+ NonVersionExpiration *LifecycleVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
+ // Deprecated: Use NonVersionTransitions instead.
+ NonVersionTransition *LifecycleVersionTransition `xml:"-"` // NonVersionTransition is not suggested to use
+ NonVersionTransitions []LifecycleVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
+	Filter               *LifecycleFilter               `xml:"Filter,omitempty"` // condition parameter container of this exclusion rule
}
// LifecycleExpiration defines the rule's expiration property
type LifecycleExpiration struct {
- XMLName xml.Name `xml:"Expiration"`
- Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
- Date time.Time `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date.
+ XMLName xml.Name `xml:"Expiration"`
+ Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
+ Date string `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date, not recommended
+ CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired
+ ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"` // Specifies whether the expired delete tag is automatically deleted
}
-type lifecycleXML struct {
- XMLName xml.Name `xml:"LifecycleConfiguration"`
- Rules []lifecycleRule `xml:"Rule"`
+// LifecycleTransition defines the rule's transition property
+type LifecycleTransition struct {
+ XMLName xml.Name `xml:"Transition"`
+ Days int `xml:"Days,omitempty"` // Relative transition time: The transition time in days after the last modified time
+ CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired
+ StorageClass StorageClassType `xml:"StorageClass,omitempty"` // Specifies the target storage type
+ IsAccessTime *bool `xml:"IsAccessTime,omitempty"` // access time
+ ReturnToStdWhenVisit *bool `xml:"ReturnToStdWhenVisit,omitempty"` // Return To Std When Visit
+	AllowSmallFile       *bool            `xml:"AllowSmallFile,omitempty"`
}
-type lifecycleRule struct {
- XMLName xml.Name `xml:"Rule"`
- ID string `xml:"ID"`
- Prefix string `xml:"Prefix"`
- Status string `xml:"Status"`
- Expiration lifecycleExpiration `xml:"Expiration"`
+// LifecycleAbortMultipartUpload defines the rule's abort multipart upload property
+type LifecycleAbortMultipartUpload struct {
+ XMLName xml.Name `xml:"AbortMultipartUpload"`
+ Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
+ CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired
}
-type lifecycleExpiration struct {
- XMLName xml.Name `xml:"Expiration"`
- Days int `xml:"Days,omitempty"`
- Date string `xml:"Date,omitempty"`
+// LifecycleVersionExpiration defines the rule's NoncurrentVersionExpiration property
+type LifecycleVersionExpiration struct {
+ XMLName xml.Name `xml:"NoncurrentVersionExpiration"`
+ NoncurrentDays int `xml:"NoncurrentDays,omitempty"` // How many days after the Object becomes a non-current version
}
-const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+// LifecycleVersionTransition defines the rule's NoncurrentVersionTransition property
+type LifecycleVersionTransition struct {
+ XMLName xml.Name `xml:"NoncurrentVersionTransition"`
+ NoncurrentDays int `xml:"NoncurrentDays,omitempty"` // How many days after the Object becomes a non-current version
+ StorageClass StorageClassType `xml:"StorageClass,omitempty"`
+ IsAccessTime *bool `xml:"IsAccessTime,omitempty"` // access time
+ ReturnToStdWhenVisit *bool `xml:"ReturnToStdWhenVisit,omitempty"` // Return To Std When Visit
+	AllowSmallFile       *bool            `xml:"AllowSmallFile,omitempty"`
+}
-func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
- rs := []lifecycleRule{}
- for _, rule := range rules {
- r := lifecycleRule{}
- r.ID = rule.ID
- r.Prefix = rule.Prefix
- r.Status = rule.Status
- if rule.Expiration.Date.IsZero() {
- r.Expiration.Days = rule.Expiration.Days
- } else {
- r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
- }
- rs = append(rs, r)
- }
- return rs
+// LifecycleFilter defines the rule's Filter property
+type LifecycleFilter struct {
+ XMLName xml.Name `xml:"Filter"`
+ Not []LifecycleFilterNot `xml:"Not,omitempty"`
+ ObjectSizeGreaterThan *int64 `xml:"ObjectSizeGreaterThan,omitempty"`
+ ObjectSizeLessThan *int64 `xml:"ObjectSizeLessThan,omitempty"`
}
-// BuildLifecycleRuleByDays builds a lifecycle rule with specified expiration days
+// LifecycleFilterNot defines the rule's Filter Not property
+type LifecycleFilterNot struct {
+ XMLName xml.Name `xml:"Not"`
+ Prefix string `xml:"Prefix"` //Object prefix applicable to this exclusion rule
+ Tag *Tag `xml:"Tag,omitempty"` //the tags applicable to this exclusion rule
+}
+
+const iso8601DateFormat = "2006-01-02T15:04:05.000Z"
+const iso8601DateFormatSecond = "2006-01-02T15:04:05Z"
+
+// BuildLifecycleRuleByDays builds a lifecycle rule whose objects expire the given number of days after the last modified time
func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
var statusStr = "Enabled"
if !status {
statusStr = "Disabled"
}
return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
- Expiration: LifecycleExpiration{Days: days}}
+ Expiration: &LifecycleExpiration{Days: days}}
}
-// BuildLifecycleRuleByDate builds a lifecycle rule with specified expiration time.
+// BuildLifecycleRuleByDate builds a lifecycle rule whose objects expire on the specified date
func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
var statusStr = "Enabled"
if !status {
statusStr = "Disabled"
}
- date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+ date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat)
return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
- Expiration: LifecycleExpiration{Date: date}}
+ Expiration: &LifecycleExpiration{Date: date}}
+}
+
+// verifyLifecycleRules determines whether the lifecycle rules are valid; if they are invalid, it returns an error.
+func verifyLifecycleRules(rules []LifecycleRule) error {
+ if len(rules) == 0 {
+ return fmt.Errorf("invalid rules, the length of rules is zero")
+ }
+ for k, rule := range rules {
+ if rule.Status != "Enabled" && rule.Status != "Disabled" {
+ return fmt.Errorf("invalid rule, the value of status must be Enabled or Disabled")
+ }
+
+ abortMPU := rule.AbortMultipartUpload
+ if abortMPU != nil {
+ if (abortMPU.Days != 0 && abortMPU.CreatedBeforeDate != "") || (abortMPU.Days == 0 && abortMPU.CreatedBeforeDate == "") {
+ return fmt.Errorf("invalid abort multipart upload lifecycle, must be set one of CreatedBeforeDate and Days")
+ }
+ }
+
+ transitions := rule.Transitions
+ if len(transitions) > 0 {
+ for _, transition := range transitions {
+ if (transition.Days != 0 && transition.CreatedBeforeDate != "") || (transition.Days == 0 && transition.CreatedBeforeDate == "") {
+ return fmt.Errorf("invalid transition lifecycle, must be set one of CreatedBeforeDate and Days")
+ }
+ }
+ }
+
+ // NonVersionTransition is not suggested to use
+ // to keep compatible
+ if rule.NonVersionTransition != nil && len(rule.NonVersionTransitions) > 0 {
+ return fmt.Errorf("NonVersionTransition and NonVersionTransitions cannot both have values")
+ } else if rule.NonVersionTransition != nil {
+ rules[k].NonVersionTransitions = append(rules[k].NonVersionTransitions, *rule.NonVersionTransition)
+ }
+ }
+
+ return nil
}
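
For context, a rule that passes verifyLifecycleRules above: Status is "Enabled", and the transition and abort-multipart-upload settings each carry exactly one of Days / CreatedBeforeDate, as the checks require. The "IA" storage class is written as a plain cast rather than a named SDK constant:

package example

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// exampleLifecycleRule expires log objects after a year, moves them to a colder
// storage class after 30 days and cleans up stale multipart uploads after 7 days.
func exampleLifecycleRule() oss.LifecycleRule {
	return oss.LifecycleRule{
		ID:     "rotate-logs",
		Prefix: "logs/",
		Status: "Enabled",
		Transitions: []oss.LifecycleTransition{
			{Days: 30, StorageClass: oss.StorageClassType("IA")},
		},
		AbortMultipartUpload: &oss.LifecycleAbortMultipartUpload{Days: 7},
		Expiration:           &oss.LifecycleExpiration{Days: 365},
	}
}
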
// GetBucketLifecycleResult defines GetBucketLifecycle's result object
@@ -120,14 +209,20 @@ type GetBucketLifecycleResult LifecycleConfiguration
// RefererXML defines Referer configuration
type RefererXML struct {
- XMLName xml.Name `xml:"RefererConfiguration"`
- AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer
- RefererList []string `xml:"RefererList>Referer"` // Referer whitelist
+ XMLName xml.Name `xml:"RefererConfiguration"`
+ AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer
+ AllowTruncateQueryString *bool `xml:"AllowTruncateQueryString,omitempty"`
+ RefererList []string `xml:"RefererList>Referer"` // Referer whitelist
+ RefererBlacklist *RefererBlacklist `xml:"RefererBlacklist,omitempty"` // Referer blacklist
}
// GetBucketRefererResult defines result object for GetBucketReferer request
type GetBucketRefererResult RefererXML
+type RefererBlacklist struct {
+ Referer []string `xml:"Referer,omitempty"`
+}
+
// LoggingXML defines logging configuration
type LoggingXML struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
@@ -151,8 +246,9 @@ type GetBucketLoggingResult LoggingXML
// WebsiteXML defines Website configuration
type WebsiteXML struct {
XMLName xml.Name `xml:"WebsiteConfiguration"`
- IndexDocument IndexDocument `xml:"IndexDocument"` // The index page
- ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The error page
+ IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` // The index page
+ ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` // The error page
+ RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` // The routing Rule list
}
// IndexDocument defines the index page info
@@ -167,13 +263,71 @@ type ErrorDocument struct {
Key string `xml:"Key"` // 404 error file name
}
+// RoutingRule defines the routing rules
+type RoutingRule struct {
+ XMLName xml.Name `xml:"RoutingRule"`
+ RuleNumber int `xml:"RuleNumber,omitempty"` // The routing number
+ Condition Condition `xml:"Condition,omitempty"` // The routing condition
+ Redirect Redirect `xml:"Redirect,omitempty"` // The routing redirect
+
+}
+
+// Condition defines the condition in the RoutingRule
+type Condition struct {
+ XMLName xml.Name `xml:"Condition"`
+	KeyPrefixEquals             string          `xml:"KeyPrefixEquals,omitempty"`             // Matching object key prefix
+	HTTPErrorCodeReturnedEquals int             `xml:"HttpErrorCodeReturnedEquals,omitempty"` // The rule applies when accessing the object returns this HTTP error code
+	IncludeHeader               []IncludeHeader `xml:"IncludeHeader"`                         // The rule applies to requests that include these headers
+}
+
+// IncludeHeader defines includeHeader in the RoutingRule's Condition
+type IncludeHeader struct {
+ XMLName xml.Name `xml:"IncludeHeader"`
+ Key string `xml:"Key,omitempty"` // The Include header key
+ Equals string `xml:"Equals,omitempty"` // The Include header value
+}
+
+// Redirect defines redirect in the RoutingRule
+type Redirect struct {
+ XMLName xml.Name `xml:"Redirect"`
+ RedirectType string `xml:"RedirectType,omitempty"` // The redirect type: Mirror, External, Internal or AliCDN
+ PassQueryString *bool `xml:"PassQueryString"` // Whether to forward the request's query string, true or false
+ MirrorURL string `xml:"MirrorURL,omitempty"` // The origin URL that Mirror mode fetches from
+ MirrorPassQueryString *bool `xml:"MirrorPassQueryString"` // Whether Mirror mode forwards the request's query string, true or false
+ MirrorFollowRedirect *bool `xml:"MirrorFollowRedirect"` // Whether Mirror mode follows the redirect when the origin returns 3xx
+ MirrorCheckMd5 *bool `xml:"MirrorCheckMd5"` // Whether Mirror mode verifies the origin response's MD5
+ MirrorHeaders MirrorHeaders `xml:"MirrorHeaders,omitempty"` // Mirror headers
+ Protocol string `xml:"Protocol,omitempty"` // The redirect Protocol
+ HostName string `xml:"HostName,omitempty"` // The redirect HostName
+ ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"` // The value that replaces the matched object-name prefix
+ HttpRedirectCode int `xml:"HttpRedirectCode,omitempty"` // The redirect HTTP status code
+ ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"` // The value that replaces the whole object name
+}
+
+// MirrorHeaders defines MirrorHeaders in the Redirect
+type MirrorHeaders struct {
+ XMLName xml.Name `xml:"MirrorHeaders"`
+ PassAll *bool `xml:"PassAll"` // Pass all request headers through to the origin
+ Pass []string `xml:"Pass"` // Headers to pass through to the origin
+ Remove []string `xml:"Remove"` // Headers that must not be passed through to the origin
+ Set []MirrorHeaderSet `xml:"Set"` // Headers to set on the request sent to the origin
+}
+
+// MirrorHeaderSet defines Set for Redirect's MirrorHeaders
+type MirrorHeaderSet struct {
+ XMLName xml.Name `xml:"Set"`
+ Key string `xml:"Key,omitempty"` // The mirror header key
+ Value string `xml:"Value,omitempty"` // The mirror header value
+}
+
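// Illustrative sketch, not part of this patch: building a WebsiteXML with one
// Mirror-style RoutingRule and printing the XML it marshals to. The bucket,
// mirror URL and key names below are hypothetical placeholders.
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	passQuery := true
	cfg := oss.WebsiteXML{
		IndexDocument: oss.IndexDocument{Suffix: "index.html"},
		ErrorDocument: oss.ErrorDocument{Key: "error.html"},
		RoutingRules: []oss.RoutingRule{{
			RuleNumber: 1,
			Condition:  oss.Condition{KeyPrefixEquals: "docs/", HTTPErrorCodeReturnedEquals: 404},
			Redirect: oss.Redirect{
				RedirectType:    "Mirror",
				MirrorURL:       "https://mirror.example.com/", // assumed origin, placeholder only
				PassQueryString: &passQuery,
			},
		}},
	}
	out, _ := xml.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out)) // <WebsiteConfiguration>...</WebsiteConfiguration>
}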
// GetBucketWebsiteResult defines the result from GetBucketWebsite request.
type GetBucketWebsiteResult WebsiteXML
// CORSXML defines CORS configuration
type CORSXML struct {
- XMLName xml.Name `xml:"CORSConfiguration"`
- CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
+ XMLName xml.Name `xml:"CORSConfiguration"`
+ CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
+ ResponseVary *bool `xml:"ResponseVary,omitempty"` // Whether to return the Vary header
}
// CORSRule defines CORS rules
@@ -189,6 +343,9 @@ type CORSRule struct {
// GetBucketCORSResult defines the result from GetBucketCORS request.
type GetBucketCORSResult CORSXML
+// PutBucketCORS defines the PutBucketCORS config xml.
+type PutBucketCORS CORSXML
+
// GetBucketInfoResult defines the result from GetBucketInfo request.
type GetBucketInfoResult struct {
XMLName xml.Name `xml:"BucketInfo"`
@@ -197,15 +354,28 @@ type GetBucketInfoResult struct {
// BucketInfo defines Bucket information
type BucketInfo struct {
- XMLName xml.Name `xml:"Bucket"`
- Name string `xml:"Name"` // Bucket name
- Location string `xml:"Location"` // Bucket datacenter
- CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
- ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint
- IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint
- ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
- Owner Owner `xml:"Owner"` // Bucket owner
- StorageClass string `xml:"StorageClass"` // Bucket storage class
+ XMLName xml.Name `xml:"Bucket"`
+ Name string `xml:"Name"` // Bucket name
+ AccessMonitor string `xml:"AccessMonitor"` // Bucket Access Monitor
+ Location string `xml:"Location"` // Bucket datacenter
+ CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
+ ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint
+ IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint
+ ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
+ RedundancyType string `xml:"DataRedundancyType"` // Bucket DataRedundancyType
+ Owner Owner `xml:"Owner"` // Bucket owner
+ StorageClass string `xml:"StorageClass"` // Bucket storage class
+ SseRule SSERule `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule
+ Versioning string `xml:"Versioning"` // Bucket Versioning
+ TransferAcceleration string `xml:"TransferAcceleration"` // bucket TransferAcceleration
+ CrossRegionReplication string `xml:"CrossRegionReplication"` // bucket CrossRegionReplication
+}
+
+type SSERule struct {
+ XMLName xml.Name `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule
+ KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` // Bucket KMSMasterKeyID
+ SSEAlgorithm string `xml:"SSEAlgorithm,omitempty"` // Bucket SSEAlgorithm
+ KMSDataEncryption string `xml:"KMSDataEncryption,omitempty"` //Bucket KMSDataEncryption
}
// ListObjectsResult defines the result from ListObjects request
@@ -224,13 +394,69 @@ type ListObjectsResult struct {
// ObjectProperties defines Objecct properties
type ObjectProperties struct {
XMLName xml.Name `xml:"Contents"`
- Key string `xml:"Key"` // Object key
- Type string `xml:"Type"` // Object type
- Size int64 `xml:"Size"` // Object size
- ETag string `xml:"ETag"` // Object ETag
- Owner Owner `xml:"Owner"` // Object owner information
+ Key string `xml:"Key"` // Object key
+ Type string `xml:"Type"` // Object type
+ Size int64 `xml:"Size"` // Object size
+ ETag string `xml:"ETag"` // Object ETag
+ Owner Owner `xml:"Owner"` // Object owner information
+ LastModified time.Time `xml:"LastModified"` // Object last modified time
+ StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
+ RestoreInfo string `xml:"RestoreInfo,omitempty"` // Object restoreInfo
+}
+
+// ListObjectsResultV2 defines the result from ListObjectsV2 request
+type ListObjectsResultV2 struct {
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Prefix string `xml:"Prefix"` // The object prefix
+ StartAfter string `xml:"StartAfter"` // the input StartAfter
+ ContinuationToken string `xml:"ContinuationToken"` // the input ContinuationToken
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return
+ Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name
+ IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false)
+ NextContinuationToken string `xml:"NextContinuationToken"` // The continuation token to pass in the next request
+ Objects []ObjectProperties `xml:"Contents"` // Object list
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter
+}
+
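// Illustrative sketch, not part of this patch: paging through a bucket with
// ListObjectsV2 and NextContinuationToken. Endpoint, credentials and bucket
// name are placeholders; it assumes the ListObjectsV2/ContinuationToken
// helpers that ship with this vendored SDK version.
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("example-bucket")
	if err != nil {
		panic(err)
	}

	token := ""
	for {
		res, err := bucket.ListObjectsV2(oss.Prefix("logs/"), oss.MaxKeys(100), oss.ContinuationToken(token))
		if err != nil {
			panic(err)
		}
		for _, obj := range res.Objects {
			fmt.Println(obj.Key, obj.Size, obj.StorageClass)
		}
		if !res.IsTruncated {
			break // all keys listed
		}
		token = res.NextContinuationToken // resume from here on the next call
	}
}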
+// ListObjectVersionsResult defines the result from ListObjectVersions request
+type ListObjectVersionsResult struct {
+ XMLName xml.Name `xml:"ListVersionsResult"`
+ Name string `xml:"Name"` // The Bucket Name
+ Owner Owner `xml:"Owner"` // The owner of bucket
+ Prefix string `xml:"Prefix"` // The object prefix
+ KeyMarker string `xml:"KeyMarker"` // The start marker filter.
+ VersionIdMarker string `xml:"VersionIdMarker"` // The start VersionIdMarker filter.
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return
+ Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name
+ IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false)
+ NextKeyMarker string `xml:"NextKeyMarker"` // The start point of the next query
+ NextVersionIdMarker string `xml:"NextVersionIdMarker"` // The start point of the next query
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter
+ ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` // DeleteMarker list
+ ObjectVersions []ObjectVersionProperties `xml:"Version"` // version list
+}
+
+type ObjectDeleteMarkerProperties struct {
+ XMLName xml.Name `xml:"DeleteMarker"`
+ Key string `xml:"Key"` // The Object Key
+ VersionId string `xml:"VersionId"` // The Object VersionId
+ IsLatest bool `xml:"IsLatest"` // is current version or not
LastModified time.Time `xml:"LastModified"` // Object last modified time
- StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
+ Owner Owner `xml:"Owner"` // bucket owner element
+}
+
+type ObjectVersionProperties struct {
+ XMLName xml.Name `xml:"Version"`
+ Key string `xml:"Key"` // The Object Key
+ VersionId string `xml:"VersionId"` // The Object VersionId
+ IsLatest bool `xml:"IsLatest"` // is latest version or not
+ LastModified time.Time `xml:"LastModified"` // Object last modified time
+ Type string `xml:"Type"` // Object type
+ Size int64 `xml:"Size"` // Object size
+ ETag string `xml:"ETag"` // Object ETag
+ StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
+ Owner Owner `xml:"Owner"` // bucket owner element
+ RestoreInfo string `xml:"RestoreInfo,omitempty"` // Object restoreInfo
}
// Owner defines Bucket/Object's owner
@@ -258,14 +484,30 @@ type deleteXML struct {
// DeleteObject defines the struct for deleting object
type DeleteObject struct {
- XMLName xml.Name `xml:"Object"`
- Key string `xml:"Key"` // Object name
+ XMLName xml.Name `xml:"Object"`
+ Key string `xml:"Key"` // Object name
+ VersionId string `xml:"VersionId,omitempty"` // Object VersionId
}
// DeleteObjectsResult defines result of DeleteObjects request
type DeleteObjectsResult struct {
- XMLName xml.Name `xml:"DeleteResult"`
- DeletedObjects []string `xml:"Deleted>Key"` // Deleted object list
+ XMLName xml.Name
+ DeletedObjects []string // Deleted object key list
+}
+
+// DeleteObjectVersionsResult defines result of DeleteObjects request
+type DeleteObjectVersionsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjectsDetail []DeletedKeyInfo `xml:"Deleted"` // Deleted object detail info
+}
+
+// DeletedKeyInfo defines object delete info
+type DeletedKeyInfo struct {
+ XMLName xml.Name `xml:"Deleted"`
+ Key string `xml:"Key"` // Object key
+ VersionId string `xml:"VersionId"` // VersionId
+ DeleteMarker bool `xml:"DeleteMarker"` // Object DeleteMarker
+ DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"` // Object DeleteMarkerVersionId
}
// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request
@@ -283,17 +525,17 @@ type UploadPart struct {
ETag string `xml:"ETag"` // ETag value of the part's data
}
-type uploadParts []UploadPart
+type UploadParts []UploadPart
-func (slice uploadParts) Len() int {
+func (slice UploadParts) Len() int {
return len(slice)
}
-func (slice uploadParts) Less(i, j int) bool {
+func (slice UploadParts) Less(i, j int) bool {
return slice[i].PartNumber < slice[j].PartNumber
}
-func (slice uploadParts) Swap(i, j int) {
+func (slice UploadParts) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
@@ -371,11 +613,18 @@ type ProcessObjectResult struct {
Status string `json:"status"`
}
+// AsyncProcessObjectResult defines result object of AsyncProcessObject
+type AsyncProcessObjectResult struct {
+ EventId string `json:"EventId"`
+ RequestId string `json:"RequestId"`
+ TaskId string `json:"TaskId"`
+}
+
// decodeDeleteObjectsResult decodes deleting objects result in URL encoding
-func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
+func decodeDeleteObjectsResult(result *DeleteObjectVersionsResult) error {
var err error
- for i := 0; i < len(result.DeletedObjects); i++ {
- result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i])
+ for i := 0; i < len(result.DeletedObjectsDetail); i++ {
+ result.DeletedObjectsDetail[i].Key, err = url.QueryUnescape(result.DeletedObjectsDetail[i].Key)
if err != nil {
return err
}
@@ -417,6 +666,107 @@ func decodeListObjectsResult(result *ListObjectsResult) error {
return nil
}
+// decodeListObjectsResultV2 decodes the ListObjectsV2 result in URL encoding
+func decodeListObjectsResultV2(result *ListObjectsResultV2) error {
+ var err error
+ result.Prefix, err = url.QueryUnescape(result.Prefix)
+ if err != nil {
+ return err
+ }
+ result.StartAfter, err = url.QueryUnescape(result.StartAfter)
+ if err != nil {
+ return err
+ }
+ result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+ if err != nil {
+ return err
+ }
+ result.NextContinuationToken, err = url.QueryUnescape(result.NextContinuationToken)
+ if err != nil {
+ return err
+ }
+ for i := 0; i < len(result.Objects); i++ {
+ result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
+ if err != nil {
+ return err
+ }
+ }
+ for i := 0; i < len(result.CommonPrefixes); i++ {
+ result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// decodeListObjectVersionsResult decodes list version objects result in URL encoding
+func decodeListObjectVersionsResult(result *ListObjectVersionsResult) error {
+ var err error
+
+ // decode:Delimiter
+ result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+ if err != nil {
+ return err
+ }
+
+ // decode Prefix
+ result.Prefix, err = url.QueryUnescape(result.Prefix)
+ if err != nil {
+ return err
+ }
+
+ // decode KeyMarker
+ result.KeyMarker, err = url.QueryUnescape(result.KeyMarker)
+ if err != nil {
+ return err
+ }
+
+ // decode VersionIdMarker
+ result.VersionIdMarker, err = url.QueryUnescape(result.VersionIdMarker)
+ if err != nil {
+ return err
+ }
+
+ // decode NextKeyMarker
+ result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker)
+ if err != nil {
+ return err
+ }
+
+ // decode NextVersionIdMarker
+ result.NextVersionIdMarker, err = url.QueryUnescape(result.NextVersionIdMarker)
+ if err != nil {
+ return err
+ }
+
+ // decode CommonPrefixes
+ for i := 0; i < len(result.CommonPrefixes); i++ {
+ result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+ if err != nil {
+ return err
+ }
+ }
+
+ // decode deleteMarker
+ for i := 0; i < len(result.ObjectDeleteMarkers); i++ {
+ result.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(result.ObjectDeleteMarkers[i].Key)
+ if err != nil {
+ return err
+ }
+ }
+
+ // decode ObjectVersions
+ for i := 0; i < len(result.ObjectVersions); i++ {
+ result.ObjectVersions[i].Key, err = url.QueryUnescape(result.ObjectVersions[i].Key)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// decodeListUploadedPartsResult decodes
func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error {
var err error
@@ -461,10 +811,39 @@ func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
return nil
}
+// marshalDeleteObjectToXml deleteXML struct to xml
+func marshalDeleteObjectToXml(dxml deleteXML) string {
+ var builder strings.Builder
+ builder.WriteString("<Delete>")
+ builder.WriteString("<Quiet>")
+ builder.WriteString(strconv.FormatBool(dxml.Quiet))
+ builder.WriteString("</Quiet>")
+ if len(dxml.Objects) > 0 {
+ for _, object := range dxml.Objects {
+ builder.WriteString("<Object><Key>")
+ builder.WriteString(EscapeXml(object.Key))
+ builder.WriteString("</Key>")
+ if object.VersionId != "" {
+ builder.WriteString("<VersionId>" + object.VersionId + "</VersionId>")
+ }
+ builder.WriteString("</Object>")
+ }
+ }
+ builder.WriteString("</Delete>")
+ return builder.String()
+}
+
// createBucketConfiguration defines the configuration for creating a bucket.
type createBucketConfiguration struct {
- XMLName xml.Name `xml:"CreateBucketConfiguration"`
- StorageClass StorageClassType `xml:"StorageClass,omitempty"`
+ XMLName xml.Name `xml:"CreateBucketConfiguration"`
+ StorageClass StorageClassType `xml:"StorageClass,omitempty"`
+ DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"`
+ ObjectHashFunction ObjecthashFuncType `xml:"ObjectHashFunction,omitempty"`
}
// LiveChannelConfiguration defines the configuration for live-channel
@@ -564,3 +943,753 @@ type LiveChannelInfo struct {
PublishUrls []string `xml:"PublishUrls>Url"` //push urls list
PlayUrls []string `xml:"PlayUrls>Url"` //play urls list
}
+
+// Tag a tag for the object
+type Tag struct {
+ XMLName xml.Name `xml:"Tag"`
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+// Tagging tag set for the object
+type Tagging struct {
+ XMLName xml.Name `xml:"Tagging"`
+ Tags []Tag `xml:"TagSet>Tag,omitempty"`
+}
+
+// GetObjectTaggingResult for GetObjectTagging return value
+type GetObjectTaggingResult Tagging
+
+// VersioningConfig for the bucket
+type VersioningConfig struct {
+ XMLName xml.Name `xml:"VersioningConfiguration"`
+ Status string `xml:"Status"`
+}
+
+type GetBucketVersioningResult VersioningConfig
+
+// ServerEncryptionRule Server Encryption rule for the bucket
+type ServerEncryptionRule struct {
+ XMLName xml.Name `xml:"ServerSideEncryptionRule"`
+ SSEDefault SSEDefaultRule `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// SSEDefaultRule Server Encryption default rule for the bucket
+type SSEDefaultRule struct {
+ XMLName xml.Name `xml:"ApplyServerSideEncryptionByDefault"`
+ SSEAlgorithm string `xml:"SSEAlgorithm,omitempty"`
+ KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
+ KMSDataEncryption string `xml:"KMSDataEncryption,omitempty"`
+}
+
+type GetBucketEncryptionResult ServerEncryptionRule
+type GetBucketTaggingResult Tagging
+
+type BucketStat struct {
+ XMLName xml.Name `xml:"BucketStat"`
+ Storage int64 `xml:"Storage"`
+ ObjectCount int64 `xml:"ObjectCount"`
+ MultipartUploadCount int64 `xml:"MultipartUploadCount"`
+ LiveChannelCount int64 `xml:"LiveChannelCount"`
+ LastModifiedTime int64 `xml:"LastModifiedTime"`
+ StandardStorage int64 `xml:"StandardStorage"`
+ StandardObjectCount int64 `xml:"StandardObjectCount"`
+ InfrequentAccessStorage int64 `xml:"InfrequentAccessStorage"`
+ InfrequentAccessRealStorage int64 `xml:"InfrequentAccessRealStorage"`
+ InfrequentAccessObjectCount int64 `xml:"InfrequentAccessObjectCount"`
+ ArchiveStorage int64 `xml:"ArchiveStorage"`
+ ArchiveRealStorage int64 `xml:"ArchiveRealStorage"`
+ ArchiveObjectCount int64 `xml:"ArchiveObjectCount"`
+ ColdArchiveStorage int64 `xml:"ColdArchiveStorage"`
+ ColdArchiveRealStorage int64 `xml:"ColdArchiveRealStorage"`
+ ColdArchiveObjectCount int64 `xml:"ColdArchiveObjectCount"`
+}
+type GetBucketStatResult BucketStat
+
+// RequestPaymentConfiguration define the request payment configuration
+type RequestPaymentConfiguration struct {
+ XMLName xml.Name `xml:"RequestPaymentConfiguration"`
+ Payer string `xml:"Payer,omitempty"`
+}
+
+// BucketQoSConfiguration define QoS configuration
+type BucketQoSConfiguration struct {
+ XMLName xml.Name `xml:"QoSConfiguration"`
+ TotalUploadBandwidth *int `xml:"TotalUploadBandwidth"` // Total upload bandwidth
+ IntranetUploadBandwidth *int `xml:"IntranetUploadBandwidth"` // Intranet upload bandwidth
+ ExtranetUploadBandwidth *int `xml:"ExtranetUploadBandwidth"` // Extranet upload bandwidth
+ TotalDownloadBandwidth *int `xml:"TotalDownloadBandwidth"` // Total download bandwidth
+ IntranetDownloadBandwidth *int `xml:"IntranetDownloadBandwidth"` // Intranet download bandwidth
+ ExtranetDownloadBandwidth *int `xml:"ExtranetDownloadBandwidth"` // Extranet download bandwidth
+ TotalQPS *int `xml:"TotalQps"` // Total Qps
+ IntranetQPS *int `xml:"IntranetQps"` // Intranet Qps
+ ExtranetQPS *int `xml:"ExtranetQps"` // Extranet Qps
+}
+
+// UserQoSConfiguration define QoS and Range configuration
+type UserQoSConfiguration struct {
+ XMLName xml.Name `xml:"QoSConfiguration"`
+ Region string `xml:"Region,omitempty"` // Effective area of Qos configuration
+ BucketQoSConfiguration
+}
+
+//////////////////////////////////////////////////////////////
+/////////////////// Select Object ////////////////////////////
+//////////////////////////////////////////////////////////////
+
+type CsvMetaRequest struct {
+ XMLName xml.Name `xml:"CsvMetaRequest"`
+ InputSerialization InputSerialization `xml:"InputSerialization"`
+ OverwriteIfExists *bool `xml:"OverwriteIfExists,omitempty"`
+}
+
+// encodeBase64 base64-encodes the CreateSelectObjectMeta API request parameters
+func (meta *CsvMetaRequest) encodeBase64() {
+ meta.InputSerialization.CSV.RecordDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.RecordDelimiter))
+ meta.InputSerialization.CSV.FieldDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.FieldDelimiter))
+ meta.InputSerialization.CSV.QuoteCharacter =
+ base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.QuoteCharacter))
+}
+
+type JsonMetaRequest struct {
+ XMLName xml.Name `xml:"JsonMetaRequest"`
+ InputSerialization InputSerialization `xml:"InputSerialization"`
+ OverwriteIfExists *bool `xml:"OverwriteIfExists,omitempty"`
+}
+
+type InputSerialization struct {
+ XMLName xml.Name `xml:"InputSerialization"`
+ CSV CSV `xml:"CSV,omitempty"`
+ JSON JSON `xml:"JSON,omitempty"`
+ CompressionType string `xml:"CompressionType,omitempty"`
+}
+type CSV struct {
+ XMLName xml.Name `xml:"CSV"`
+ RecordDelimiter string `xml:"RecordDelimiter,omitempty"`
+ FieldDelimiter string `xml:"FieldDelimiter,omitempty"`
+ QuoteCharacter string `xml:"QuoteCharacter,omitempty"`
+}
+
+type JSON struct {
+ XMLName xml.Name `xml:"JSON"`
+ JSONType string `xml:"Type,omitempty"`
+}
+
+// SelectRequest is for the SelectObject request params of json file
+type SelectRequest struct {
+ XMLName xml.Name `xml:"SelectRequest"`
+ Expression string `xml:"Expression"`
+ InputSerializationSelect InputSerializationSelect `xml:"InputSerialization"`
+ OutputSerializationSelect OutputSerializationSelect `xml:"OutputSerialization"`
+ SelectOptions SelectOptions `xml:"Options,omitempty"`
+}
+type InputSerializationSelect struct {
+ XMLName xml.Name `xml:"InputSerialization"`
+ CsvBodyInput CSVSelectInput `xml:"CSV,omitempty"`
+ JsonBodyInput JSONSelectInput `xml:"JSON,omitempty"`
+ CompressionType string `xml:"CompressionType,omitempty"`
+}
+type CSVSelectInput struct {
+ XMLName xml.Name `xml:"CSV"`
+ FileHeaderInfo string `xml:"FileHeaderInfo,omitempty"`
+ RecordDelimiter string `xml:"RecordDelimiter,omitempty"`
+ FieldDelimiter string `xml:"FieldDelimiter,omitempty"`
+ QuoteCharacter string `xml:"QuoteCharacter,omitempty"`
+ CommentCharacter string `xml:"CommentCharacter,omitempty"`
+ Range string `xml:"Range,omitempty"`
+ SplitRange string
+}
+type JSONSelectInput struct {
+ XMLName xml.Name `xml:"JSON"`
+ JSONType string `xml:"Type,omitempty"`
+ Range string `xml:"Range,omitempty"`
+ ParseJSONNumberAsString *bool `xml:"ParseJsonNumberAsString"`
+ SplitRange string
+}
+
+func (jsonInput *JSONSelectInput) JsonIsEmpty() bool {
+ if jsonInput.JSONType != "" {
+ return false
+ }
+ return true
+}
+
+type OutputSerializationSelect struct {
+ XMLName xml.Name `xml:"OutputSerialization"`
+ CsvBodyOutput CSVSelectOutput `xml:"CSV,omitempty"`
+ JsonBodyOutput JSONSelectOutput `xml:"JSON,omitempty"`
+ OutputRawData *bool `xml:"OutputRawData,omitempty"`
+ KeepAllColumns *bool `xml:"KeepAllColumns,omitempty"`
+ EnablePayloadCrc *bool `xml:"EnablePayloadCrc,omitempty"`
+ OutputHeader *bool `xml:"OutputHeader,omitempty"`
+}
+type CSVSelectOutput struct {
+ XMLName xml.Name `xml:"CSV"`
+ RecordDelimiter string `xml:"RecordDelimiter,omitempty"`
+ FieldDelimiter string `xml:"FieldDelimiter,omitempty"`
+}
+type JSONSelectOutput struct {
+ XMLName xml.Name `xml:"JSON"`
+ RecordDelimiter string `xml:"RecordDelimiter,omitempty"`
+}
+
+func (selectReq *SelectRequest) encodeBase64() {
+ if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
+ selectReq.csvEncodeBase64()
+ } else {
+ selectReq.jsonEncodeBase64()
+ }
+}
+
+// csvEncodeBase64 base64-encodes the CSV SelectObject API request parameters
+func (selectReq *SelectRequest) csvEncodeBase64() {
+ selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression))
+ selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter))
+ selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter))
+ selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter))
+ selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter))
+ selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter))
+ selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter))
+
+ // handle Range
+ if selectReq.InputSerializationSelect.CsvBodyInput.Range != "" {
+ selectReq.InputSerializationSelect.CsvBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.CsvBodyInput.Range
+ }
+
+ if selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != "" {
+ selectReq.InputSerializationSelect.CsvBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.CsvBodyInput.SplitRange
+ }
+}
+
+// jsonEncodeBase64 base64-encodes the JSON SelectObject API request parameters
+func (selectReq *SelectRequest) jsonEncodeBase64() {
+ selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression))
+ selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter =
+ base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter))
+
+ // handle Range
+ if selectReq.InputSerializationSelect.JsonBodyInput.Range != "" {
+ selectReq.InputSerializationSelect.JsonBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.JsonBodyInput.Range
+ }
+
+ if selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != "" {
+ selectReq.InputSerializationSelect.JsonBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.JsonBodyInput.SplitRange
+ }
+}
+
+// SelectOptions is an element in the SelectObject API request's params
+type SelectOptions struct {
+ XMLName xml.Name `xml:"Options"`
+ SkipPartialDataRecord *bool `xml:"SkipPartialDataRecord,omitempty"`
+ MaxSkippedRecordsAllowed string `xml:"MaxSkippedRecordsAllowed,omitempty"`
+}
+
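// Illustrative sketch, not part of this patch: building a SelectRequest for a
// CSV object. Expression and delimiters are plain text here; the SDK's
// unexported encodeBase64 step base64-encodes them and turns Range into
// "line-range=..." before the request is sent. Bucket/key names are
// placeholders, and the bucket.SelectObject call is an assumption about this
// vendored version.
package main

import (
	"fmt"
	"io"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("example-bucket")
	if err != nil {
		panic(err)
	}

	req := oss.SelectRequest{
		Expression: "select _1, _2 from ossobject",
		InputSerializationSelect: oss.InputSerializationSelect{
			CsvBodyInput: oss.CSVSelectInput{
				FileHeaderInfo:  "None", // the file has no header row
				RecordDelimiter: "\n",
				FieldDelimiter:  ",",
			},
		},
		OutputSerializationSelect: oss.OutputSerializationSelect{
			CsvBodyOutput: oss.CSVSelectOutput{RecordDelimiter: "\n", FieldDelimiter: ","},
		},
	}

	body, err := bucket.SelectObject("data/sample.csv", req)
	if err != nil {
		panic(err)
	}
	defer body.Close()
	out, _ := io.ReadAll(body)
	fmt.Println(string(out))
}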
+// SelectObjectResult is the SelectObject api's return
+type SelectObjectResult struct {
+ Version byte
+ FrameType int32
+ PayloadLength int32
+ HeaderCheckSum uint32
+ Offset uint64
+ Data string // DataFrame
+ EndFrame EndFrame // EndFrame
+ MetaEndFrameCSV MetaEndFrameCSV // MetaEndFrameCSV
+ MetaEndFrameJSON MetaEndFrameJSON // MetaEndFrameJSON
+ PayloadChecksum uint32
+ ReadFlagInfo
+}
+
+// ReadFlagInfo records the reading status while frame data is being read
+type ReadFlagInfo struct {
+ OpenLine bool
+ ConsumedBytesLength int32
+ EnablePayloadCrc bool
+ OutputRawData bool
+}
+
+// EndFrame is EndFrameType of SelectObject api
+type EndFrame struct {
+ TotalScanned int64
+ HTTPStatusCode int32
+ ErrorMsg string
+}
+
+// MetaEndFrameCSV is MetaEndFrameCSVType of CreateSelectObjectMeta
+type MetaEndFrameCSV struct {
+ TotalScanned int64
+ Status int32
+ SplitsCount int32
+ RowsCount int64
+ ColumnsCount int32
+ ErrorMsg string
+}
+
+// MetaEndFrameJSON is MetaEndFrameJSON of CreateSelectObjectMeta
+type MetaEndFrameJSON struct {
+ TotalScanned int64
+ Status int32
+ SplitsCount int32
+ RowsCount int64
+ ErrorMsg string
+}
+
+// InventoryConfiguration is Inventory config
+type InventoryConfiguration struct {
+ XMLName xml.Name `xml:"InventoryConfiguration"`
+ Id string `xml:"Id,omitempty"`
+ IsEnabled *bool `xml:"IsEnabled,omitempty"`
+ Prefix string `xml:"Filter>Prefix,omitempty"`
+ OSSBucketDestination OSSBucketDestination `xml:"Destination>OSSBucketDestination,omitempty"`
+ Frequency string `xml:"Schedule>Frequency,omitempty"`
+ IncludedObjectVersions string `xml:"IncludedObjectVersions,omitempty"`
+ OptionalFields OptionalFields `xml:"OptionalFields,omitempty"`
+}
+
+type OptionalFields struct {
+ XMLName xml.Name `xml:"OptionalFields"`
+ Field []string `xml:"Field,omitempty"`
+}
+
+type OSSBucketDestination struct {
+ XMLName xml.Name `xml:"OSSBucketDestination"`
+ Format string `xml:"Format,omitempty"`
+ AccountId string `xml:"AccountId,omitempty"`
+ RoleArn string `xml:"RoleArn,omitempty"`
+ Bucket string `xml:"Bucket,omitempty"`
+ Prefix string `xml:"Prefix,omitempty"`
+ Encryption *InvEncryption `xml:"Encryption,omitempty"`
+}
+
+type InvEncryption struct {
+ XMLName xml.Name `xml:"Encryption"`
+ SseOss *InvSseOss `xml:"SSE-OSS"`
+ SseKms *InvSseKms `xml:"SSE-KMS"`
+}
+
+type InvSseOss struct {
+ XMLName xml.Name `xml:"SSE-OSS"`
+}
+
+type InvSseKms struct {
+ XMLName xml.Name `xml:"SSE-KMS"`
+ KmsId string `xml:"KeyId,omitempty"`
+}
+
+type ListInventoryConfigurationsResult struct {
+ XMLName xml.Name `xml:"ListInventoryConfigurationsResult"`
+ InventoryConfiguration []InventoryConfiguration `xml:"InventoryConfiguration,omitempty"`
+ IsTruncated *bool `xml:"IsTruncated,omitempty"`
+ NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
+}
+
+// RestoreConfiguration for RestoreObject
+type RestoreConfiguration struct {
+ XMLName xml.Name `xml:"RestoreRequest"`
+ Days int32 `xml:"Days,omitempty"`
+ Tier string `xml:"JobParameters>Tier,omitempty"`
+}
+
+// AsyncFetchTaskConfiguration for SetBucketAsyncFetchTask
+type AsyncFetchTaskConfiguration struct {
+ XMLName xml.Name `xml:"AsyncFetchTaskConfiguration"`
+ Url string `xml:"Url,omitempty"`
+ Object string `xml:"Object,omitempty"`
+ Host string `xml:"Host,omitempty"`
+ ContentMD5 string `xml:"ContentMD5,omitempty"`
+ Callback string `xml:"Callback,omitempty"`
+ StorageClass string `xml:"StorageClass,omitempty"`
+ IgnoreSameKey bool `xml:"IgnoreSameKey"`
+}
+
+// AsyncFetchTaskResult for SetBucketAsyncFetchTask result
+type AsyncFetchTaskResult struct {
+ XMLName xml.Name `xml:"AsyncFetchTaskResult"`
+ TaskId string `xml:"TaskId,omitempty"`
+}
+
+// AsynFetchTaskInfo for GetBucketAsyncFetchTask result
+type AsynFetchTaskInfo struct {
+ XMLName xml.Name `xml:"AsyncFetchTaskInfo"`
+ TaskId string `xml:"TaskId,omitempty"`
+ State string `xml:"State,omitempty"`
+ ErrorMsg string `xml:"ErrorMsg,omitempty"`
+ TaskInfo AsyncTaskInfo `xml:"TaskInfo,omitempty"`
+}
+
+// AsyncTaskInfo for async task information
+type AsyncTaskInfo struct {
+ XMLName xml.Name `xml:"TaskInfo"`
+ Url string `xml:"Url,omitempty"`
+ Object string `xml:"Object,omitempty"`
+ Host string `xml:"Host,omitempty"`
+ ContentMD5 string `xml:"ContentMD5,omitempty"`
+ Callback string `xml:"Callback,omitempty"`
+ StorageClass string `xml:"StorageClass,omitempty"`
+ IgnoreSameKey bool `xml:"IgnoreSameKey"`
+}
+
+// InitiateWormConfiguration define InitiateBucketWorm configuration
+type InitiateWormConfiguration struct {
+ XMLName xml.Name `xml:"InitiateWormConfiguration"`
+ RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days
+}
+
+// ExtendWormConfiguration define ExtendWormConfiguration configuration
+type ExtendWormConfiguration struct {
+ XMLName xml.Name `xml:"ExtendWormConfiguration"`
+ RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days
+}
+
+// WormConfiguration define WormConfiguration
+type WormConfiguration struct {
+ XMLName xml.Name `xml:"WormConfiguration"`
+ WormId string `xml:"WormId,omitempty"`
+ State string `xml:"State,omitempty"`
+ RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days
+ CreationDate string `xml:"CreationDate,omitempty"`
+}
+
+// TransferAccConfiguration define transfer acceleration configuration
+type TransferAccConfiguration struct {
+ XMLName xml.Name `xml:"TransferAccelerationConfiguration"`
+ Enabled bool `xml:"Enabled"`
+}
+
+// ReplicationXML defines simple replication xml, and ReplicationXML is used for "DeleteBucketReplication" in client.go
+type ReplicationXML struct {
+ XMLName xml.Name `xml:"ReplicationRules"`
+ ID string `xml:"ID,omitempty"`
+}
+
+// PutBucketReplication define the bucket replication config
+type PutBucketReplication BucketReplicationXml
+
+// GetBucketReplicationResult define get bucket's replication config
+type GetBucketReplicationResult BucketReplicationXml
+
+// GetBucketReplicationLocationResult define get bucket's replication location
+type GetBucketReplicationLocationResult BucketReplicationLocationXml
+
+// GetBucketReplicationProgressResult define get bucket's replication progress
+type GetBucketReplicationProgressResult BucketReplicationProgressXml
+
+// PutBucketRTC define the bucket rtc config
+type PutBucketRTC BucketRTCXml
+
+// BucketReplicationXml define the xml of bucket replication config
+type BucketReplicationXml struct {
+ XMLName xml.Name `xml:"ReplicationConfiguration"`
+ Rule []ReplicationRule `xml:"Rule,omitempty"`
+}
+
+// BucketReplicationProgressXml define the xml of bucket replication config
+type BucketReplicationProgressXml struct {
+ XMLName xml.Name `xml:"ReplicationProgress"`
+ Rule []ReplicationRule `xml:"Rule,omitempty"`
+}
+
+// BucketRTCXml define the xml of bucket rtc config
+type BucketRTCXml struct {
+ XMLName xml.Name `xml:"ReplicationRule"`
+ RTC *string `xml:"RTC>Status,omitempty"`
+ ID string `xml:"ID,omitempty"`
+}
+
+// ReplicationRule define the xml of bucket replication config rule
+type ReplicationRule struct {
+ ID string `xml:"ID,omitempty"`
+ RTC *string `xml:"RTC>Status,omitempty"`
+ PrefixSet *ReplicationRulePrefix `xml:"PrefixSet,omitempty"`
+ Action string `xml:"Action,omitempty"`
+ Destination *ReplicationRuleDestination `xml:"Destination,omitempty"`
+ HistoricalObjectReplication string `xml:"HistoricalObjectReplication,omitempty"`
+ Status string `xml:"Status,omitempty"`
+ SyncRole string `xml:"SyncRole,omitempty"`
+ SourceSelectionCriteria *string `xml:"SourceSelectionCriteria>SseKmsEncryptedObjects>Status,omitempty"`
+ EncryptionConfiguration *string `xml:"EncryptionConfiguration>ReplicaKmsKeyID,omitempty"`
+ Progress *ReplicationRuleProgress `xml:"Progress,omitempty"`
+ HistoricalObject string `xml:"HistoricalObject,omitempty"`
+}
+
+type ReplicationRulePrefix struct {
+ Prefix []*string `xml:"Prefix,omitempty"`
+}
+
+type ReplicationRuleDestination struct {
+ Bucket string `xml:"Bucket,omitempty"`
+ Location string `xml:"Location,omitempty"`
+ TransferType string `xml:"TransferType,omitempty"`
+}
+
+// BucketReplicationLocationXml define the xml of bucket replication location info
+type BucketReplicationLocationXml struct {
+ XMLName xml.Name `xml:"ReplicationLocation"`
+ Location []string `xml:"Location,omitempty"`
+ LocationTransferType []ReplicationLocationTransferType `xml:"LocationTransferTypeConstraint>LocationTransferType,omitempty"`
+ RTCLocation []string `xml:"LocationRTCConstraint>Location,omitempty"`
+}
+
+type ReplicationLocation struct {
+ Location string `xml:"Location,omitempty"`
+}
+
+type ReplicationLocationTransferType struct {
+ Location string `xml:"Location,omitempty"`
+ TransferTypes string `xml:"TransferTypes>Type,omitempty"`
+}
+
+type ReplicationRuleProgress struct {
+ HistoricalObject string `xml:"HistoricalObject,omitempty"`
+ NewObject string `xml:"NewObject,omitempty"`
+}
+
+// CnameConfigurationXML define cname configuration
+type CnameConfigurationXML struct {
+ XMLName xml.Name `xml:"BucketCnameConfiguration"`
+ Domain string `xml:"Cname>Domain"`
+}
+
+type PutBucketCname PutBucketCnameXml
+
+// PutBucketCnameXml define cname configuration
+type PutBucketCnameXml struct {
+ XMLName xml.Name `xml:"BucketCnameConfiguration"`
+ Cname string `xml:"Cname>Domain"`
+ CertificateConfiguration *CertificateConfiguration `xml:"Cname>CertificateConfiguration"`
+}
+
+type CertificateConfiguration struct {
+ CertId string `xml:"CertId,omitempty"`
+ Certificate string `xml:"Certificate,omitempty"`
+ PrivateKey string `xml:"PrivateKey,omitempty"`
+ PreviousCertId string `xml:"PreviousCertId,omitempty"`
+ Force bool `xml:"Force,omitempty"`
+ DeleteCertificate bool `xml:"DeleteCertificate,omitempty"`
+}
+
+// CnameTokenXML define cname token information
+type CnameTokenXML struct {
+ XMLName xml.Name `xml:"CnameToken"`
+ Bucket string `xml:"Bucket,omitempty"`
+ Cname string `xml:"Cname,omitempty"`
+ Token string `xml:"Token,omitempty"`
+ ExpireTime string `xml:"ExpireTime,omitempty"`
+}
+
+// CreateBucketCnameTokenResult defines result object for CreateBucketCnameToken request
+type CreateBucketCnameTokenResult CnameTokenXML
+
+// GetBucketCnameTokenResult defines result object for GetBucketCnameToken request
+type GetBucketCnameTokenResult CnameTokenXML
+
+// GetMetaQueryStatusResult defines result for GetMetaQueryStatus result
+type GetMetaQueryStatusResult GetMetaQueryStatusResultXml
+
+// GetMetaQueryStatusResultXml define get meta query status information
+type GetMetaQueryStatusResultXml struct {
+ XMLName xml.Name `xml:"MetaQueryStatus"`
+ State string `xml:"State"`
+ Phase string `xml:"Phase"`
+ CreateTime string `xml:"CreateTime"`
+ UpdateTime string `xml:"UpdateTime"`
+}
+
+// MetaQuery defines meta query struct
+type MetaQuery struct {
+ XMLName xml.Name `xml:"MetaQuery"`
+ NextToken string `xml:"NextToken,omitempty"`
+ MaxResults int64 `xml:"MaxResults,omitempty"`
+ Query string `xml:"Query"`
+ Sort string `xml:"Sort,omitempty"`
+ Order string `xml:"Order,omitempty"`
+ Aggregations []MetaQueryAggregationRequest `xml:"Aggregations>Aggregation,omitempty"`
+}
+
+// MetaQueryAggregationRequest defines meta query aggregation request
+type MetaQueryAggregationRequest struct {
+ XMLName xml.Name `xml:"Aggregation"`
+ Field string `xml:"Field,omitempty"`
+ Operation string `xml:"Operation,omitempty"`
+}
+
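// Illustrative sketch, not part of this patch: the request payload a MetaQuery
// marshals to. The Query string follows the OSS meta-query JSON syntax (an
// assumption based on the service docs) and the values are placeholders; the
// struct is normally submitted through the client's DoMetaQuery call.
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	q := oss.MetaQuery{
		MaxResults: 20,
		Query:      `{"Field":"Size","Value":"1048576","Operation":"gt"}`, // objects larger than 1 MiB
		Sort:       "Size",
		Order:      "desc",
	}
	out, _ := xml.MarshalIndent(q, "", "  ")
	fmt.Println(string(out)) // <MetaQuery><MaxResults>20</MaxResults><Query>...</Query>...</MetaQuery>
}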
+// MetaQueryAggregationResponse defines meta query aggregation response
+type MetaQueryAggregationResponse struct {
+ XMLName xml.Name `xml:"Aggregation"`
+ Field string `xml:"Field,omitempty"`
+ Operation string `xml:"Operation,omitempty"`
+ Value float64 `xml:"Value,omitempty"`
+ Groups []MetaQueryGroup `xml:"Groups>Group,omitempty"`
+}
+
+// DoMetaQueryResult defines result for DoMetaQuery result
+type DoMetaQueryResult DoMetaQueryResultXml
+
+// DoMetaQueryResultXml defines do meta query information
+type DoMetaQueryResultXml struct {
+ XMLName xml.Name `xml:"MetaQuery"`
+ NextToken string `xml:"NextToken,omitempty"` // next token
+ Files []MetaQueryFile `xml:"Files>File,omitempty"` // file
+ Aggregations []MetaQueryAggregationResponse `xml:"Aggregations>Aggregation,omitempty"` // Aggregation
+}
+
+// MetaQueryFile defines do meta query result file information
+type MetaQueryFile struct {
+ XMLName xml.Name `xml:"File"`
+ Filename string `xml:"Filename"` //file name
+ Size int64 `xml:"Size"` // file size
+ FileModifiedTime string `xml:"FileModifiedTime"` // file Modified Time
+ OssObjectType string `xml:"OSSObjectType"` // Oss Object Type
+ OssStorageClass string `xml:"OSSStorageClass"` // Oss Storage Class
+ ObjectACL string `xml:"ObjectACL"` // Object Acl
+ ETag string `xml:"ETag"` // ETag
+ OssCRC64 string `xml:"OSSCRC64"` // Oss CRC64
+ OssTaggingCount int64 `xml:"OSSTaggingCount,omitempty"` // Oss Tagging Count
+ OssTagging []MetaQueryTagging `xml:"OSSTagging>Tagging,omitempty"` // Tagging
+ OssUserMeta []MetaQueryUserMeta `xml:"OSSUserMeta>UserMeta,omitempty"` // UserMeta
+ ServerSideEncryption string `xml:"ServerSideEncryption,omitempty"` //Server Side Encryption
+ ServerSideEncryptionCustomerAlgorithm string `xml:"ServerSideEncryptionCustomerAlgorithm,omitempty"` // Server Side Encryption Customer Algorithm
+}
+
+// MetaQueryTagging defines do meta query result tagging information
+type MetaQueryTagging struct {
+ XMLName xml.Name `xml:"Tagging"`
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+// MetaQueryUserMeta defines do meta query result user meta information
+type MetaQueryUserMeta struct {
+ XMLName xml.Name `xml:"UserMeta"`
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+// MetaQueryGroup defines do meta query result group information
+type MetaQueryGroup struct {
+ XMLName xml.Name `xml:"Group"`
+ Value string `xml:"Value"`
+ Count int64 `xml:"Count"`
+}
+
+// GetBucketAccessMonitorResult define config for get bucket access monitor
+type GetBucketAccessMonitorResult BucketAccessMonitorXml
+
+// PutBucketAccessMonitor define the xml of bucket access monitor config
+type PutBucketAccessMonitor BucketAccessMonitorXml
+
+// BucketAccessMonitorXml define get bucket access monitor information
+type BucketAccessMonitorXml struct {
+ XMLName xml.Name `xml:"AccessMonitorConfiguration"`
+ Status string `xml:"Status"` // access monitor status
+}
+
+// ListBucketCnameResult define the cname list of the bucket
+type ListBucketCnameResult BucketCnameXml
+
+// BucketCnameXml define get the bucket cname information
+type BucketCnameXml struct {
+ XMLName xml.Name `xml:"ListCnameResult"`
+ Bucket string `xml:"Bucket"`
+ Owner string `xml:"Owner"`
+ Cname []Cname `xml:"Cname"`
+}
+
+// Cname define the cname information
+type Cname struct {
+ Domain string `xml:"Domain"`
+ LastModified string `xml:"LastModified"`
+ Status string `xml:"Status"`
+ Certificate Certificate `xml:"Certificate"`
+}
+
+// Certificate define Details of domain name certificate
+type Certificate struct {
+ Type string `xml:"Type"`
+ CertId string `xml:"CertId"`
+ Status string `xml:"Status"`
+ CreationDate string `xml:"CreationDate"`
+ Fingerprint string `xml:"Fingerprint"`
+ ValidStartDate string `xml:"ValidStartDate"`
+ ValidEndDate string `xml:"ValidEndDate"`
+}
+
+// GetBucketResourceGroupResult define resource group for the bucket
+type GetBucketResourceGroupResult BucketResourceGroupXml
+
+// PutBucketResourceGroup define the xml of bucket's resource group config
+type PutBucketResourceGroup BucketResourceGroupXml
+
+// BucketResourceGroupXml define the information of the bucket's resource group
+type BucketResourceGroupXml struct {
+ XMLName xml.Name `xml:"BucketResourceGroupConfiguration"`
+ ResourceGroupId string `xml:"ResourceGroupId"` // resource groupId
+}
+
+// GetBucketStyleResult define style for the bucket
+type GetBucketStyleResult BucketStyleXml
+
+// GetBucketListStyleResult define the list style for the bucket
+type GetBucketListStyleResult BucketListStyleXml
+
+// BucketListStyleXml define the list style of the bucket
+type BucketListStyleXml struct {
+ XMLName xml.Name `xml:"StyleList"`
+ Style []BucketStyleXml `xml:"Style,omitempty"` // style
+}
+
+// BucketStyleXml define the information of the bucket's style
+type BucketStyleXml struct {
+ XMLName xml.Name `xml:"Style"`
+ Name string `xml:"Name,omitempty"` // style name
+ Content string `xml:"Content"` // style content
+ CreateTime string `xml:"CreateTime,omitempty"` // style create time
+ LastModifyTime string `xml:"LastModifyTime,omitempty"` // style last modify time
+}
+
+// DescribeRegionsResult define get the describe regions result
+type DescribeRegionsResult RegionInfoList
+
+type RegionInfo struct {
+ Region string `xml:"Region"`
+ InternetEndpoint string `xml:"InternetEndpoint"`
+ InternalEndpoint string `xml:"InternalEndpoint"`
+ AccelerateEndpoint string `xml:"AccelerateEndpoint"`
+}
+
+type RegionInfoList struct {
+ XMLName xml.Name `xml:"RegionInfoList"`
+ Regions []RegionInfo `xml:"RegionInfo"`
+}
+
+//PutBucketResponseHeader define the xml of bucket's response header config
+type PutBucketResponseHeader ResponseHeaderXml
+
+//GetBucketResponseHeaderResult define the xml of bucket's response header result
+type GetBucketResponseHeaderResult ResponseHeaderXml
+
+type ResponseHeaderXml struct {
+ XMLName xml.Name `xml:"ResponseHeaderConfiguration"`
+ Rule []ResponseHeaderRule `xml:"Rule,omitempty"` // rule
+}
+
+type ResponseHeaderRule struct {
+ Name string `xml:"Name"` // rule name
+ Filters ResponseHeaderRuleFilters `xml:"Filters,omitempty"` // rule filters Operation
+ HideHeaders ResponseHeaderRuleHeaders `xml:"HideHeaders,omitempty"` // rule hide header
+}
+
+type ResponseHeaderRuleFilters struct {
+ Operation []string `xml:"Operation,omitempty"`
+}
+
+type ResponseHeaderRuleHeaders struct {
+ Header []string `xml:"Header,omitempty"`
+}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
index 80371447..becc6433 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
@@ -1,6 +1,7 @@
package oss
import (
+ "bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
@@ -8,6 +9,7 @@ import (
"errors"
"fmt"
"io/ioutil"
+ "net/http"
"os"
"path/filepath"
"time"
@@ -44,7 +46,7 @@ func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject strin
if cpConf.FilePath == "" && cpConf.DirPath != "" {
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
absPath, _ := filepath.Abs(srcFile)
- cpFileName := getCpFileName(absPath, dest)
+ cpFileName := getCpFileName(absPath, dest, "")
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
@@ -54,7 +56,7 @@ func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject strin
// getCpConfig gets checkpoint configuration
func getCpConfig(options []Option) *cpConfig {
- cpcOpt, err := findOption(options, checkpointConfig, nil)
+ cpcOpt, err := FindOption(options, checkpointConfig, nil)
if err != nil || cpcOpt == nil {
return nil
}
@@ -63,7 +65,7 @@ func getCpConfig(options []Option) *cpConfig {
}
// getCpFileName return the name of the checkpoint file
-func getCpFileName(src, dest string) string {
+func getCpFileName(src, dest, versionId string) string {
md5Ctx := md5.New()
md5Ctx.Write([]byte(src))
srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
@@ -72,12 +74,19 @@ func getCpFileName(src, dest string) string {
md5Ctx.Write([]byte(dest))
destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
- return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
+ if versionId == "" {
+ return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
+ }
+
+ md5Ctx.Reset()
+ md5Ctx.Write([]byte(versionId))
+ versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+ return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
}
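// Illustrative sketch, not part of this patch: how getCpFileName derives a
// checkpoint file name, md5(localPath)-md5("oss://bucket/key")[-md5(versionId)].cp,
// recomputed here with the standard library for hypothetical inputs.
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func md5Hex(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}

func main() {
	src := "/tmp/backup.tar.gz"                  // absolute local file path (placeholder)
	dest := "oss://example-bucket/backup.tar.gz" // destination URI (placeholder)
	versionId := ""                              // empty for unversioned uploads

	name := fmt.Sprintf("%v-%v.cp", md5Hex(src), md5Hex(dest))
	if versionId != "" {
		name = fmt.Sprintf("%v-%v-%v.cp", md5Hex(src), md5Hex(dest), md5Hex(versionId))
	}
	fmt.Println(name) // e.g. 5f3d...-9c2a....cp
}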
// getRoutines gets the routine count. by default it's 1.
func getRoutines(options []Option) int {
- rtnOpt, err := findOption(options, routineNum, nil)
+ rtnOpt, err := FindOption(options, routineNum, nil)
if err != nil || rtnOpt == nil {
return 1
}
@@ -94,17 +103,16 @@ func getRoutines(options []Option) int {
// getPayer return the payer of the request
func getPayer(options []Option) string {
- payerOpt, err := findOption(options, HTTPHeaderOSSRequester, nil)
+ payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
if err != nil || payerOpt == nil {
return ""
}
-
return payerOpt.(string)
}
-// getProgressListener gets the progress callback
-func getProgressListener(options []Option) ProgressListener {
- isSet, listener, _ := isOptionSet(options, progressListener)
+// GetProgressListener gets the progress callback
+func GetProgressListener(options []Option) ProgressListener {
+ isSet, listener, _ := IsOptionSet(options, progressListener)
if !isSet {
return nil
}
@@ -130,14 +138,32 @@ type workerArg struct {
}
// worker is the worker coroutine function
+type defaultUploadProgressListener struct {
+}
+
+// ProgressChanged no-ops
+func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
+}
+
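// Illustrative sketch, not part of this patch: a caller-supplied
// ProgressListener receiving the events published during UploadFile. Object
// key, file path and part size are placeholders; Routines and Progress are
// the SDK's existing option helpers, and the ProgressEvent field names are
// assumed from their use in this file.
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

type logListener struct{}

// ProgressChanged is called for start/data/completed/failed transfer events;
// RwBytes carries the size of the part that just finished.
func (l *logListener) ProgressChanged(e *oss.ProgressEvent) {
	fmt.Printf("event=%v consumed=%d total=%d rw=%d\n", e.EventType, e.ConsumedBytes, e.TotalBytes, e.RwBytes)
}

func main() {
	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("example-bucket")
	if err != nil {
		panic(err)
	}

	err = bucket.UploadFile("backups/app.tar.gz", "/tmp/app.tar.gz", 5*1024*1024,
		oss.Routines(3), oss.Progress(&logListener{}))
	if err != nil {
		panic(err)
	}
}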
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(id, chunk); err != nil {
failed <- err
break
}
- part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...)
+ var respHeader http.Header
+ p := Progress(&defaultUploadProgressListener{})
+ opts := make([]Option, len(arg.options)+2)
+ opts = append(opts, arg.options...)
+
+ // use defaultUploadProgressListener
+ opts = append(opts, p, GetResponseHeader(&respHeader))
+
+ startT := time.Now().UnixNano() / 1000 / 1000 / 1000
+ part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
+ endT := time.Now().UnixNano() / 1000 / 1000 / 1000
if err != nil {
+ arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
failed <- err
break
}
@@ -168,18 +194,16 @@ func getTotalBytes(chunks []FileChunk) int64 {
// uploadFile is a concurrent upload, without checkpoint
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
chunks, err := SplitFileByPartSize(filePath, partSize)
if err != nil {
return err
}
- payerOptions := []Option{}
- payer := getPayer(options)
- if payer != "" {
- payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
- }
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+ abortOptions := ChoiceAbortPartOption(options)
// Initialize the multipart upload
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
@@ -194,11 +218,11 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
var completedBytes int64
totalBytes := getTotalBytes(chunks)
- event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+ event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start the worker coroutine
- arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
+ arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
@@ -215,13 +239,16 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
completed++
parts[part.PartNumber-1] = part
completedBytes += chunks[part.PartNumber-1].Size
- event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+
+ // RwBytes carries the size of the part that just completed; byte-level
+ // progress has already been reported through teeReader.Read()
+ event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
- event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
- bucket.AbortMultipartUpload(imur, payerOptions...)
+ bucket.AbortMultipartUpload(imur, abortOptions...)
return err
}
@@ -230,13 +257,13 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
}
}
- event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes)
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
// Complete the multpart upload
- _, err = bucket.CompleteMultipartUpload(imur, parts, payerOptions...)
+ _, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
if err != nil {
- bucket.AbortMultipartUpload(imur, payerOptions...)
+ bucket.AbortMultipartUpload(imur, abortOptions...)
return err
}
return nil
@@ -253,6 +280,8 @@ type uploadCheckpoint struct {
ObjectKey string // Key
UploadID string // Upload ID
Parts []cpPart // All parts of the local file
+ CallbackVal string
+ CallbackBody *[]byte
}
type cpStat struct {
@@ -268,7 +297,19 @@ type cpPart struct {
}
// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid.
-func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+func (cp uploadCheckpoint) isValid(filePath string, options []Option) (bool, error) {
+
+ callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "")
+ if callbackVal != "" && cp.CallbackVal != callbackVal {
+ return false, nil
+ }
+ callbackBody, _ := FindOption(options, responseBody, nil)
+ if callbackBody != nil {
+ body, _ := json.Marshal(callbackBody)
+ if cp.CallbackBody == nil || !bytes.Equal(*cp.CallbackBody, body) {
+ return false, nil
+ }
+ }
// Compare the CP's magic number and MD5.
cpb := cp
cpb.MD5 = ""
@@ -299,7 +340,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
// Compare the file size, file's last modified time and file's MD5
if cp.FileStat.Size != st.Size() ||
- cp.FileStat.LastModified != st.ModTime() ||
+ !cp.FileStat.LastModified.Equal(st.ModTime()) ||
cp.FileStat.MD5 != md {
return false, nil
}
@@ -404,6 +445,13 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
}
cp.FileStat.Size = st.Size()
cp.FileStat.LastModified = st.ModTime()
+ callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "")
+ cp.CallbackVal = callbackVal.(string)
+ callbackBody, _ := FindOption(options, responseBody, nil)
+ if callbackBody != nil {
+ body, _ := json.Marshal(callbackBody)
+ cp.CallbackBody = &body
+ }
md, err := calcFileMD5(filePath)
if err != nil {
return err
@@ -436,8 +484,12 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
Key: cp.ObjectKey, UploadID: cp.UploadID}
+
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
if err != nil {
+ if e, ok := err.(ServiceError); ok && (e.StatusCode == 203 || e.StatusCode == 404) {
+ os.Remove(cpFilePath)
+ }
return err
}
os.Remove(cpFilePath)
@@ -446,13 +498,10 @@ func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePa
// uploadFileWithCp handles concurrent upload with checkpoint
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
- listener := getProgressListener(options)
+ listener := GetProgressListener(options)
- payerOptions := []Option{}
- payer := getPayer(options)
- if payer != "" {
- payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
- }
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
// Load CP data
ucp := uploadCheckpoint{}
@@ -462,7 +511,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
}
// Load error or the CP data is invalid.
- valid, err := ucp.isValid(filePath)
+ valid, err := ucp.isValid(filePath, options)
if err != nil || !valid {
if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
return err
@@ -482,11 +531,14 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
die := make(chan bool)
completedBytes := ucp.getCompletedBytes()
- event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
+
+ // RwBytes is 0 for the start event; byte-level progress is reported
+ // through teeReader.Read()
+ event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
publishProgress(listener, event)
// Start the workers
- arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
+ arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
@@ -503,11 +555,11 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
ucp.updatePart(part)
ucp.dump(cpFilePath)
completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
- event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size)
+ event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
- event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
publishProgress(listener, event)
return err
}
@@ -517,10 +569,10 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
}
}
- event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
publishProgress(listener, event)
// Complete the multipart upload
- err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, payerOptions)
+ err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
return err
}
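For context, a minimal caller-side sketch of how these option helpers are exercised: the caller builds one option slice, and UploadFile internally splits it into the per-part and completion subsets via ChoiceTransferPartOption/ChoiceCompletePartOption. The endpoint, credentials, bucket, and file paths below are placeholders, not values taken from this patch.

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint and credentials; substitute real values.
	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("example-bucket") // placeholder bucket name
	if err != nil {
		panic(err)
	}

	// One option slice for the whole upload; the SDK now selects the subset
	// valid for UploadPart and for CompleteMultipartUpload internally.
	opts := []oss.Option{
		oss.Routines(3),
		oss.Checkpoint(true, "/tmp/upload.cp"),
		oss.RequestPayer(oss.PayerType("requester")), // literal shown for illustration
	}
	if err := bucket.UploadFile("remote/object", "/path/to/local/file", 5*1024*1024, opts...); err != nil {
		fmt.Println("upload failed:", err)
	}
}
```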
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
index c0e7b2b1..0d92a53e 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
@@ -4,16 +4,40 @@ import (
"bytes"
"errors"
"fmt"
+ "hash/crc32"
"hash/crc64"
+ "io"
"net/http"
"os"
- "os/exec"
"runtime"
"strconv"
"strings"
"time"
+ "unicode/utf8"
)
+var sys_name string
+var sys_release string
+var sys_machine string
+
+var (
+ escQuot = []byte("&#34;") // shorter than "&quot;"
+ escApos = []byte("&#39;") // shorter than "&apos;"
+ escAmp = []byte("&amp;")
+ escLT = []byte("&lt;")
+ escGT = []byte("&gt;")
+ escTab = []byte("&#x09;")
+ escNL = []byte("&#x0A;")
+ escCR = []byte("&#x0D;")
+ escFFFD = []byte("\uFFFD") // Unicode replacement character
+)
+
+func init() {
+ sys_name = runtime.GOOS
+ sys_release = "-"
+ sys_machine = runtime.GOARCH
+}
+
// userAgent gets user agent
// It has the SDK version information, OS information and GO version
func userAgent() string {
@@ -31,36 +55,45 @@ type sysInfo struct {
// getSysInfo gets system info
// gets the OS information and CPU type
func getSysInfo() sysInfo {
- name := runtime.GOOS
- release := "-"
- machine := runtime.GOARCH
- if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
- name = string(bytes.TrimSpace(out))
- }
- if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
- release = string(bytes.TrimSpace(out))
- }
- if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
- machine = string(bytes.TrimSpace(out))
+ return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
+}
+
+// GetRangeConfig gets the download range from the options.
+func GetRangeConfig(options []Option) (*UnpackedRange, error) {
+ rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
+ if err != nil || rangeOpt == nil {
+ return nil, err
}
- return sysInfo{name: name, release: release, machine: machine}
+ return ParseRange(rangeOpt.(string))
}
-// unpackedRange
-type unpackedRange struct {
- hasStart bool // Flag indicates if the start point is specified
- hasEnd bool // Flag indicates if the end point is specified
- start int64 // Start point
- end int64 // End point
+// UnpackedRange describes the start and end of a parsed HTTP range
+type UnpackedRange struct {
+ HasStart bool // Flag indicates if the start point is specified
+ HasEnd bool // Flag indicates if the end point is specified
+ Start int64 // Start point
+ End int64 // End point
}
-// invalidRangeError returns invalid range error
-func invalidRangeError(r string) error {
+// InvalidRangeError returns invalid range error
+func InvalidRangeError(r string) error {
return fmt.Errorf("InvalidRange %s", r)
}
-// parseRange parse various styles of range such as bytes=M-N
-func parseRange(normalizedRange string) (*unpackedRange, error) {
+func GetRangeString(unpackRange UnpackedRange) string {
+ var strRange string
+ if unpackRange.HasStart && unpackRange.HasEnd {
+ strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
+ } else if unpackRange.HasStart {
+ strRange = fmt.Sprintf("%d-", unpackRange.Start)
+ } else if unpackRange.HasEnd {
+ strRange = fmt.Sprintf("-%d", unpackRange.End)
+ }
+ return strRange
+}
+
+// ParseRange parses various styles of range such as bytes=M-N
+func ParseRange(normalizedRange string) (*UnpackedRange, error) {
var err error
hasStart := false
hasEnd := false
@@ -70,7 +103,7 @@ func parseRange(normalizedRange string) (*unpackedRange, error) {
// Bytes==M-N or ranges=M-N
nrSlice := strings.Split(normalizedRange, "=")
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
// Bytes=M-N,X-Y
@@ -81,62 +114,62 @@ func parseRange(normalizedRange string) (*unpackedRange, error) {
startStr := rStr[:len(rStr)-1]
start, err = strconv.ParseInt(startStr, 10, 64)
if err != nil {
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
hasStart = true
} else if strings.HasPrefix(rStr, "-") { // -N
len := rStr[1:]
end, err = strconv.ParseInt(len, 10, 64)
if err != nil {
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
if end == 0 { // -0
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
hasEnd = true
} else { // M-N
valSlice := strings.Split(rStr, "-")
if len(valSlice) != 2 {
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
start, err = strconv.ParseInt(valSlice[0], 10, 64)
if err != nil {
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
hasStart = true
end, err = strconv.ParseInt(valSlice[1], 10, 64)
if err != nil {
- return nil, invalidRangeError(normalizedRange)
+ return nil, InvalidRangeError(normalizedRange)
}
hasEnd = true
}
- return &unpackedRange{hasStart, hasEnd, start, end}, nil
+ return &UnpackedRange{hasStart, hasEnd, start, end}, nil
}
-// adjustRange returns adjusted range, adjust the range according to the length of the file
-func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
+// AdjustRange returns the range adjusted to the length of the file
+func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
if ur == nil {
return 0, size
}
- if ur.hasStart && ur.hasEnd {
- start = ur.start
- end = ur.end + 1
- if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end {
+ if ur.HasStart && ur.HasEnd {
+ start = ur.Start
+ end = ur.End + 1
+ if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
start = 0
end = size
}
- } else if ur.hasStart {
- start = ur.start
+ } else if ur.HasStart {
+ start = ur.Start
end = size
- if ur.start < 0 || ur.start >= size {
+ if ur.Start < 0 || ur.Start >= size {
start = 0
}
- } else if ur.hasEnd {
- start = size - ur.end
+ } else if ur.HasEnd {
+ start = size - ur.End
end = size
- if ur.end < 0 || ur.end > size {
+ if ur.End < 0 || ur.End > size {
start = 0
end = size
}
@@ -259,7 +292,318 @@ func GetPartEnd(begin int64, total int64, per int64) int64 {
return begin + per - 1
}
-// crcTable returns the table constructed from the specified polynomial
-var crcTable = func() *crc64.Table {
+// CrcTable returns the table constructed from the specified polynomial
+var CrcTable = func() *crc64.Table {
return crc64.MakeTable(crc64.ECMA)
}
+
+// crc32Table returns the CRC32 table constructed from the IEEE polynomial
+var crc32Table = func() *crc32.Table {
+ return crc32.MakeTable(crc32.IEEE)
+}
+
+// ChoiceTransferPartOption selects the options supported by UploadPart or DownloadPart
+func ChoiceTransferPartOption(options []Option) []Option {
+ var outOption []Option
+
+ listener, _ := FindOption(options, progressListener, nil)
+ if listener != nil {
+ outOption = append(outOption, Progress(listener.(ProgressListener)))
+ }
+
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ versionId, _ := FindOption(options, "versionId", nil)
+ if versionId != nil {
+ outOption = append(outOption, VersionId(versionId.(string)))
+ }
+
+ trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
+ if trafficLimit != nil {
+ speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
+ outOption = append(outOption, TrafficLimitHeader(speed))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ return outOption
+}
+
+// ChoiceCompletePartOption selects the options supported by CompleteMultipartUpload
+func ChoiceCompletePartOption(options []Option) []Option {
+ var outOption []Option
+
+ listener, _ := FindOption(options, progressListener, nil)
+ if listener != nil {
+ outOption = append(outOption, Progress(listener.(ProgressListener)))
+ }
+
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
+ if acl != nil {
+ outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
+ }
+
+ callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
+ if callback != nil {
+ outOption = append(outOption, Callback(callback.(string)))
+ }
+
+ callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
+ if callbackVar != nil {
+ outOption = append(outOption, CallbackVar(callbackVar.(string)))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
+ if forbidOverWrite != nil {
+ if forbidOverWrite.(string) == "true" {
+ outOption = append(outOption, ForbidOverWrite(true))
+ } else {
+ outOption = append(outOption, ForbidOverWrite(false))
+ }
+ }
+
+ notification, _ := FindOption(options, HttpHeaderOssNotification, nil)
+ if notification != nil {
+ outOption = append(outOption, SetHeader(HttpHeaderOssNotification, notification))
+ }
+
+ return outOption
+}
+
+// ChoiceAbortPartOption selects the options supported by AbortMultipartUpload
+func ChoiceAbortPartOption(options []Option) []Option {
+ var outOption []Option
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ return outOption
+}
+
+// ChoiceHeadObjectOption selects the options supported by HeadObject
+func ChoiceHeadObjectOption(options []Option) []Option {
+ var outOption []Option
+
+ // HTTPHeaderRange is deliberately not selected so HeadObject reports the whole object length
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ versionId, _ := FindOption(options, "versionId", nil)
+ if versionId != nil {
+ outOption = append(outOption, VersionId(versionId.(string)))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ return outOption
+}
+
+func CheckBucketName(bucketName string) error {
+ nameLen := len(bucketName)
+ if nameLen < 3 || nameLen > 63 {
+ return fmt.Errorf("bucket name %s len is between [3-63],now is %d", bucketName, nameLen)
+ }
+
+ for _, v := range bucketName {
+ if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') {
+ return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
+ }
+ }
+ if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
+ return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
+ }
+ return nil
+}
+
+func CheckObjectName(objectName string) error {
+ if len(objectName) == 0 {
+ return fmt.Errorf("object name is empty")
+ }
+ return nil
+}
+
+func GetReaderLen(reader io.Reader) (int64, error) {
+ var contentLength int64
+ var err error
+ switch v := reader.(type) {
+ case *bytes.Buffer:
+ contentLength = int64(v.Len())
+ case *bytes.Reader:
+ contentLength = int64(v.Len())
+ case *strings.Reader:
+ contentLength = int64(v.Len())
+ case *os.File:
+ fInfo, fError := v.Stat()
+ if fError != nil {
+ err = fmt.Errorf("can't get reader content length,%s", fError.Error())
+ } else {
+ contentLength = fInfo.Size()
+ }
+ case *io.LimitedReader:
+ contentLength = int64(v.N)
+ case *LimitedReadCloser:
+ contentLength = int64(v.N)
+ default:
+ err = fmt.Errorf("can't get reader content length,unkown reader type")
+ }
+ return contentLength, err
+}
+
+func LimitReadCloser(r io.Reader, n int64) io.Reader {
+ var lc LimitedReadCloser
+ lc.R = r
+ lc.N = n
+ return &lc
+}
+
+// LimitedReadCloser is an io.LimitedReader that also supports Close()
+type LimitedReadCloser struct {
+ io.LimitedReader
+}
+
+func (lc *LimitedReadCloser) Close() error {
+ if closer, ok := lc.R.(io.ReadCloser); ok {
+ return closer.Close()
+ }
+ return nil
+}
+
+type DiscardReadCloser struct {
+ RC io.ReadCloser
+ Discard int
+}
+
+func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
+ n, err := drc.RC.Read(b)
+ if drc.Discard == 0 || n <= 0 {
+ return n, err
+ }
+
+ if n <= drc.Discard {
+ drc.Discard -= n
+ return 0, err
+ }
+
+ realLen := n - drc.Discard
+ copy(b[0:realLen], b[drc.Discard:n])
+ drc.Discard = 0
+ return realLen, err
+}
+
+func (drc *DiscardReadCloser) Close() error {
+ closer, ok := drc.RC.(io.ReadCloser)
+ if ok {
+ return closer.Close()
+ }
+ return nil
+}
+
+func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
+ for _, key := range keys {
+ value, ok := params[key]
+ if ok && value == "" {
+ // convert "" to nil
+ params[key] = nil
+ }
+ }
+}
+
+func EscapeLFString(str string) string {
+ var log bytes.Buffer
+ for i := 0; i < len(str); i++ {
+ if str[i] != '\n' {
+ log.WriteByte(str[i])
+ } else {
+ log.WriteString("\\n")
+ }
+ }
+ return log.String()
+}
+
+// EscapeXml returns the properly escaped XML equivalent
+// of the plain text data s.
+func EscapeXml(s string) string {
+ var p strings.Builder
+ var esc []byte
+ hextable := "0123456789ABCDEF"
+ escPattern := []byte("&#x00;")
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = escQuot
+ case '\'':
+ esc = escApos
+ case '&':
+ esc = escAmp
+ case '<':
+ esc = escLT
+ case '>':
+ esc = escGT
+ case '\t':
+ esc = escTab
+ case '\n':
+ esc = escNL
+ case '\r':
+ esc = escCR
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ if r >= 0x00 && r < 0x20 {
+ escPattern[3] = hextable[r>>4]
+ escPattern[4] = hextable[r&0x0f]
+ esc = escPattern
+ } else {
+ esc = escFFFD
+ }
+ break
+ }
+ continue
+ }
+ p.WriteString(s[last : i-width])
+ p.Write(esc)
+ last = i
+ }
+ p.WriteString(s[last:])
+ return p.String()
+}
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
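The utils.go hunks above export the range helpers; a small sketch of how they compose (the 500-byte object size is an arbitrary example value):

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Parse a normalized HTTP Range value into an UnpackedRange.
	ur, err := oss.ParseRange("bytes=100-199")
	if err != nil {
		panic(err)
	}

	// Clamp the range against a 500-byte object; out-of-bounds ranges fall
	// back to the whole object, as AdjustRange shows above.
	start, end := oss.AdjustRange(ur, 500)
	fmt.Println(start, end) // 100 200 (end is exclusive)

	// Re-serialize the unpacked range, e.g. for a request header.
	fmt.Println(oss.GetRangeString(*ur)) // "100-199"
}
```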
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/.travis.yml b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a60..00000000
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CHANGELOG.md b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 00000000..7ed347d3
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,21 @@
+# Changelog
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CONTRIBUTING.md b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09f..a502fdc5 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
We definitely welcome patches and contribution to this project!
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/README.md b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/README.md
index 21205eae..3e9a6188 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/README.md
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/README.md
@@ -1,10 +1,6 @@
-**This package is currently in development and the API may not be stable.**
-
-The API will become stable with v1.
-
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -13,11 +9,13 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
-http://godoc.org/github.com/google/uuid
+http://pkg.go.dev/github.com/google/uuid
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/dce.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/dce.go
index a6479dba..fa820b9d 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/dce.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/dce.go
@@ -42,7 +42,7 @@ func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
-// NewDCEPerson(Person, uint32(os.Getuid()))
+// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
@@ -50,7 +50,7 @@ func NewDCEPerson() (UUID, error) {
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
-// NewDCEGroup(Group, uint32(os.Getgid()))
+// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/hash.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/hash.go
index 4fc5a77d..b404f4be 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/hash.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/hash.go
@@ -26,8 +26,8 @@ var (
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
- h.Write(space[:])
- h.Write([]byte(data))
+ h.Write(space[:]) //nolint:errcheck
+ h.Write(data) //nolint:errcheck
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/marshal.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/marshal.go
index 84bbc588..14bd3407 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/marshal.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/marshal.go
@@ -15,13 +15,12 @@ func (uuid UUID) MarshalText() ([]byte, error) {
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
- // See comment in ParseBytes why we do this.
- // id, err := ParseBytes(data)
id, err := ParseBytes(data)
- if err == nil {
- *uuid = id
+ if err != nil {
+ return err
}
- return err
+ *uuid = id
+ return nil
}
// MarshalBinary implements encoding.BinaryMarshaler.
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node.go
index 5f0156a2..d651a2b0 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node.go
@@ -5,16 +5,14 @@
package uuid
import (
- "net"
"sync"
)
var (
- nodeMu sync.Mutex
- interfaces []net.Interface // cached list of interfaces
- ifname string // name of interface being used
- nodeID [6]byte // hardware for version 1 UUIDs
- zeroID [6]byte // nodeID with only 0's
+ nodeMu sync.Mutex
+ ifname string // name of interface being used
+ nodeID [6]byte // hardware for version 1 UUIDs
+ zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
@@ -39,26 +37,18 @@ func SetNodeInterface(name string) bool {
}
func setNodeInterface(name string) bool {
- if interfaces == nil {
- var err error
- interfaces, err = net.Interfaces()
- if err != nil && name != "" {
- return false
- }
- }
-
- for _, ifs := range interfaces {
- if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
- copy(nodeID[:], ifs.HardwareAddr)
- ifname = ifs.Name
- return true
- }
+ iname, addr := getHardwareInterface(name) // null implementation for js
+ if iname != "" && addr != nil {
+ ifname = iname
+ copy(nodeID[:], addr)
+ return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
+ ifname = "random"
randomBits(nodeID[:])
return true
}
@@ -94,9 +84,6 @@ func SetNodeID(id []byte) bool {
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
- if len(uuid) != 16 {
- return nil
- }
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node_js.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 00000000..b2a0bc87
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node_net.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 00000000..0cbbcddb
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil {
+ return "", nil
+ }
+ }
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ return ifs.Name, ifs.HardwareAddr
+ }
+ }
+ return "", nil
+}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/null.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/null.go
new file mode 100644
index 00000000..d7fcbf28
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+// var u uuid.NullUUID
+// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+// ...
+// if u.Valid {
+// // use u.UUID
+// } else {
+// // NULL value
+// }
+//
+type NullUUID struct {
+ UUID UUID
+ Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+ if value == nil {
+ nu.UUID, nu.Valid = Nil, false
+ return nil
+ }
+
+ err := nu.UUID.Scan(value)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+
+ nu.Valid = true
+ return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+ if !nu.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID[:], nil
+ }
+
+ return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(nu.UUID[:], data)
+ nu.Valid = true
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID.MarshalText()
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+ nu.UUID = id
+ nu.Valid = true
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+ if nu.Valid {
+ return json.Marshal(nu.UUID)
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+ if bytes.Equal(data, jsonNull) {
+ *nu = NullUUID{}
+ return nil // valid null UUID
+ }
+ err := json.Unmarshal(data, &nu.UUID)
+ nu.Valid = err == nil
+ return err
+}
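NullUUID's null handling above can be exercised without a database; a short sketch using encoding/json (the struct and field names are illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

// row is an illustrative record whose "next" field may be JSON null.
type row struct {
	ID   uuid.UUID     `json:"id"`
	Next uuid.NullUUID `json:"next"`
}

func main() {
	var r row
	if err := json.Unmarshal([]byte(`{"id":"6ba7b810-9dad-11d1-80b4-00c04fd430c8","next":null}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Next.Valid) // false: the field was null

	if err := json.Unmarshal([]byte(`{"id":"6ba7b810-9dad-11d1-80b4-00c04fd430c8","next":"6ba7b811-9dad-11d1-80b4-00c04fd430c8"}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Next.Valid, r.Next.UUID) // true 6ba7b811-9dad-11d1-80b4-00c04fd430c8
}
```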
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/sql.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/sql.go
index 528ad0de..2e02ec06 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/sql.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/sql.go
@@ -9,39 +9,40 @@ import (
"fmt"
)
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
- switch src.(type) {
+ switch src := src.(type) {
+ case nil:
+ return nil
+
case string:
// if an empty UUID comes from a table, we return a null UUID
- if src.(string) == "" {
+ if src == "" {
return nil
}
// see Parse for required string format
- u, err := Parse(src.(string))
-
+ u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
- case []byte:
- b := src.([]byte)
+ case []byte:
// if an empty UUID comes from a table, we return a null UUID
- if len(b) == 0 {
+ if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
- if len(b) != 16 {
- return uuid.Scan(string(b))
+ if len(src) != 16 {
+ return uuid.Scan(string(src))
}
- copy((*uuid)[:], b)
+ copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/time.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/time.go
index fd7fe0ac..e6ef06cd 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/time.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/time.go
@@ -86,7 +86,7 @@ func clockSequence() int {
return int(clockSeq & 0x3fff)
}
-// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
@@ -100,9 +100,9 @@ func setClockSequence(seq int) {
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
- old_seq := clockSeq
+ oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
- if old_seq != clockSeq {
+ if oldSeq != clockSeq {
lasttime = 0
}
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/uuid.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/uuid.go
index b7b9ced3..dc75f7d9 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/uuid.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/uuid.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All rights reserved.
+// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -12,6 +12,7 @@ import (
"fmt"
"io"
"strings"
+ "sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -33,22 +34,69 @@ const (
Future // Reserved for future definition.
)
-var rander = rand.Reader // random function
+const randPoolSize = 16 * 16
-// Parse decodes s into a UUID or returns an error. Both the UUID form of
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+var (
+ rander = rand.Reader // random function
+ poolEnabled = false
+ poolMu sync.Mutex
+ poolPos = randPoolSize // protected with poolMu
+ pool [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+ return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is matcher function for custom error invalidLengthError
+func IsInvalidLengthError(err error) bool {
+ _, ok := err.(invalidLengthError)
+ return ok
+}
+
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
- if len(s) != 36 {
- if len(s) != 36+9 {
- return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
- }
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ switch len(s) {
+ // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36:
+
+ // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
+
+ // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ case 36 + 2:
+ s = s[1:]
+
+ // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ case 32:
+ var ok bool
+ for i := range uuid {
+ uuid[i], ok = xtob(s[i*2], s[i*2+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, invalidLengthError{len(s)}
}
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
@@ -57,12 +105,13 @@ func Parse(s string) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
- if v, ok := xtob(s[x], s[x+1]); !ok {
+ 24, 26, 28, 30, 32, 34,
+ } {
+ v, ok := xtob(s[x], s[x+1])
+ if !ok {
return uuid, errors.New("invalid UUID format")
- } else {
- uuid[i] = v
}
+ uuid[i] = v
}
return uuid, nil
}
@@ -70,15 +119,29 @@ func Parse(s string) (UUID, error) {
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
- if len(b) != 36 {
- if len(b) != 36+9 {
- return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
- }
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ switch len(b) {
+ case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
+ case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ b = b[1:]
+ case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ var ok bool
+ for i := 0; i < 32; i += 2 {
+ uuid[i/2], ok = xtob(b[i], b[i+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, invalidLengthError{len(b)}
}
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
@@ -87,16 +150,34 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
- if v, ok := xtob(b[x], b[x+1]); !ok {
+ 24, 26, 28, 30, 32, 34,
+ } {
+ v, ok := xtob(b[x], b[x+1])
+ if !ok {
return uuid, errors.New("invalid UUID format")
- } else {
- uuid[i] = v
}
+ uuid[i] = v
}
return uuid, nil
}
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+ uuid, err := Parse(s)
+ if err != nil {
+ panic(`uuid: Parse(` + s + `): ` + err.Error())
+ }
+ return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+ err = uuid.UnmarshalBinary(b)
+ return uuid, err
+}
+
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
@@ -123,7 +204,7 @@ func (uuid UUID) URN() string {
}
func encodeHex(dst []byte, uuid UUID) {
- hex.Encode(dst[:], uuid[:4])
+ hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
@@ -176,7 +257,7 @@ func (v Variant) String() string {
return fmt.Sprintf("BadVariant%d", int(v))
}
-// SetRand sets the random number generator to r, which implents io.Reader.
+// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
@@ -189,3 +270,43 @@ func SetRand(r io.Reader) {
}
rander = r
}
+
+// EnableRandPool enables internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+ poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+ poolEnabled = false
+ defer poolMu.Unlock()
+ poolMu.Lock()
+ poolPos = randPoolSize
+}
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
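A short sketch of the parsing forms accepted after this change, plus the new length-error matcher (the UUID literal is an arbitrary RFC 4122 example value):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Standard, URN, "Microsoft style", and raw-hex forms all decode to the
	// same value with the rewritten Parse.
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	} {
		u := uuid.MustParse(s) // panics on malformed input
		fmt.Println(u)
	}

	// Length problems are now a distinct error type.
	_, err := uuid.Parse("too-short")
	fmt.Println(uuid.IsInvalidLengthError(err)) // true
}
```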
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version1.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version1.go
index 22dc07cd..46310962 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version1.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version1.go
@@ -13,16 +13,10 @@ import (
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
-// return the current NewUUID returns Nil and an error.
+// return the current NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
- nodeMu.Lock()
- if nodeID == zeroID {
- setNodeInterface("")
- }
- nodeMu.Unlock()
-
var uuid UUID
now, seq, err := GetTime()
if err != nil {
@@ -38,7 +32,13 @@ func NewUUID() (UUID, error) {
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
return uuid, nil
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version4.go b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version4.go
index 390dd2ca..7697802e 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version4.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/google/uuid/version4.go
@@ -6,7 +6,7 @@ package uuid
import "io"
-// New is creates a new random UUID or panics. New is equivalent to
+// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
@@ -14,12 +14,22 @@ func New() UUID {
return Must(NewRandom())
}
-// NewRandom returns a Random (Version 4) UUID or panics.
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+// uuid.New().String()
+func NewString() string {
+ return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
-// A note about uniqueness derived from from the UUID Wikipedia entry:
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
@@ -27,8 +37,16 @@ func New() UUID {
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
+ if !poolEnabled {
+ return NewRandomFromReader(rander)
+ }
+ return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
+func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID
- _, err := io.ReadFull(rander, uuid[:])
+ _, err := io.ReadFull(r, uuid[:])
if err != nil {
return Nil, err
}
@@ -36,3 +54,23 @@ func NewRandom() (UUID, error) {
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
+
+func newRandomFromPool() (UUID, error) {
+ var uuid UUID
+ poolMu.Lock()
+ if poolPos == randPoolSize {
+ _, err := io.ReadFull(rander, pool[:])
+ if err != nil {
+ poolMu.Unlock()
+ return Nil, err
+ }
+ poolPos = 0
+ }
+ copy(uuid[:], pool[poolPos:(poolPos+16)])
+ poolPos += 16
+ poolMu.Unlock()
+
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
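The pool plumbing above is opt-in; a minimal sketch of enabling it for bulk v4 generation, with the caveats from the EnableRandPool doc comment:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Batches reads from crypto/rand; do not toggle the pool while UUIDs are
	// being generated concurrently, and note the pooled bytes live on the heap.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	ids := make(uuid.UUIDs, 0, 3)
	for i := 0; i < 3; i++ {
		ids = append(ids, uuid.New())
	}
	fmt.Println(ids.Strings()) // three random v4 UUIDs

	fmt.Println(uuid.NewString()) // shorthand for uuid.New().String()
}
```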
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/README.md b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/README.md
index 52b111d5..c589addf 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/README.md
+++ b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/README.md
@@ -8,8 +8,6 @@
A high-performance 100% compatible drop-in replacement of "encoding/json"
-You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
-
# Benchmark
![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_float.go b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_float.go
index b9754638..8a3d8b6f 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_float.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_float.go
@@ -288,6 +288,9 @@ non_decimal_loop:
return iter.readFloat64SlowPath()
}
value = (value << 3) + (value << 1) + uint64(ind)
+ if value > maxFloat64 {
+ return iter.readFloat64SlowPath()
+ }
}
}
return iter.readFloat64SlowPath()
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_int.go b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_int.go
index 21423203..d786a89f 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_int.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/iter_int.go
@@ -9,6 +9,7 @@ var intDigits []int8
const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+const maxFloat64 = 1<<53 - 1
func init() {
intDigits = make([]int8, 256)
@@ -339,7 +340,7 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
}
func (iter *Iterator) assertInteger() {
- if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+ if iter.head < iter.tail && iter.buf[iter.head] == '.' {
iter.ReportError("assertInteger", "can not decode float as int")
}
}
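The new maxFloat64 guard exists because integers above 2^53-1 are no longer exactly representable in a float64, so the fast integer loop hands such inputs to readFloat64SlowPath. A standalone illustration of the cutoff:

```go
package main

import "fmt"

func main() {
	const maxExact = 1<<53 - 1 // the maxFloat64 constant added above

	// Below the cutoff, adjacent integers stay distinct as float64 values.
	fmt.Println(float64(maxExact-1) == float64(maxExact)) // false

	// Past the cutoff, adjacent integers collapse to the same float64.
	fmt.Println(float64(maxExact+1) == float64(maxExact+2)) // true
}
```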
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect.go b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect.go
index 74974ba7..39acb320 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect.go
@@ -65,7 +65,7 @@ func (iter *Iterator) ReadVal(obj interface{}) {
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
typ := reflect2.TypeOf(obj)
- if typ.Kind() != reflect.Ptr {
+ if typ == nil || typ.Kind() != reflect.Ptr {
iter.ReportError("ReadVal", "can only unmarshal into pointer")
return
}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
index f2619936..eba434f2 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -33,11 +33,19 @@ type jsonRawMessageCodec struct {
}
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+ if iter.ReadNil() {
+ *((*json.RawMessage)(ptr)) = nil
+ } else {
+ *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
+ }
}
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+ if *((*json.RawMessage)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+ }
}
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
@@ -48,11 +56,19 @@ type jsoniterRawMessageCodec struct {
}
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+ if iter.ReadNil() {
+ *((*RawMessage)(ptr)) = nil
+ } else {
+ *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
+ }
}
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteRaw(string(*((*RawMessage)(ptr))))
+ if *((*RawMessage)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ stream.WriteRaw(string(*((*RawMessage)(ptr))))
+ }
}
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
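A quick sketch of the behavior the raw-message codec change above enables: JSON null round-trips through a nil RawMessage instead of producing invalid output (the payload type is illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// payload is an illustrative carrier for an opaque JSON fragment.
type payload struct {
	Extra jsoniter.RawMessage `json:"extra"`
}

func main() {
	var p payload
	if err := jsoniter.Unmarshal([]byte(`{"extra":null}`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Extra == nil) // true: null decodes to a nil RawMessage

	out, err := jsoniter.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"extra":null}: nil encodes back to null
}
```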
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index d7eb0eb5..92ae912d 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -1075,6 +1075,11 @@ type stringModeNumberDecoder struct {
}
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() == NilValue {
+ decoder.elemDecoder.Decode(ptr, iter)
+ return
+ }
+
c := iter.nextToken()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/.travis.yml b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/.travis.yml
index fbb43744..b097728d 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/.travis.yml
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/.travis.yml
@@ -1,7 +1,7 @@
language: go
go:
- - 1.8.x
+ - 1.9.x
- 1.x
before_install:
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.lock b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.lock
index 2a3a6989..10ef8111 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.lock
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.lock
@@ -1,15 +1,9 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-[[projects]]
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
- version = "1.0.0"
-
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
+ input-imports = []
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.toml b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.toml
index 2f4f4dbd..a9bc5061 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.toml
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/Gopkg.toml
@@ -26,10 +26,6 @@
ignored = []
-[[constraint]]
- name = "github.com/modern-go/concurrent"
- version = "1.0.0"
-
[prune]
go-tests = true
unused-packages = true
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_118.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_118.go
new file mode 100644
index 00000000..2b4116f6
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_118.go
@@ -0,0 +1,23 @@
+//+build go1.18
+
+package reflect2
+
+import (
+ "unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+ var it hiter
+ mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it)
+ return &UnsafeMapIterator{
+ hiter: &it,
+ pKeyRType: type2.pKeyRType,
+ pElemRType: type2.pElemRType,
+ }
+}
\ No newline at end of file
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_17.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_17.go
deleted file mode 100644
index 5c1cea86..00000000
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_17.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//+build go1.7
-
-package reflect2
-
-import "unsafe"
-
-//go:linkname resolveTypeOff reflect.resolveTypeOff
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_19.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_19.go
index c7e3b780..974f7685 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_19.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_above_19.go
@@ -6,6 +6,9 @@ import (
"unsafe"
)
+//go:linkname resolveTypeOff reflect.resolveTypeOff
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
//go:linkname makemap reflect.makemap
func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_118.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_118.go
new file mode 100644
index 00000000..00003dbd
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_118.go
@@ -0,0 +1,21 @@
+//+build !go1.18
+
+package reflect2
+
+import (
+ "unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+ return &UnsafeMapIterator{
+ hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
+ pKeyRType: type2.pKeyRType,
+ pElemRType: type2.pElemRType,
+ }
+}
\ No newline at end of file
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_17.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_17.go
deleted file mode 100644
index 65a93c88..00000000
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_17.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//+build !go1.7
-
-package reflect2
-
-import "unsafe"
-
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
- return nil
-}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_19.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_19.go
deleted file mode 100644
index b050ef70..00000000
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/go_below_19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//+build !go1.9
-
-package reflect2
-
-import (
- "unsafe"
-)
-
-//go:linkname makemap reflect.makemap
-func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
-
-func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
- return makemap(rtype)
-}
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/reflect2.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/reflect2.go
index 63b49c79..c43c8b9d 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/reflect2.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/reflect2.go
@@ -1,8 +1,9 @@
package reflect2
import (
- "github.com/modern-go/concurrent"
"reflect"
+ "runtime"
+ "sync"
"unsafe"
)
@@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
type frozenConfig struct {
useSafeImplementation bool
- cache *concurrent.Map
+ cache *sync.Map
}
func (cfg Config) Froze() *frozenConfig {
return &frozenConfig{
useSafeImplementation: cfg.UseSafeImplementation,
- cache: concurrent.NewMap(),
+ cache: new(sync.Map),
}
}
@@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer {
}
func UnsafeCastString(str string) []byte {
+ bytes := make([]byte, 0)
stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
- sliceHeader := &reflect.SliceHeader{
- Data: stringHeader.Data,
- Cap: stringHeader.Len,
- Len: stringHeader.Len,
- }
- return *(*[]byte)(unsafe.Pointer(sliceHeader))
+ sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))
+ sliceHeader.Data = stringHeader.Data
+ sliceHeader.Cap = stringHeader.Len
+ sliceHeader.Len = stringHeader.Len
+ runtime.KeepAlive(str)
+ return bytes
}
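The reflect2 change above swaps the external concurrent.Map for the standard library's sync.Map in the decoder cache. A minimal sketch of the same caching pattern; the typeCache type below is illustrative, not part of reflect2:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

// typeCache mirrors the frozenConfig pattern: a sync.Map keyed by
// reflect.Type, with no third-party dependency.
type typeCache struct {
	cache sync.Map
}

func (c *typeCache) load(t reflect.Type) (string, bool) {
	v, ok := c.cache.Load(t)
	if !ok {
		return "", false
	}
	return v.(string), true
}

func (c *typeCache) store(t reflect.Type, name string) {
	c.cache.Store(t, name)
}

func main() {
	var c typeCache
	c.store(reflect.TypeOf(0), "int codec")
	if name, ok := c.load(reflect.TypeOf(0)); ok {
		fmt.Println(name) // int codec
	}
}
```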
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/test.sh b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/test.sh
deleted file mode 100644
index 3d2b9768..00000000
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
- go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/type_map.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/type_map.go
index 3acfb558..4b13c315 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/type_map.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/type_map.go
@@ -1,17 +1,13 @@
+// +build !gccgo
+
package reflect2
import (
"reflect"
- "runtime"
- "strings"
"sync"
"unsafe"
)
-// typelinks1 for 1.5 ~ 1.6
-//go:linkname typelinks1 reflect.typelinks
-func typelinks1() [][]unsafe.Pointer
-
// typelinks2 for 1.7 ~
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
@@ -27,49 +23,10 @@ func discoverTypes() {
types = make(map[string]reflect.Type)
packages = make(map[string]map[string]reflect.Type)
- ver := runtime.Version()
- if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
- loadGo15Types()
- } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
- loadGo15Types()
- } else {
- loadGo17Types()
- }
-}
-
-func loadGo15Types() {
- var obj interface{} = reflect.TypeOf(0)
- typePtrss := typelinks1()
- for _, typePtrs := range typePtrss {
- for _, typePtr := range typePtrs {
- (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
- typ := obj.(reflect.Type)
- if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
- typ.Elem().Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem().Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- }
- }
+ loadGoTypes()
}
-func loadGo17Types() {
+func loadGoTypes() {
var obj interface{} = reflect.TypeOf(0)
sections, offset := typelinks2()
for i, offs := range offset {
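
With the Go 1.5/1.6 typelinks path removed, every lookup now flows through loadGoTypes and typelinks2. A hedged sketch of exercising that table through reflect2's TypeByName; whether a given type is discoverable depends on its descriptor actually being linked into the binary:

package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

type point struct{ X, Y int }

func main() {
	// Referencing *point through an interface ensures its type
	// descriptor is emitted, so the typelinks walk can find it.
	var p interface{} = &point{X: 1, Y: 2}
	fmt.Println(p)

	// TypeByName is backed by the discoverTypes/loadGoTypes walk above;
	// it returns nil when the named type is not registered.
	if t := reflect2.TypeByName("main.point"); t != nil {
		fmt.Println("found kind:", t.Kind())
	} else {
		fmt.Println("main.point not registered in typelinks")
	}
}
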
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_link.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_link.go
index 57229c8d..b49f614e 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_link.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_link.go
@@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
//go:linkname mapassign reflect.mapassign
//go:noescape
-func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
+func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer)
//go:linkname mapaccess reflect.mapaccess
//go:noescape
func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
-// m escapes into the return value, but the caller of mapiterinit
-// doesn't let the return value escape.
-//go:noescape
-//go:linkname mapiterinit reflect.mapiterinit
-func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
-
//go:noescape
//go:linkname mapiternext reflect.mapiternext
func mapiternext(it *hiter)
@@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
- value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
- // rest fields are ignored
+ key unsafe.Pointer
+ value unsafe.Pointer
+ t unsafe.Pointer
+ h unsafe.Pointer
+ buckets unsafe.Pointer
+ bptr unsafe.Pointer
+ overflow *[]unsafe.Pointer
+ oldoverflow *[]unsafe.Pointer
+ startBucket uintptr
+ offset uint8
+ wrapped bool
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
}
// add returns p+x.
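
The expanded hiter struct must mirror the runtime's map iterator field for field, which is why it has to track runtime releases. For contrast, the supported stdlib way to walk a map reflectively, with no dependency on runtime layout:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// reflect.MapIter is the stdlib counterpart of the hiter mirror
	// above; it iterates without peeking at runtime struct layout.
	it := reflect.ValueOf(m).MapRange()
	for it.Next() {
		fmt.Println(it.Key().String(), it.Value().Int())
	}
}
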
diff --git a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_map.go b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_map.go
index f2e76e6b..37872da8 100644
--- a/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_map.go
+++ b/src/bosh-alicloud-cpi/vendor/github.com/modern-go/reflect2/unsafe_map.go
@@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
return type2.UnsafeIterate(objEFace.data)
}
-func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
- return &UnsafeMapIterator{
- hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
- pKeyRType: type2.pKeyRType,
- pElemRType: type2.pElemRType,
- }
-}
-
type UnsafeMapIterator struct {
*hiter
pKeyRType unsafe.Pointer
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/.golangci.yml b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/.golangci.yml
new file mode 100644
index 00000000..b7256bae
--- /dev/null
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/.golangci.yml
@@ -0,0 +1,21 @@
+linters-settings:
+ nakedret:
+ max-func-lines: 0 # Disallow any unnamed return statement
+
+linters:
+ enable:
+ - deadcode
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+ - nakedret
+ - gofmt
+ - rowserrcheck
+ - unconvert
+ - goimports
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/README.md b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/README.md
index 5d65658b..1e429445 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/README.md
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/README.md
@@ -24,7 +24,7 @@ Package ini provides INI file read and write functionality in Go.
## Installation
-The minimum requirement of Go is **1.6**.
+The minimum requirement of Go is **1.12**.
```sh
$ go get gopkg.in/ini.v1
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/codecov.yml b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/codecov.yml
index fc947f23..31f646ee 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/codecov.yml
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/codecov.yml
@@ -6,4 +6,4 @@ coverage:
threshold: 1%
comment:
- layout: 'diff, files'
+ layout: 'diff'
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/file.go b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/file.go
index f95606f9..7b4e560d 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/file.go
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/file.go
@@ -55,6 +55,9 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File {
if len(opts.KeyValueDelimiterOnWrite) == 0 {
opts.KeyValueDelimiterOnWrite = "="
}
+ if len(opts.ChildSectionDelimiter) == 0 {
+ opts.ChildSectionDelimiter = "."
+ }
return &File{
BlockMode: true,
@@ -82,7 +85,7 @@ func (f *File) NewSection(name string) (*Section, error) {
return nil, errors.New("empty section name")
}
- if f.options.Insensitive && name != DefaultSection {
+ if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
name = strings.ToLower(name)
}
@@ -139,12 +142,18 @@ func (f *File) GetSection(name string) (*Section, error) {
return secs[0], err
}
+// HasSection returns true if the file contains a section with given name.
+func (f *File) HasSection(name string) bool {
+ section, _ := f.GetSection(name)
+ return section != nil
+}
+
// SectionsByName returns all sections with given name.
func (f *File) SectionsByName(name string) ([]*Section, error) {
if len(name) == 0 {
name = DefaultSection
}
- if f.options.Insensitive {
+ if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(name)
}
@@ -165,8 +174,9 @@ func (f *File) SectionsByName(name string) ([]*Section, error) {
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
- // Note: It's OK here because the only possible error is empty section name,
- // but if it's empty, this piece of code won't be executed.
+ if name == "" {
+ name = DefaultSection
+ }
sec, _ = f.NewSection(name)
return sec
}
@@ -236,7 +246,7 @@ func (f *File) DeleteSectionWithIndex(name string, index int) error {
if len(name) == 0 {
name = DefaultSection
}
- if f.options.Insensitive {
+ if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(name)
}
@@ -299,6 +309,9 @@ func (f *File) Reload() (err error) {
}
return err
}
+ if f.options.ShortCircuit {
+ return nil
+ }
}
return nil
}
@@ -347,7 +360,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
}
}
- if i > 0 || DefaultHeader {
+ if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return nil, err
}
@@ -451,6 +464,8 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
val = `"""` + val + `"""`
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
+ } else if len(strings.TrimSpace(val)) != len(val) {
+ val = `"` + val + `"`
}
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
return nil, err
@@ -494,7 +509,7 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
// SaveToIndent writes content to file system with given value indention.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because we are truncating with os.Create,
- // so it's safer to save to a temporary file location and rename afte done.
+ // so it's safer to save to a temporary file location and rename once done.
buf, err := f.writeToBuffer(indent)
if err != nil {
return err
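
The new HasSection helper avoids calling GetSection just to probe for existence. A minimal usage sketch against this ini.v1 revision:

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[database]\nhost = localhost\n"))
	if err != nil {
		panic(err)
	}

	// HasSection discards the GetSection error and reports
	// presence as a plain bool.
	fmt.Println(cfg.HasSection("database")) // true
	fmt.Println(cfg.HasSection("cache"))    // false
}
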
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/ini.go b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/ini.go
index 2961543f..ac2a93a5 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/ini.go
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/ini.go
@@ -1,5 +1,3 @@
-// +build go1.6
-
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
@@ -71,12 +69,18 @@ type LoadOptions struct {
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
+ // InsensitiveSections indicates whether the parser forces all section names to lowercase.
+ InsensitiveSections bool
+ // InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+ InsensitiveKeys bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
IgnoreInlineComment bool
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
SkipUnrecognizableLines bool
+ // ShortCircuit indicates whether to ignore other configuration sources once the first available configuration source has been loaded.
+ ShortCircuit bool
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
// This type of keys are mostly used in my.cnf.
AllowBooleanKeys bool
@@ -107,8 +111,10 @@ type LoadOptions struct {
UnparseableSections []string
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
KeyValueDelimiters string
- // KeyValueDelimiters is the delimiter that are used to separate key and value output. By default, it is "=".
+ // KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value output. By default, it is "=".
KeyValueDelimiterOnWrite string
+ // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+ ChildSectionDelimiter string
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
PreserveSurroundedQuote bool
// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
@@ -117,6 +123,8 @@ type LoadOptions struct {
ReaderBufferSize int
// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
AllowNonUniqueSections bool
+ // AllowDuplicateShadowValues indicates whether duplicate values are allowed for shadowed keys (when false, shadow values are deduplicated).
+ AllowDuplicateShadowValues bool
}
// DebugFunc is the type of function called to log parse events.
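
The LoadOptions additions can be exercised together. A hedged sketch using the library's LoadSources entry point: ShortCircuit stops at the first data source that loads, and InsensitiveSections lowercases section names while leaving key names alone:

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	primary := []byte("[Server]\nPort = 8080\n")
	fallback := []byte("[Server]\nPort = 9090\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		ShortCircuit:        true, // stop after the first source that loads
		InsensitiveSections: true, // section names become lowercase, keys keep their case
	}, primary, fallback)
	if err != nil {
		panic(err)
	}

	// "Server" was lowercased to "server"; the fallback source was never read.
	fmt.Println(cfg.Section("server").Key("Port").String()) // 8080
}
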
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/key.go b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/key.go
index 8baafd9e..0302c291 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/key.go
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/key.go
@@ -54,14 +54,16 @@ func (k *Key) addShadow(val string) error {
return errors.New("cannot add shadow to auto-increment or boolean key")
}
- // Deduplicate shadows based on their values.
- if k.value == val {
- return nil
- }
- for i := range k.shadows {
- if k.shadows[i].value == val {
+ if !k.s.f.options.AllowDuplicateShadowValues {
+ // Deduplicate shadows based on their values.
+ if k.value == val {
return nil
}
+ for i := range k.shadows {
+ if k.shadows[i].value == val {
+ return nil
+ }
+ }
}
shadow := newKey(k.s, k.name, val)
@@ -781,10 +783,8 @@ func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]u
return vals, err
}
-
type Parser func(str string) (interface{}, error)
-
// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
@@ -801,7 +801,6 @@ func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnO
return vals, err
}
-
// doParse transforms strings to different types
func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
vals := make([]interface{}, 0, len(strs))
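
AllowDuplicateShadowValues only toggles the dedup step in addShadow above; it builds on the library's existing shadow-key support. A hedged sketch assuming the pre-existing AllowShadows option and Key.ValueWithShadows accessor, neither of which appears in this diff:

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	src := []byte("[ssh]\nProxyCommand = none\nProxyCommand = none\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowShadows:               true, // keep repeated keys as shadows
		AllowDuplicateShadowValues: true, // skip the value dedup in addShadow
	}, src)
	if err != nil {
		panic(err)
	}

	// Both occurrences survive; with AllowDuplicateShadowValues left
	// false, the second identical value would have been dropped.
	fmt.Println(cfg.Section("ssh").Key("ProxyCommand").ValueWithShadows())
}
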
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/parser.go b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/parser.go
index ea6c08b0..ac1c980a 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/parser.go
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/parser.go
@@ -131,7 +131,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) {
// Check if key name surrounded by quotes.
var keyQuote string
if line[0] == '"' {
- if len(line) > 6 && string(line[0:3]) == `"""` {
+ if len(line) > 6 && line[0:3] == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
@@ -232,7 +232,7 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
}
var valQuote string
- if len(line) > 3 && string(line[0:3]) == `"""` {
+ if len(line) > 3 && line[0:3] == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
@@ -289,12 +289,8 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
line = line[1 : len(line)-1]
} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
- if strings.Contains(line, `\;`) {
- line = strings.Replace(line, `\;`, ";", -1)
- }
- if strings.Contains(line, `\#`) {
- line = strings.Replace(line, `\#`, "#", -1)
- }
+ line = strings.ReplaceAll(line, `\;`, ";")
+ line = strings.ReplaceAll(line, `\#`, "#")
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
@@ -306,15 +302,9 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
- indentSize := 0
for {
peekData, peekErr := peekBuffer.ReadBytes('\n')
- if peekErr != nil {
- if peekErr == io.EOF {
- p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
- return line, nil
- }
-
+ if peekErr != nil && peekErr != io.EOF {
p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
return "", peekErr
}
@@ -333,19 +323,6 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro
return line, nil
}
- // Determine indent size and line prefix.
- currentIndentSize := len(peekMatches[1])
- if indentSize < 1 {
- indentSize = currentIndentSize
- p.debug("readPythonMultilines: indent size is %d", indentSize)
- }
-
- // Make sure each line is indented at least as far as first line.
- if currentIndentSize < indentSize {
- p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
- return line, nil
- }
-
// Advance the parser reader (buffer) in-sync with the peek buffer.
_, err := p.buf.Discard(len(peekData))
if err != nil {
@@ -353,8 +330,7 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro
return "", err
}
- // Handle indented empty line.
- line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
+ line += "\n" + peekMatches[0]
}
}
@@ -377,7 +353,7 @@ func (f *File) parse(reader io.Reader) (err error) {
// Ignore error because default section name is never empty string.
name := DefaultSection
- if f.options.Insensitive {
+ if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(DefaultSection)
}
section, _ := f.NewSection(name)
@@ -465,11 +441,13 @@ func (f *File) parse(reader io.Reader) (err error) {
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
+ // Nested values can't span sections
+ isLastValueEmpty = false
inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
- (f.options.Insensitive && strings.EqualFold(f.options.UnparseableSections[i], name)) {
+ ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
inUnparseableSection = true
continue
}
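
The readPythonMultilines change drops the per-value indent measurement and keeps continuation lines as they appear. A small sketch that exercises that path; since the preserved whitespace is exactly what this revision changes, the value is printed quoted rather than asserted:

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	src := []byte("[multi]\nkey = first line\n    second line\n    third line\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowPythonMultilineValues: true,
	}, src)
	if err != nil {
		panic(err)
	}

	// Continuation lines are appended with their indentation preserved.
	fmt.Printf("%q\n", cfg.Section("multi").Key("key").String())
}
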
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/section.go b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/section.go
index 6ba5ac29..a3615d82 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/section.go
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/section.go
@@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) {
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
- } else if s.f.options.Insensitive {
+ } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
name = strings.ToLower(name)
}
@@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
if s.f.BlockMode {
s.f.lock.RLock()
}
- if s.f.options.Insensitive {
+ if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
name = strings.ToLower(name)
}
key := s.keys[name]
@@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
// Check if it is a child-section.
sname := s.name
for {
- if i := strings.LastIndex(sname, "."); i > -1 {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
@@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
- if i := strings.LastIndex(sname, "."); i > -1 {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
@@ -217,7 +217,7 @@ func (s *Section) KeysHash() map[string]string {
defer s.f.lock.RUnlock()
}
- hash := map[string]string{}
+ hash := make(map[string]string, len(s.keysHash))
for key, value := range s.keysHash {
hash[key] = value
}
@@ -245,7 +245,7 @@ func (s *Section) DeleteKey(name string) {
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
- prefix := s.name + "."
+ prefix := s.name + s.f.options.ChildSectionDelimiter
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
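
ChildSectionDelimiter now drives both child-section lookup and parent-key fallback. A hedged sketch with the delimiter swapped from the default "." to "::":

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	src := []byte("[parent]\ntimeout = 30\n[parent::child]\nname = c1\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		ChildSectionDelimiter: "::",
	}, src)
	if err != nil {
		panic(err)
	}

	child := cfg.Section("parent::child")
	fmt.Println(child.Key("name").String())    // c1
	fmt.Println(child.Key("timeout").String()) // 30, inherited from [parent]

	// ChildSections uses the same delimiter to find "parent::child".
	fmt.Println(len(cfg.Section("parent").ChildSections())) // 1
}
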
diff --git a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/struct.go b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/struct.go
index 1df54719..a486b2fe 100644
--- a/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/struct.go
+++ b/src/bosh-alicloud-cpi/vendor/gopkg.in/ini.v1/struct.go
@@ -263,22 +263,21 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
return nil
}
-func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) {
- opts := strings.SplitN(tag, ",", 4)
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
+ opts := strings.SplitN(tag, ",", 5)
rawName = opts[0]
- if len(opts) > 1 {
- omitEmpty = opts[1] == "omitempty"
+ for _, opt := range opts[1:] {
+ omitEmpty = omitEmpty || (opt == "omitempty")
+ allowShadow = allowShadow || (opt == "allowshadow")
+ allowNonUnique = allowNonUnique || (opt == "nonunique")
+ extends = extends || (opt == "extends")
}
- if len(opts) > 2 {
- allowShadow = opts[2] == "allowshadow"
- }
- if len(opts) > 3 {
- allowNonUnique = opts[3] == "nonunique"
- }
- return rawName, omitEmpty, allowShadow, allowNonUnique
+ return rawName, omitEmpty, allowShadow, allowNonUnique, extends
}
-func (s *Section) mapToField(val reflect.Value, isStrict bool) error {
+// mapToField maps the given value to the matching field of the given section.
+// The sectionIndex is the index (if non-unique sections are enabled) to which the value should be added.
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
@@ -293,7 +292,7 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool) error {
continue
}
- rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag)
+ rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
@@ -301,19 +300,36 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool) error {
isStruct := tpField.Type.Kind() == reflect.Struct
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
- isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
- if isAnonymous {
+ isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ if isAnonymousPtr {
field.Set(reflect.New(tpField.Type.Elem()))
}
- if isAnonymous || isStruct || isStructPtr {
- if sec, err := s.f.GetSection(fieldName); err == nil {
+ if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ fieldSection := s
+ if rawName != "" {
+ sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
+ if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
+ fieldSection = secs[sectionIndex]
+ }
+ }
+ if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ } else if isAnonymousPtr || isStruct || isStructPtr {
+ if secs, err := s.f.SectionsByName(fieldName); err == nil {
+ if len(secs) <= sectionIndex {
+ return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
+ }
// Only set the field to non-nil struct value if we have a section for it.
// Otherwise, we end up with a non-nil struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
- if err = sec.mapToField(field, isStrict); err != nil {
+ if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
return fmt.Errorf("map to field %q: %v", fieldName, err)
}
continue
@@ -350,9 +366,9 @@ func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (
}
typ := val.Type().Elem()
- for _, sec := range secs {
+ for i, sec := range secs {
elem := reflect.New(typ)
- if err = sec.mapToField(elem, isStrict); err != nil {
+ if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
}
@@ -382,7 +398,7 @@ func (s *Section) mapTo(v interface{}, isStrict bool) error {
return nil
}
- return s.mapToField(val, isStrict)
+ return s.mapToField(val, isStrict, 0, s.name)
}
// MapTo maps section to given struct.
@@ -474,7 +490,7 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all
_ = keyWithShadows.AddShadow(val)
}
}
- key = keyWithShadows
+ *key = *keyWithShadows
return nil
}
@@ -576,7 +592,7 @@ func (s *Section) reflectFrom(val reflect.Value) error {
continue
}
- rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag)
+ rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
if omitEmpty && isEmptyValue(field) {
continue
}
@@ -590,7 +606,14 @@ func (s *Section) reflectFrom(val reflect.Value) error {
continue
}
- if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
+ if err := s.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
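
The new extends tag option maps an embedded struct from the same section as its parent instead of a separate section named after the field. A hedged sketch assuming the library's usual Section.MapTo entry point:

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// Base is embedded and flagged with the new "extends" tag option, so its
// fields are read from the same section as the embedding struct rather
// than from a separate [Base] section.
type Base struct {
	Host string
}

type Server struct {
	Base `ini:",extends"`
	Port int
}

func main() {
	cfg, err := ini.Load([]byte("[server]\nHost = example.com\nPort = 8080\n"))
	if err != nil {
		panic(err)
	}

	var s Server
	if err := cfg.Section("server").MapTo(&s); err != nil {
		panic(err)
	}
	fmt.Println(s.Host, s.Port) // example.com 8080
}
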
diff --git a/src/bosh-alicloud-cpi/vendor/modules.txt b/src/bosh-alicloud-cpi/vendor/modules.txt
index ed28ff8b..19205a73 100644
--- a/src/bosh-alicloud-cpi/vendor/modules.txt
+++ b/src/bosh-alicloud-cpi/vendor/modules.txt
@@ -1,7 +1,7 @@
# github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68
## explicit
github.com/alibabacloud-go/debug/debug
-# github.com/alibabacloud-go/tea v1.1.20
+# github.com/alibabacloud-go/tea v1.2.1
## explicit; go 1.14
github.com/alibabacloud-go/tea/tea
github.com/alibabacloud-go/tea/utils
@@ -29,7 +29,7 @@ github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils
github.com/aliyun/alibaba-cloud-sdk-go/services/ecs
github.com/aliyun/alibaba-cloud-sdk-go/services/location
github.com/aliyun/alibaba-cloud-sdk-go/services/slb
-# github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5
+# github.com/aliyun/aliyun-oss-go-sdk v3.0.1+incompatible
## explicit
github.com/aliyun/aliyun-oss-go-sdk/oss
# github.com/aliyun/credentials-go v1.2.7 => github.com/aliyun/credentials-go v1.2.7
@@ -60,20 +60,20 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c
+# github.com/google/uuid v1.4.0
## explicit
github.com/google/uuid
# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
## explicit
github.com/jmespath/go-jmespath
-# github.com/json-iterator/go v1.1.10
+# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
## explicit
github.com/modern-go/concurrent
-# github.com/modern-go/reflect2 v1.0.1
-## explicit
+# github.com/modern-go/reflect2 v1.0.2
+## explicit; go 1.12
github.com/modern-go/reflect2
# github.com/onsi/ginkgo v1.2.0
## explicit
@@ -133,7 +133,7 @@ golang.org/x/text/transform
# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
## explicit
golang.org/x/time/rate
-# gopkg.in/ini.v1 v1.56.0
+# gopkg.in/ini.v1 v1.66.2
## explicit
gopkg.in/ini.v1
# gopkg.in/yaml.v3 v3.0.1