diff --git a/go.mod b/go.mod
index 45ee65f..e8b729e 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,13 @@ go 1.12
require (
cloud.google.com/go v0.38.0
+ github.com/Azure/azure-sdk-for-go v36.1.0+incompatible
+ github.com/Azure/azure-storage-file-go v0.6.0
+ github.com/Azure/go-autorest/autorest v0.9.2
+ github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
+ github.com/Azure/go-autorest/autorest/azure/auth v0.4.0
+ github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
+ github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect
github.com/Microsoft/hcsshim v0.8.7-0.20190801035247-8694eade7dd3 // indirect
github.com/PuerkitoBio/goquery v1.5.0
github.com/aws/aws-sdk-go v1.21.10
@@ -42,7 +49,7 @@ require (
github.com/pkg/errors v0.8.1
github.com/stretchr/testify v1.3.0
github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5
- golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf
+ golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f
golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9 // indirect
golang.org/x/sys v0.0.0-20191104094858-e8c54fb511f6 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
diff --git a/go.sum b/go.sum
index 0ddd9dd..a13522e 100644
--- a/go.sum
+++ b/go.sum
@@ -3,8 +3,43 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=
+github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
+github.com/Azure/azure-sdk-for-go v36.1.0+incompatible h1:smHlbChr/JDmsyUqELZXLs0YIgpXecIGdUibuc2983s=
+github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-file-go v0.6.0 h1:C8DY6l1s1c0mfQXC9ijI1ddDwHdIbvwoDH8agIT9ryk=
+github.com/Azure/azure-storage-file-go v0.6.0/go.mod h1:/En0UPyBtnVgniO08kDwCLL8letVdjIbjIeGmJeziaA=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
+github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI=
+github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -74,7 +109,10 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU=
@@ -425,6 +463,8 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN
github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc=
github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
+github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
@@ -557,12 +597,13 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss=
-golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f h1:kz4KIr+xcPUsI3VMoqWfPMvtnJ6MGfiVwsWSVzphMO4=
+golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
@@ -635,12 +676,14 @@ golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ=
golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191104094858-e8c54fb511f6 h1:ZJUmhYTp8GbGC0ViZRc2U+MIYQ8xx9MscsdXnclfIhw=
diff --git a/provider/azure/app.go b/provider/azure/app.go
new file mode 100644
index 0000000..a767184
--- /dev/null
+++ b/provider/azure/app.go
@@ -0,0 +1,41 @@
+package azure
+
+import (
+ "github.com/convox/convox/pkg/structs"
+ am "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
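+// AppGet wraps the base k8s implementation; for apps with a dedicated router
+// it also resolves the ingress load balancer IP and reports it as the app's
+// router endpoint.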
+func (p *Provider) AppGet(name string) (*structs.App, error) {
+ a, err := p.Provider.AppGet(name)
+ if err != nil {
+ return nil, err
+ }
+
+ switch a.Parameters["Router"] {
+ case "dedicated":
+ ing, err := p.Cluster.ExtensionsV1beta1().Ingresses(p.AppNamespace(a.Name)).Get(a.Name, am.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ if len(ing.Status.LoadBalancer.Ingress) > 0 {
+ a.Router = ing.Status.LoadBalancer.Ingress[0].IP
+ }
+ }
+
+ return a, nil
+}
+
+func (p *Provider) AppIdles(name string) (bool, error) {
+ return false, nil
+}
+
+func (p *Provider) AppParameters() map[string]string {
+ return map[string]string{
+ "Router": "shared",
+ }
+}
+
+func (p *Provider) AppStatus(name string) (string, error) {
+ return "running", nil
+}
diff --git a/provider/azure/azure.go b/provider/azure/azure.go
new file mode 100644
index 0000000..2c3620e
--- /dev/null
+++ b/provider/azure/azure.go
@@ -0,0 +1,164 @@
+package azure
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+
+ "github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage"
+ "github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
+ "github.com/Azure/azure-storage-file-go/azfile"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure/auth"
+ "github.com/convox/convox/pkg/structs"
+ "github.com/convox/convox/provider/k8s"
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+type Provider struct {
+ *k8s.Provider
+
+ ClientID string
+ ClientSecret string
+ Region string
+ Registry string
+ ResourceGroup string
+ StorageAccount string
+ StorageShare string
+ Subscription string
+ Workspace string
+
+ insightLogs *operationalinsights.QueryClient
+ storageDirectory *azfile.DirectoryURL
+}
+
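+// FromEnv builds a Provider on top of the base k8s provider from the
+// environment variables injected by the terraform/api/azure module.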
+func FromEnv() (*Provider, error) {
+ k, err := k8s.FromEnv()
+ if err != nil {
+ return nil, err
+ }
+
+ p := &Provider{
+ Provider: k,
+ ClientID: os.Getenv("AZURE_CLIENT_ID"),
+ ClientSecret: os.Getenv("AZURE_CLIENT_SECRET"),
+ Region: os.Getenv("REGION"),
+ Registry: os.Getenv("REGISTRY"),
+ ResourceGroup: os.Getenv("RESOURCE_GROUP"),
+ StorageAccount: os.Getenv("STORAGE_ACCOUNT"),
+ StorageShare: os.Getenv("STORAGE_SHARE"),
+ Subscription: os.Getenv("AZURE_SUBSCRIPTION_ID"),
+ Workspace: os.Getenv("WORKSPACE"),
+ }
+
+ k.Engine = p
+
+ return p, nil
+}
+
+func (p *Provider) Initialize(opts structs.ProviderOptions) error {
+ if err := p.initializeAzureServices(); err != nil {
+ return err
+ }
+
+ if err := p.Provider.Initialize(opts); err != nil {
+ return err
+ }
+
+ runtime.ErrorHandlers = []func(error){}
+
+ return nil
+}
+
+func (p *Provider) WithContext(ctx context.Context) structs.Provider {
+ pp := *p
+ pp.Provider = pp.Provider.WithContext(ctx).(*k8s.Provider)
+ return &pp
+}
+
+func (p *Provider) initializeAzureServices() error {
+ il, err := p.azureInsightLogs()
+ if err != nil {
+ return err
+ }
+
+ p.insightLogs = il
+
+ sd, err := p.azureStorageDirectory()
+ if err != nil {
+ return err
+ }
+
+ p.storageDirectory = sd
+
+ return nil
+}
+
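+// azureAuthorizer builds an autorest authorizer for the given resource
+// (ARM, Log Analytics, etc) from the AZURE_* environment variables.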
+func (p *Provider) azureAuthorizer(resource string) (autorest.Authorizer, error) {
+ a, err := auth.NewAuthorizerFromEnvironmentWithResource(resource)
+ if err != nil {
+ return nil, err
+ }
+
+ return a, nil
+}
+
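+// azureInsightLogs returns a Log Analytics query client used to read
+// container logs from the rack workspace.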
+func (p *Provider) azureInsightLogs() (*operationalinsights.QueryClient, error) {
+ a, err := p.azureAuthorizer("https://api.loganalytics.io")
+ if err != nil {
+ return nil, err
+ }
+
+ qc := operationalinsights.NewQueryClient()
+ qc.Authorizer = a
+
+ return &qc, nil
+}
+
+func (p *Provider) azureStorageDirectory() (*azfile.DirectoryURL, error) {
+ k, err := p.azureStorageKey()
+ if err != nil {
+ return nil, err
+ }
+
+ cred, err := azfile.NewSharedKeyCredential(p.StorageAccount, k)
+ if err != nil {
+ return nil, err
+ }
+
+ pipe := azfile.NewPipeline(cred, azfile.PipelineOptions{})
+
+ u, err := url.Parse(fmt.Sprintf("https://%s.file.core.windows.net", p.StorageAccount))
+ if err != nil {
+ return nil, err
+ }
+
+ dir := azfile.NewServiceURL(*u, pipe).NewShareURL(p.StorageShare).NewRootDirectoryURL()
+
+ return &dir, nil
+}
+
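+// azureStorageKey lists the rack storage account's access keys via the ARM
+// API and returns the first one.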
+func (p *Provider) azureStorageKey() (string, error) {
+ ctx := context.Background()
+
+ a, err := p.azureAuthorizer("https://management.azure.com")
+ if err != nil {
+ return "", err
+ }
+
+ ac := storage.NewAccountsClient(p.Subscription)
+ ac.Authorizer = a
+
+ res, err := ac.ListKeys(ctx, p.ResourceGroup, p.StorageAccount, storage.Kerb)
+ if err != nil {
+ return "", err
+ }
+ if len(*res.Keys) < 1 {
+ return "", fmt.Errorf("could not find account key")
+ }
+
+ return *(*res.Keys)[0].Value, nil
+}
diff --git a/provider/azure/build.go b/provider/azure/build.go
new file mode 100644
index 0000000..da1d639
--- /dev/null
+++ b/provider/azure/build.go
@@ -0,0 +1,75 @@
+package azure
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os/exec"
+ "strings"
+
+ "github.com/convox/convox/pkg/structs"
+)
+
+func (p *Provider) BuildExport(app, id string, w io.Writer) error {
+ if err := p.authAppRepository(app); err != nil {
+ return err
+ }
+
+ return p.Provider.BuildExport(app, id, w)
+}
+
+func (p *Provider) BuildImport(app string, r io.Reader) (*structs.Build, error) {
+ if err := p.authAppRepository(app); err != nil {
+ return nil, err
+ }
+
+ return p.Provider.BuildImport(app, r)
+}
+
+func (p *Provider) BuildLogs(app, id string, opts structs.LogsOptions) (io.ReadCloser, error) {
+ b, err := p.BuildGet(app, id)
+ if err != nil {
+ return nil, err
+ }
+
+ opts.Since = nil
+
+ switch b.Status {
+ case "running":
+ return p.ProcessLogs(app, b.Process, opts)
+ default:
+ u, err := url.Parse(b.Logs)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "object":
+ return p.ObjectFetch(u.Hostname(), u.Path)
+ default:
+ return nil, fmt.Errorf("unable to read logs for build: %s", id)
+ }
+ }
+}
+
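+// authAppRepository runs "docker login" against the app's registry using the
+// service principal credentials so that subsequent pushes and pulls succeed.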
+func (p *Provider) authAppRepository(app string) error {
+ repo, _, err := p.RepositoryHost(app)
+ if err != nil {
+ return err
+ }
+
+ user, pass, err := p.RepositoryAuth(app)
+ if err != nil {
+ return err
+ }
+
+ cmd := exec.Command("docker", "login", "-u", user, "--password-stdin", repo)
+
+ cmd.Stdin = strings.NewReader(pass)
+
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/provider/azure/certificate.go b/provider/azure/certificate.go
new file mode 100644
index 0000000..6d8c768
--- /dev/null
+++ b/provider/azure/certificate.go
@@ -0,0 +1,46 @@
+package azure
+
+// import (
+// "fmt"
+
+// gc "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/apis/networking.gke.io/v1beta1"
+// "github.com/convox/convox/pkg/structs"
+// am "k8s.io/apimachinery/pkg/apis/meta/v1"
+// )
+
+// func (p *Provider) CertificateGenerate(domains []string) (*structs.Certificate, error) {
+// switch len(domains) {
+// case 0:
+// return nil, fmt.Errorf("must specify a domain")
+// case 1:
+// default:
+// return nil, fmt.Errorf("must specify only one domain for gcp managed certificates")
+// }
+
+// gmc, err := p.gkeManagedCertsClient()
+// if err != nil {
+// return nil, err
+// }
+
+// cert := &gc.ManagedCertificate{
+// ObjectMeta: am.ObjectMeta{
+// GenerateName: "managed-",
+// Namespace: p.Namespace,
+// },
+// Spec: gc.ManagedCertificateSpec{
+// Domains: domains,
+// },
+// Status: gc.ManagedCertificateStatus{
+// DomainStatus: []gc.DomainStatus{},
+// },
+// }
+
+// c, err := gmc.NetworkingV1beta1().ManagedCertificates(p.Namespace).Create(cert)
+// if err != nil {
+// return nil, err
+// }
+
+// fmt.Printf("c: %+v\n", c)
+
+// return &structs.Certificate{}, nil
+// }
diff --git a/provider/azure/deployment.go b/provider/azure/deployment.go
new file mode 100644
index 0000000..f99f97d
--- /dev/null
+++ b/provider/azure/deployment.go
@@ -0,0 +1,5 @@
+package azure
+
+func (p *Provider) DeploymentTimeout() int {
+ return 1800
+}
diff --git a/provider/azure/heartbeat.go b/provider/azure/heartbeat.go
new file mode 100644
index 0000000..92aaae8
--- /dev/null
+++ b/provider/azure/heartbeat.go
@@ -0,0 +1,9 @@
+package azure
+
+func (p *Provider) Heartbeat() (map[string]interface{}, error) {
+ hs := map[string]interface{}{
+ "region": p.Region,
+ }
+
+ return hs, nil
+}
diff --git a/provider/azure/helpers.go b/provider/azure/helpers.go
new file mode 100644
index 0000000..578510c
--- /dev/null
+++ b/provider/azure/helpers.go
@@ -0,0 +1,106 @@
+package azure
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+ "time"
+
+ // gv "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/clientgen/clientset/versioned"
+ am "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func (p *Provider) appRegistry(app string) (string, error) {
+ ns, err := p.Provider.Cluster.CoreV1().Namespaces().Get(p.AppNamespace(app), am.GetOptions{})
+ if err != nil {
+ return "", err
+ }
+
+ registry, ok := ns.ObjectMeta.Annotations["convox.registry"]
+ if !ok {
+ return "", fmt.Errorf("no registry for app: %s", app)
+ }
+
+ return registry, nil
+}
+
+// func (p *Provider) gkeManagedCertsClient() (gv.Interface, error) {
+// return gv.NewForConfig(p.Config)
+// }
+
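+// watchForProcessTermination polls for the process every two seconds and
+// cancels the given context once the process can no longer be found.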
+func (p *Provider) watchForProcessTermination(ctx context.Context, app, pid string, cancel func()) {
+ defer cancel()
+
+ tick := time.NewTicker(2 * time.Second)
+ defer tick.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tick.C:
+ if _, err := p.ProcessGet(app, pid); err != nil {
+ time.Sleep(2 * time.Second)
+ cancel()
+ return
+ }
+ }
+ }
+}
+
+func kubectl(args ...string) error {
+ cmd := exec.Command("kubectl", args...)
+
+ cmd.Env = os.Environ()
+
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return errors.New(strings.TrimSpace(string(out)))
+ }
+
+ return nil
+}
+
+var outputConverter = regexp.MustCompile("([a-z])([A-Z])") // lower case letter followed by upper case
+
+func outputToEnvironment(name string) string {
+ return strings.ToUpper(outputConverter.ReplaceAllString(name, "${1}_${2}"))
+}
+
+func upperName(name string) string {
+ if name == "" {
+ return ""
+ }
+
+ // replace underscores with dashes
+ name = strings.Replace(name, "_", "-", -1)
+
+ // myapp -> Myapp; my-app -> MyApp
+ us := strings.ToUpper(name[0:1]) + name[1:]
+
+ for {
+ i := strings.Index(us, "-")
+
+ if i == -1 {
+ break
+ }
+
+ s := us[0:i]
+
+ if len(us) > i+1 {
+ s += strings.ToUpper(us[i+1 : i+2])
+ }
+
+ if len(us) > i+2 {
+ s += us[i+2:]
+ }
+
+ us = s
+ }
+
+ return us
+}
diff --git a/provider/azure/ingress.go b/provider/azure/ingress.go
new file mode 100644
index 0000000..b3e6880
--- /dev/null
+++ b/provider/azure/ingress.go
@@ -0,0 +1,23 @@
+package azure
+
+func (p *Provider) IngressAnnotations(app string) (map[string]string, error) {
+ a, err := p.AppGet(app)
+ if err != nil {
+ return nil, err
+ }
+
+ ans := map[string]string{
+ "kubernetes.io/ingress.class": "convox",
+ }
+
+ switch a.Parameters["Router"] {
+ case "dedicated":
+ ans["kubernetes.io/ingress.class"] = "gce"
+ }
+
+ return ans, nil
+}
+
+func (p *Provider) IngressSecrets(app string) ([]string, error) {
+ return []string{}, nil
+}
diff --git a/provider/azure/log.go b/provider/azure/log.go
new file mode 100644
index 0000000..1d5be53
--- /dev/null
+++ b/provider/azure/log.go
@@ -0,0 +1,121 @@
+package azure
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
+ "github.com/convox/convox/pkg/common"
+ "github.com/convox/convox/pkg/options"
+ "github.com/convox/convox/pkg/structs"
+)
+
+// var sequenceTokens sync.Map
+
+func (p *Provider) Log(app, stream string, ts time.Time, message string) error {
+ return nil
+}
+
+func (p *Provider) AppLogs(name string, opts structs.LogsOptions) (io.ReadCloser, error) {
+ r, w := io.Pipe()
+
+ go p.insightContainerLogs(p.Context(), w, p.AppNamespace(name), opts)
+
+ return r, nil
+}
+
+func (p *Provider) SystemLogs(opts structs.LogsOptions) (io.ReadCloser, error) {
+ return p.AppLogs("system", opts)
+}
+
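+// insightContainerLogs polls Log Analytics for container log rows in the
+// given namespace by joining KubePodInventory with ContainerLog, writes each
+// row to w, and advances the start marker so every poll only emits new rows.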
+func (p *Provider) insightContainerLogs(ctx context.Context, w io.WriteCloser, namespace string, opts structs.LogsOptions) {
+ defer w.Close()
+
+ since := common.DefaultDuration(opts.Since, 0)
+ start := time.Now().Add(-1 * since)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ // check for closed writer
+ if _, err := w.Write([]byte{}); err != nil {
+ return
+ }
+
+ query := operationalinsights.QueryBody{
+ Query: options.String(fmt.Sprintf("KubePodInventory | join kind=innerunique ContainerLog on ContainerID | project Timestamp=TimeGenerated1,Message=LogEntry,Namespace,Pod=Name,Labels=PodLabel | where Namespace==%q and Timestamp > datetime(%s) | order by Timestamp asc | limit 100", namespace, start.Format("2006-01-02 15:04:05.000"))),
+ Timespan: options.String("P7D"),
+ }
+
+ res, err := p.insightLogs.Execute(context.Background(), p.Workspace, query)
+ if err != nil {
+ fmt.Printf("err: %+v\n", err)
+ return
+ }
+ if len(*res.Tables) < 1 {
+ fmt.Println("no tables")
+ return
+ }
+
+ t := (*res.Tables)[0]
+
+ if len(*t.Rows) == 0 && !common.DefaultBool(opts.Follow, true) {
+ return
+ }
+
+ for _, row := range *t.Rows {
+ attrs := parseRow(row, *t.Columns)
+
+ ts, err := time.Parse("2006-01-02T15:04:05.999Z", attrs["Timestamp"])
+ if err != nil {
+ fmt.Printf("err: %+v\n", err)
+ continue
+ }
+
+ if ts.After(start) {
+ start = ts
+ }
+
+ var labels map[string]string
+
+ if err := json.Unmarshal([]byte(strings.Trim(attrs["Labels"], "[]")), &labels); err != nil {
+ fmt.Printf("err: %+v\n", err)
+ continue
+ }
+
+ service := labels["service"]
+ pod := attrs["Pod"]
+
+ prefix := ""
+
+ if common.DefaultBool(opts.Prefix, false) {
+ prefix = fmt.Sprintf("%s service/%s/%s ", ts.Format(time.RFC3339), service, pod)
+ }
+
+ if _, err := w.Write([]byte(fmt.Sprintf("%s%s\n", prefix, attrs["Message"]))); err != nil {
+ fmt.Printf("err: %+v\n", err)
+ }
+ }
+
+ time.Sleep(5 * time.Second)
+ }
+ }
+}
+
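+// parseRow maps the string values of a query result row onto their column names.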
+func parseRow(row []interface{}, cols []operationalinsights.Column) map[string]string {
+ attrs := map[string]string{}
+
+ for i, c := range cols {
+ if v, ok := row[i].(string); ok && c.Name != nil {
+ attrs[*c.Name] = v
+ }
+ }
+
+ return attrs
+}
diff --git a/provider/azure/manifest.go b/provider/azure/manifest.go
new file mode 100644
index 0000000..db7682f
--- /dev/null
+++ b/provider/azure/manifest.go
@@ -0,0 +1,25 @@
+package azure
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/convox/convox/pkg/manifest"
+)
+
+func (p *Provider) ManifestValidate(m *manifest.Manifest) error {
+ errs := []string{}
+
+ for _, s := range m.Services {
+ if len(s.Volumes) > 0 {
+			errs = append(errs, "shared volumes are not supported on azure")
+ break
+ }
+ }
+
+ if len(errs) > 0 {
+		return fmt.Errorf("manifest validation errors:\n%s", strings.Join(errs, "\n"))
+ }
+
+ return nil
+}
diff --git a/provider/azure/object.go b/provider/azure/object.go
new file mode 100644
index 0000000..2d48f56
--- /dev/null
+++ b/provider/azure/object.go
@@ -0,0 +1,191 @@
+package azure
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/Azure/azure-storage-file-go/azfile"
+ "github.com/convox/convox/pkg/structs"
+)
+
+func (p *Provider) ObjectDelete(app, key string) error {
+ ctx := p.Context()
+
+ if _, err := p.storageFile(p.objectKey(app, key)).Delete(ctx); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *Provider) ObjectExists(app, key string) (bool, error) {
+ if _, err := p.storageFile(p.objectKey(app, key)).GetProperties(p.Context()); err != nil {
+ if azerr, ok := err.(azfile.StorageError); ok && azerr.ServiceCode() == "ResourceNotFound" {
+ return false, nil
+ }
+
+ return false, err
+ }
+
+	// properties were fetched successfully, so the object exists
+	return true, nil
+}
+
+func (p *Provider) ObjectFetch(app, key string) (io.ReadCloser, error) {
+ ctx := p.Context()
+
+ res, err := p.storageFile(p.objectKey(app, key)).Download(ctx, 0, azfile.CountToEnd, false)
+ if err != nil {
+ if azerr, ok := err.(azfile.StorageError); ok && azerr.ServiceCode() == "ResourceNotFound" {
+ return nil, fmt.Errorf("no such key")
+ }
+
+ return nil, err
+ }
+
+ r := res.Body(azfile.RetryReaderOptions{})
+
+ return r, nil
+}
+
+func (p *Provider) ObjectList(app, prefix string) ([]string, error) {
+ ctx := p.Context()
+
+ dir := p.storageDirectory.NewDirectoryURL(p.objectKey(app, prefix))
+
+ fs := []string{}
+
+ for marker := (azfile.Marker{}); marker.NotDone(); {
+ res, err := dir.ListFilesAndDirectoriesSegment(ctx, marker, azfile.ListFilesAndDirectoriesOptions{})
+ if err != nil {
+ if azerr, ok := err.(azfile.StorageError); ok && azerr.ServiceCode() == "ResourceNotFound" {
+ return []string{}, nil
+ }
+
+ return nil, err
+ }
+
+ marker = res.NextMarker
+
+ for _, file := range res.FileItems {
+ fs = append(fs, file.Name)
+ }
+ }
+
+ return fs, nil
+}
+
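+// ObjectStore spools r to a temporary file to learn its size, since the file
+// service requires the length up front, then creates the file at that size
+// and uploads its contents.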
+func (p *Provider) ObjectStore(app, key string, r io.Reader, opts structs.ObjectStoreOptions) (*structs.Object, error) {
+ ctx := p.Context()
+
+ if key == "" {
+ k, err := generateTempKey()
+ if err != nil {
+ return nil, err
+ }
+ key = k
+ }
+
+ name := p.objectKey(app, key)
+
+ if err := p.storageMkdir(name); err != nil {
+ return nil, err
+ }
+
+ fw, err := ioutil.TempFile("", "")
+ if err != nil {
+ return nil, err
+ }
+ defer fw.Close()
+ defer os.Remove(fw.Name())
+
+ if _, err := io.Copy(fw, r); err != nil {
+ return nil, err
+ }
+
+ if err := fw.Close(); err != nil {
+ return nil, err
+ }
+
+ fr, err := os.Open(fw.Name())
+ if err != nil {
+ return nil, err
+ }
+ defer fr.Close()
+
+ stat, err := fr.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ file := p.storageFile(name)
+
+ if _, err := file.Create(ctx, stat.Size(), azfile.FileHTTPHeaders{}, azfile.Metadata{}); err != nil {
+ return nil, err
+ }
+
+ if _, err := file.UploadRange(ctx, 0, fr, nil); err != nil {
+ return nil, err
+ }
+
+ url := fmt.Sprintf("object://%s/%s", app, key)
+
+ o := &structs.Object{Url: url}
+
+ return o, nil
+}
+
+func (p *Provider) objectKey(app, key string) string {
+ return fmt.Sprintf("%s/%s", app, strings.TrimPrefix(key, "/"))
+}
+
+func (p *Provider) storageFile(key string) azfile.FileURL {
+ return p.storageDirectory.NewFileURL(key)
+}
+
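+// storageMkdir creates each intermediate directory of the given file path in
+// the storage share, ignoring directories that already exist.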
+func (p *Provider) storageMkdir(file string) error {
+ ctx := p.Context()
+
+ parts := strings.Split(file, "/")
+ if len(parts) < 2 {
+ return nil
+ }
+
+ dir := *p.storageDirectory
+
+ for _, name := range parts[0 : len(parts)-1] {
+ dir = dir.NewDirectoryURL(name)
+
+ if _, err := dir.Create(ctx, azfile.Metadata{}); err != nil {
+ if azerr, ok := err.(azfile.StorageError); ok {
+ if azerr.ServiceCode() == "ResourceAlreadyExists" {
+ continue
+ }
+ if azerr.ServiceCode() == "ResourceTypeMismatch" {
+ return fmt.Errorf("unable to create directory")
+ }
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
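+// generateTempKey returns a random key under tmp/ for unnamed objects.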
+func generateTempKey() (string, error) {
+ data := make([]byte, 1024)
+
+ if _, err := rand.Read(data); err != nil {
+ return "", err
+ }
+
+ hash := sha256.Sum256(data)
+
+ return fmt.Sprintf("tmp/%s", hex.EncodeToString(hash[:])[0:30]), nil
+}
diff --git a/provider/azure/repository.go b/provider/azure/repository.go
new file mode 100644
index 0000000..91d8579
--- /dev/null
+++ b/provider/azure/repository.go
@@ -0,0 +1,13 @@
+package azure
+
+import (
+ "fmt"
+)
+
+func (p *Provider) RepositoryAuth(app string) (string, string, error) {
+ return p.ClientID, p.ClientSecret, nil
+}
+
+func (p *Provider) RepositoryHost(app string) (string, bool, error) {
+ return fmt.Sprintf("%s/%s", p.Registry, app), true, nil
+}
diff --git a/provider/azure/resolver.go b/provider/azure/resolver.go
new file mode 100644
index 0000000..9e9ddec
--- /dev/null
+++ b/provider/azure/resolver.go
@@ -0,0 +1,7 @@
+package azure
+
+import "fmt"
+
+func (p *Provider) Resolver() (string, error) {
+ return "", fmt.Errorf("no resolver")
+}
diff --git a/provider/azure/service.go b/provider/azure/service.go
new file mode 100644
index 0000000..2ee2e7b
--- /dev/null
+++ b/provider/azure/service.go
@@ -0,0 +1,11 @@
+package azure
+
+import (
+ "fmt"
+
+ "github.com/convox/convox/pkg/manifest"
+)
+
+func (p *Provider) ServiceHost(app string, s manifest.Service) string {
+ return fmt.Sprintf("%s.%s.%s", s.Name, app, p.Domain)
+}
diff --git a/provider/azure/system.go b/provider/azure/system.go
new file mode 100644
index 0000000..94620b3
--- /dev/null
+++ b/provider/azure/system.go
@@ -0,0 +1,9 @@
+package azure
+
+func (p *Provider) SystemHost() string {
+ return p.Domain
+}
+
+func (p *Provider) SystemStatus() (string, error) {
+ return "running", nil
+}
diff --git a/provider/provider.go b/provider/provider.go
index 55703da..7a7912e 100644
--- a/provider/provider.go
+++ b/provider/provider.go
@@ -6,6 +6,7 @@ import (
"github.com/convox/convox/pkg/structs"
"github.com/convox/convox/provider/aws"
+ "github.com/convox/convox/provider/azure"
"github.com/convox/convox/provider/do"
"github.com/convox/convox/provider/gcp"
"github.com/convox/convox/provider/k8s"
@@ -19,6 +20,8 @@ func FromEnv() (structs.Provider, error) {
switch name {
case "aws":
return aws.FromEnv()
+ case "azure":
+ return azure.FromEnv()
case "do":
return do.FromEnv()
case "gcp":
diff --git a/terraform/api/azure/identity.tf b/terraform/api/azure/identity.tf
new file mode 100644
index 0000000..35cf504
--- /dev/null
+++ b/terraform/api/azure/identity.tf
@@ -0,0 +1,44 @@
+# resource "azurerm_user_assigned_identity" "api" {
+# resource_group_name = data.azurerm_resource_group.rack.name
+# location = data.azurerm_resource_group.rack.location
+
+# name = "api"
+# }
+
+# resource "azurerm_role_assignment" "identity-api-contributor" {
+# scope = data.azurerm_resource_group.rack.id
+# role_definition_name = "Contributor"
+# principal_id = azurerm_user_assigned_identity.api.principal_id
+# }
+
+# data "template_file" "identity" {
+# template = file("${path.module}/identity.yml.tpl")
+
+# vars = {
+# namespace = var.namespace
+# resource = azurerm_user_assigned_identity.api.id
+# client = azurerm_user_assigned_identity.api.client_id
+# }
+# }
+
+# resource "null_resource" "deployment" {
+# provisioner "local-exec" {
+# when = "create"
+# command = "echo '${data.template_file.identity.rendered}' | kubectl apply -f -"
+# environment = {
+# "KUBECONFIG" : var.kubeconfig,
+# }
+# }
+
+# provisioner "local-exec" {
+# when = "destroy"
+# command = "echo '${data.template_file.identity.rendered}' | kubectl delete -f -"
+# environment = {
+# "KUBECONFIG" : var.kubeconfig,
+# }
+# }
+
+# triggers = {
+# template = sha256(data.template_file.identity.rendered)
+# }
+# }
diff --git a/terraform/api/azure/identity.yml.tpl b/terraform/api/azure/identity.yml.tpl
new file mode 100644
index 0000000..af3b108
--- /dev/null
+++ b/terraform/api/azure/identity.yml.tpl
@@ -0,0 +1,18 @@
+apiVersion: "aadpodidentity.k8s.io/v1"
+kind: AzureIdentity
+metadata:
+ namespace: ${namespace}
+ name: api
+spec:
+ type: 0
+ ResourceID: ${resource}
+ ClientID: ${client}
+---
+apiVersion: "aadpodidentity.k8s.io/v1"
+kind: AzureIdentityBinding
+metadata:
+ namespace: ${namespace}
+ name: api
+spec:
+ AzureIdentity: api
+ Selector: api
\ No newline at end of file
diff --git a/terraform/api/azure/main.tf b/terraform/api/azure/main.tf
new file mode 100644
index 0000000..9a3cc60
--- /dev/null
+++ b/terraform/api/azure/main.tf
@@ -0,0 +1,77 @@
+terraform {
+ required_version = ">= 0.12.0"
+}
+
+provider "azuread" {
+ version = "~> 0.7"
+}
+
+provider "azurerm" {
+ version = "~> 1.36"
+}
+
+provider "kubernetes" {
+ version = "~> 1.8"
+
+ config_path = var.kubeconfig
+}
+
+provider "template" {
+ version = "~> 2.1"
+}
+
+locals {
+ tags = {
+ System = "convox"
+ Rack = var.name
+ }
+}
+
+data "azurerm_client_config" "current" {}
+
+data "azurerm_resource_group" "rack" {
+ name = var.resource_group
+}
+
+data "azurerm_subscription" "current" {}
+
+resource "random_string" "suffix" {
+ length = 12
+ special = false
+ upper = false
+}
+
+module "k8s" {
+ source = "../k8s"
+
+ providers = {
+ kubernetes = kubernetes
+ }
+
+ domain = var.domain
+ kubeconfig = var.kubeconfig
+ name = var.name
+ namespace = var.namespace
+ release = var.release
+
+ annotations = {}
+
+ labels = {
+ "aadpodidbinding" : "api"
+ }
+
+ env = {
+ AZURE_CLIENT_ID = azuread_service_principal.api.application_id
+ AZURE_CLIENT_SECRET = azuread_service_principal_password.api.value
+ AZURE_SUBSCRIPTION_ID = data.azurerm_subscription.current.subscription_id
+ AZURE_TENANT_ID = data.azurerm_client_config.current.tenant_id
+ PROVIDER = "azure"
+ REGION = var.region
+ REGISTRY = azurerm_container_registry.registry.login_server
+ RESOURCE_GROUP = var.resource_group
+ ROUTER = var.router
+ STORAGE_ACCOUNT = azurerm_storage_account.storage.name
+ STORAGE_SHARE = azurerm_storage_share.storage.name
+ WORKSPACE = var.workspace
+ }
+}
diff --git a/terraform/api/azure/outputs.tf b/terraform/api/azure/outputs.tf
new file mode 100644
index 0000000..ccfa687
--- /dev/null
+++ b/terraform/api/azure/outputs.tf
@@ -0,0 +1,3 @@
+output "endpoint" {
+ value = module.k8s.endpoint
+}
diff --git a/terraform/api/azure/principal.tf b/terraform/api/azure/principal.tf
new file mode 100644
index 0000000..3b16cb0
--- /dev/null
+++ b/terraform/api/azure/principal.tf
@@ -0,0 +1,28 @@
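+# Service principal used by the rack API to manage Azure resources. It is
+# granted Contributor on the rack resource group below and also serves as the
+# registry credential (see provider/azure/repository.go).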
+resource "azuread_application" "api" {
+ name = "api"
+ available_to_other_tenants = false
+ oauth2_allow_implicit_flow = true
+}
+
+resource "azuread_service_principal" "api" {
+ application_id = azuread_application.api.application_id
+ app_role_assignment_required = false
+}
+
+resource "random_string" "api_password" {
+ length = 30
+ special = true
+ upper = true
+}
+
+resource "azuread_service_principal_password" "api" {
+ service_principal_id = azuread_service_principal.api.id
+ value = random_string.api_password.result
+ end_date = "2099-01-01T00:00:00Z"
+}
+
+resource "azurerm_role_assignment" "principal_api_contributor" {
+ scope = data.azurerm_resource_group.rack.id
+ role_definition_name = "Contributor"
+ principal_id = azuread_service_principal.api.id
+}
diff --git a/terraform/api/azure/registry.tf b/terraform/api/azure/registry.tf
new file mode 100644
index 0000000..22c6bf6
--- /dev/null
+++ b/terraform/api/azure/registry.tf
@@ -0,0 +1,6 @@
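+# Registry names must be globally unique alphanumerics, so the rack name is
+# truncated to 12 characters and padded with a random suffix.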
+resource "azurerm_container_registry" "registry" {
+ name = "${format("%.12s", var.name)}${random_string.suffix.result}"
+  resource_group_name = data.azurerm_resource_group.rack.name
+  location            = data.azurerm_resource_group.rack.location
+ sku = "Basic"
+}
diff --git a/terraform/api/azure/storage.tf b/terraform/api/azure/storage.tf
new file mode 100644
index 0000000..71a1fc9
--- /dev/null
+++ b/terraform/api/azure/storage.tf
@@ -0,0 +1,12 @@
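+# Storage account names are limited to 24 lowercase alphanumeric characters,
+# so the rack name is truncated to 12 and padded with a 12-character suffix.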
+resource "azurerm_storage_account" "storage" {
+ name = "${format("%.12s", var.name)}${random_string.suffix.result}"
+  resource_group_name = data.azurerm_resource_group.rack.name
+  location            = data.azurerm_resource_group.rack.location
+ account_tier = "Standard"
+ account_replication_type = "LRS"
+}
+
+resource "azurerm_storage_share" "storage" {
+ name = "storage"
+ storage_account_name = azurerm_storage_account.storage.name
+}
diff --git a/terraform/api/azure/variables.tf b/terraform/api/azure/variables.tf
new file mode 100644
index 0000000..881f76b
--- /dev/null
+++ b/terraform/api/azure/variables.tf
@@ -0,0 +1,35 @@
+variable "domain" {
+ type = "string"
+}
+
+variable "kubeconfig" {
+ type = "string"
+}
+
+variable "name" {
+ type = "string"
+}
+
+variable "namespace" {
+ type = "string"
+}
+
+variable "region" {
+ type = "string"
+}
+
+variable "release" {
+ type = "string"
+}
+
+variable "resource_group" {
+ type = "string"
+}
+
+variable "router" {
+ type = "string"
+}
+
+variable "workspace" {
+ type = "string"
+}
diff --git a/terraform/api/k8s/main.tf b/terraform/api/k8s/main.tf
index bd90281..a726098 100644
--- a/terraform/api/k8s/main.tf
+++ b/terraform/api/k8s/main.tf
@@ -259,7 +259,7 @@ resource "kubernetes_ingress" "api" {
name = "api"
annotations = {
- "convox.idles" : "true"
+ "convox.idles" : "false"
"convox.ingress.service.api.5443.protocol" : "https"
}
diff --git a/terraform/cluster/azure/kubeconfig.tpl b/terraform/cluster/azure/kubeconfig.tpl
new file mode 100644
index 0000000..271142b
--- /dev/null
+++ b/terraform/cluster/azure/kubeconfig.tpl
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: ${ca}
+ server: ${endpoint}
+  name: azure
+contexts:
+- context:
+    cluster: azure
+    user: azure
+  name: azure
+current-context: azure
+kind: Config
+preferences: {}
+users:
+- name: azure
+ user:
+ client-certificate-data: ${client_certificate}
+ client-key-data: ${client_key}
diff --git a/terraform/cluster/azure/main.tf b/terraform/cluster/azure/main.tf
new file mode 100644
index 0000000..9ea5f06
--- /dev/null
+++ b/terraform/cluster/azure/main.tf
@@ -0,0 +1,84 @@
+terraform {
+ required_version = ">= 0.12.0"
+}
+
+provider "azurerm" {
+ version = "~> 1.36"
+}
+
+provider "local" {
+ version = "~> 1.3"
+}
+
+provider "random" {
+ version = "~> 2.2"
+}
+
+data "azurerm_resource_group" "system" {
+ name = var.resource_group
+}
+
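+# Track the latest available 1.14.x Kubernetes patch release for the cluster.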
+data "azurerm_kubernetes_service_versions" "available" {
+ location = var.region
+ version_prefix = "1.14."
+}
+
+resource "random_string" "suffix" {
+ length = 6
+ special = false
+ upper = false
+}
+
+resource "azurerm_log_analytics_workspace" "rack" {
+ name = "${var.name}-${random_string.suffix.result}"
+  location            = data.azurerm_resource_group.system.location
+  resource_group_name = data.azurerm_resource_group.system.name
+ sku = "PerGB2018"
+ retention_in_days = 30
+}
+
+resource "azurerm_kubernetes_cluster" "rack" {
+ name = var.name
+ location = data.azurerm_resource_group.system.location
+ resource_group_name = data.azurerm_resource_group.system.name
+ dns_prefix = var.name
+ kubernetes_version = data.azurerm_kubernetes_service_versions.available.latest_version
+
+ agent_pool_profile {
+ name = "default"
+ count = 3
+ vm_size = var.node_type
+ os_type = "Linux"
+ os_disk_size_gb = 30
+ }
+
+ addon_profile {
+ oms_agent {
+ enabled = true
+ log_analytics_workspace_id = azurerm_log_analytics_workspace.rack.id
+ }
+ }
+
+ service_principal {
+ client_id = azuread_service_principal.cluster.application_id
+ client_secret = azuread_service_principal_password.cluster.value
+ }
+}
+
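+# Write the cluster credentials to a local kubeconfig consumed by the other
+# modules; content changes are ignored after creation.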
+resource "local_file" "kubeconfig" {
+ depends_on = [
+ azurerm_kubernetes_cluster.rack,
+ ]
+
+ filename = pathexpand("~/.kube/config.azure.${var.name}")
+ content = templatefile("${path.module}/kubeconfig.tpl", {
+ ca = azurerm_kubernetes_cluster.rack.kube_config.0.cluster_ca_certificate
+ endpoint = azurerm_kubernetes_cluster.rack.kube_config.0.host
+ client_certificate = azurerm_kubernetes_cluster.rack.kube_config.0.client_certificate
+ client_key = azurerm_kubernetes_cluster.rack.kube_config.0.client_key
+ })
+
+ lifecycle {
+ ignore_changes = [content]
+ }
+}
diff --git a/terraform/cluster/azure/outputs.tf b/terraform/cluster/azure/outputs.tf
new file mode 100644
index 0000000..3410b91
--- /dev/null
+++ b/terraform/cluster/azure/outputs.tf
@@ -0,0 +1,11 @@
+output "kubeconfig" {
+ depends_on = [
+ local_file.kubeconfig,
+ azurerm_kubernetes_cluster.rack,
+ ]
+ value = local_file.kubeconfig.filename
+}
+
+output "workspace" {
+ value = azurerm_log_analytics_workspace.rack.workspace_id
+}
diff --git a/terraform/cluster/azure/principal.tf b/terraform/cluster/azure/principal.tf
new file mode 100644
index 0000000..7415896
--- /dev/null
+++ b/terraform/cluster/azure/principal.tf
@@ -0,0 +1,28 @@
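+# Service principal that the AKS cluster runs as; granted Contributor on the
+# rack resource group.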
+resource "azuread_application" "cluster" {
+ name = "cluster"
+ available_to_other_tenants = false
+ oauth2_allow_implicit_flow = true
+}
+
+resource "azuread_service_principal" "cluster" {
+ application_id = azuread_application.cluster.application_id
+ app_role_assignment_required = false
+}
+
+resource "random_string" "cluster_password" {
+ length = 30
+ special = true
+ upper = true
+}
+
+resource "azuread_service_principal_password" "cluster" {
+ service_principal_id = azuread_service_principal.cluster.id
+ value = random_string.cluster_password.result
+ end_date = "2099-01-01T00:00:00Z"
+}
+
+resource "azurerm_role_assignment" "cluster-contributor" {
+ scope = data.azurerm_resource_group.system.id
+ role_definition_name = "Contributor"
+ principal_id = azuread_service_principal.cluster.id
+}
diff --git a/terraform/cluster/azure/variables.tf b/terraform/cluster/azure/variables.tf
new file mode 100644
index 0000000..4bb2b51
--- /dev/null
+++ b/terraform/cluster/azure/variables.tf
@@ -0,0 +1,15 @@
+variable "name" {
+ type = string
+}
+
+variable "node_type" {
+ type = string
+}
+
+variable "region" {
+ type = string
+}
+
+variable "resource_group" {
+ type = string
+}
diff --git a/terraform/rack/azure/main.tf b/terraform/rack/azure/main.tf
new file mode 100644
index 0000000..7f48da0
--- /dev/null
+++ b/terraform/rack/azure/main.tf
@@ -0,0 +1,61 @@
+terraform {
+ required_version = ">= 0.12.0"
+}
+
+provider "azurerm" {
+  version = "~> 1.36"
+}
+
+provider "kubernetes" {
+ version = "~> 1.9"
+
+ config_path = var.kubeconfig
+}
+
+module "k8s" {
+ source = "../k8s"
+
+ providers = {
+ kubernetes = kubernetes
+ }
+
+ domain = module.router.endpoint
+ kubeconfig = var.kubeconfig
+ name = var.name
+ release = var.release
+}
+
+module "api" {
+ source = "../../api/azure"
+
+ providers = {
+ azurerm = azurerm
+ kubernetes = kubernetes
+ }
+
+ domain = module.router.endpoint
+ kubeconfig = var.kubeconfig
+ name = var.name
+ namespace = module.k8s.namespace
+ region = var.region
+ release = var.release
+ resource_group = var.resource_group
+ router = module.router.endpoint
+ # secret = random_string.secret.result
+ workspace = var.workspace
+}
+
+module "router" {
+ source = "../../router/azure"
+
+ providers = {
+ azurerm = azurerm
+ kubernetes = kubernetes
+ }
+
+ name = var.name
+ namespace = module.k8s.namespace
+ region = var.region
+ release = var.release
+ resource_group = var.resource_group
+}
diff --git a/terraform/rack/azure/outputs.tf b/terraform/rack/azure/outputs.tf
new file mode 100644
index 0000000..4412fd9
--- /dev/null
+++ b/terraform/rack/azure/outputs.tf
@@ -0,0 +1,7 @@
+output "api" {
+ value = module.api.endpoint
+}
+
+output "endpoint" {
+ value = module.router.endpoint
+}
diff --git a/terraform/rack/azure/registry.tf b/terraform/rack/azure/registry.tf
new file mode 100644
index 0000000..9e05592
--- /dev/null
+++ b/terraform/rack/azure/registry.tf
@@ -0,0 +1,174 @@
+# resource "random_string" "suffix" {
+# length = 12
+# special = false
+# upper = false
+# }
+
+# resource "digitalocean_spaces_bucket" "registry" {
+# name = "${var.name}-registry-${random_string.suffix.result}"
+# region = var.region
+# acl = "private"
+# }
+
+# resource "random_string" "secret" {
+# length = 30
+# }
+
+# resource "kubernetes_deployment" "registry" {
+# metadata {
+# namespace = module.k8s.namespace
+# name = "registry"
+
+# labels = {
+#       service = "registry"
+# }
+# }
+
+# spec {
+# min_ready_seconds = 1
+# revision_history_limit = 0
+
+# selector {
+# match_labels = {
+# system = "convox"
+# service = "registry"
+# }
+# }
+
+# strategy {
+# type = "RollingUpdate"
+# rolling_update {
+# max_surge = 1
+# max_unavailable = 0
+# }
+# }
+
+# template {
+# metadata {
+# labels = {
+# system = "convox"
+# service = "registry"
+# }
+# }
+
+# spec {
+# container {
+# name = "main"
+# image = "registry:2"
+# image_pull_policy = "IfNotPresent"
+
+# env {
+# name = "REGISTRY_HTTP_SECRET"
+# value = random_string.secret.result
+# }
+
+# env {
+# name = "REGISTRY_STORAGE"
+# value = "s3"
+# }
+
+# env {
+# name = "REGISTRY_STORAGE_S3_ACCESSKEY"
+# value = var.access_id
+# }
+
+# env {
+# name = "REGISTRY_STORAGE_S3_BUCKET"
+# value = digitalocean_spaces_bucket.registry.name
+# }
+
+# env {
+# name = "REGISTRY_STORAGE_S3_REGION"
+# value = var.region
+# }
+
+# env {
+# name = "REGISTRY_STORAGE_S3_REGIONENDPOINT"
+# value = "https://${var.region}.digitaloceanspaces.com"
+# }
+
+# env {
+# name = "REGISTRY_STORAGE_S3_SECRETKEY"
+# value = var.secret_key
+# }
+
+# port {
+# container_port = 5000
+# protocol = "TCP"
+# }
+
+# volume_mount {
+# name = "registry"
+# mount_path = "/var/lib/registry"
+# }
+# }
+
+# volume {
+# name = "registry"
+
+# host_path {
+# path = "/var/lib/registry"
+# }
+# }
+# }
+# }
+# }
+# }
+
+# resource "kubernetes_service" "registry" {
+# metadata {
+# namespace = module.k8s.namespace
+# name = "registry"
+# }
+
+# spec {
+# type = "ClusterIP"
+
+# selector = {
+# system = "convox"
+# service = "registry"
+# }
+
+# port {
+# name = "http"
+# port = 80
+# target_port = 5000
+# protocol = "TCP"
+# }
+# }
+# }
+# resource "kubernetes_ingress" "registry" {
+# metadata {
+# namespace = module.k8s.namespace
+# name = "registry"
+
+# annotations = {
+# "convox.idles" : "true"
+# }
+
+# labels = {
+# system = "convox"
+# service = "registry"
+# }
+# }
+
+# spec {
+# tls {
+# hosts = ["registry.${module.router.endpoint}"]
+# }
+
+# rule {
+# host = "registry.${module.router.endpoint}"
+
+# http {
+# path {
+# backend {
+# service_name = kubernetes_service.registry.metadata.0.name
+# service_port = 80
+# }
+# }
+# }
+# }
+# }
+# }
+
diff --git a/terraform/rack/azure/variables.tf b/terraform/rack/azure/variables.tf
new file mode 100644
index 0000000..e50d334
--- /dev/null
+++ b/terraform/rack/azure/variables.tf
@@ -0,0 +1,27 @@
+# variable "identity" {
+#   type = string
+# }
+
+variable "kubeconfig" {
+  type = string
+}
+
+variable "name" {
+  type = string
+}
+
+variable "region" {
+  type = string
+}
+
+variable "release" {
+  type = string
+}
+
+variable "resource_group" {
+  type = string
+}
+
+variable "workspace" {
+  type = string
+}
diff --git a/terraform/router/azure/main.tf b/terraform/router/azure/main.tf
new file mode 100644
index 0000000..b84d0ca
--- /dev/null
+++ b/terraform/router/azure/main.tf
@@ -0,0 +1,82 @@
+terraform {
+ required_version = ">= 0.12.0"
+}
+
+provider "azurerm" {
+ version = "~> 1.36"
+}
+
+provider "http" {
+ version = "~> 1.1"
+}
+
+provider "kubernetes" {
+ version = "~> 1.9"
+}
+
+locals {
+ tags = {
+ System = "convox"
+ Rack = var.name
+ }
+}
+
+data "azurerm_resource_group" "rack" {
+ name = var.resource_group
+}
+
+module "k8s" {
+ source = "../k8s"
+
+ providers = {
+ kubernetes = kubernetes
+ }
+
+ namespace = var.namespace
+ release = var.release
+
+ env = {
+ CACHE = "redis"
+ REDIS_ADDR = "${azurerm_redis_cache.cache.hostname}:${azurerm_redis_cache.cache.ssl_port}"
+ REDIS_AUTH = azurerm_redis_cache.cache.primary_access_key
+ REDIS_SECURE = "true"
+ }
+}
+
+resource "kubernetes_service" "router" {
+ metadata {
+ namespace = var.namespace
+ name = "router"
+ }
+
+ spec {
+ type = "LoadBalancer"
+
+ port {
+ name = "http"
+ port = 80
+ protocol = "TCP"
+ target_port = 80
+ }
+
+ port {
+ name = "https"
+ port = 443
+ protocol = "TCP"
+ target_port = 443
+ }
+
+ selector = {
+ system = "convox"
+ service = "router"
+ }
+ }
+
+ lifecycle {
+ ignore_changes = [metadata[0].annotations]
+ }
+}
+
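+# Resolve a DNS alias for the router's load balancer IP via the convox alias
+# service; the response body becomes the rack endpoint.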
+data "http" "alias" {
+ url = "https://alias.convox.com/alias/${kubernetes_service.router.load_balancer_ingress.0.ip}"
+}
diff --git a/terraform/router/azure/outputs.tf b/terraform/router/azure/outputs.tf
new file mode 100644
index 0000000..46bc596
--- /dev/null
+++ b/terraform/router/azure/outputs.tf
@@ -0,0 +1,4 @@
+output "endpoint" {
+ value = data.http.alias.body
+}
+
diff --git a/terraform/router/azure/redis.tf b/terraform/router/azure/redis.tf
new file mode 100644
index 0000000..baf7470
--- /dev/null
+++ b/terraform/router/azure/redis.tf
@@ -0,0 +1,8 @@
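+# Smallest Basic-tier cache (C0), backing the router's shared cache (see the
+# CACHE/REDIS_* environment in main.tf).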
+resource "azurerm_redis_cache" "cache" {
+ name = "${var.name}-router"
+ location = data.azurerm_resource_group.rack.location
+ resource_group_name = data.azurerm_resource_group.rack.name
+ capacity = 0
+ family = "C"
+ sku_name = "Basic"
+}
diff --git a/terraform/router/azure/variables.tf b/terraform/router/azure/variables.tf
new file mode 100644
index 0000000..c9c0015
--- /dev/null
+++ b/terraform/router/azure/variables.tf
@@ -0,0 +1,19 @@
+variable "name" {
+ type = "string"
+}
+
+variable "namespace" {
+ type = "string"
+}
+
+variable "region" {
+ type = "string"
+}
+
+variable "release" {
+ type = "string"
+}
+
+variable "resource_group" {
+ type = "string"
+}
diff --git a/terraform/system/azure/identity/deployment.yml b/terraform/system/azure/identity/deployment.yml
new file mode 100644
index 0000000..2cdcb5c
--- /dev/null
+++ b/terraform/system/azure/identity/deployment.yml
@@ -0,0 +1,172 @@
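+# aad-pod-identity v1.5.3: the CRDs, the per-node NMI DaemonSet, and the MIC
+# controller deployment, vendored here so the rack can bind Azure identities
+# to pods.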
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: azureassignedidentities.aadpodidentity.k8s.io
+spec:
+ group: aadpodidentity.k8s.io
+ version: v1
+ names:
+ kind: AzureAssignedIdentity
+ plural: azureassignedidentities
+ scope: Namespaced
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: azureidentitybindings.aadpodidentity.k8s.io
+spec:
+ group: aadpodidentity.k8s.io
+ version: v1
+ names:
+ kind: AzureIdentityBinding
+ plural: azureidentitybindings
+ scope: Namespaced
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: azureidentities.aadpodidentity.k8s.io
+spec:
+ group: aadpodidentity.k8s.io
+ version: v1
+ names:
+ kind: AzureIdentity
+ singular: azureidentity
+ plural: azureidentities
+ scope: Namespaced
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: azurepodidentityexceptions.aadpodidentity.k8s.io
+spec:
+ group: aadpodidentity.k8s.io
+ version: v1
+ names:
+ kind: AzurePodIdentityException
+ singular: azurepodidentityexception
+ plural: azurepodidentityexceptions
+ scope: Namespaced
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: nmi
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ component: nmi
+ tier: node
+ template:
+ metadata:
+ labels:
+ component: nmi
+ tier: node
+ spec:
+ hostNetwork: true
+ volumes:
+ - hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ name: iptableslock
+ containers:
+ - name: nmi
+ image: "mcr.microsoft.com/k8s/aad-pod-identity/nmi:1.5.3"
+ imagePullPolicy: Always
+ args:
+ - "--host-ip=$(HOST_IP)"
+ - "--node=$(NODE_NAME)"
+ env:
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ privileged: true
+ capabilities:
+ add:
+ - NET_ADMIN
+ resources:
+ limits:
+ cpu: 200m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - mountPath: /run/xtables.lock
+ name: iptableslock
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ initialDelaySeconds: 10
+ periodSeconds: 5
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ component: mic
+ name: mic
+ namespace: kube-system
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ component: mic
+ template:
+ metadata:
+ labels:
+ component: mic
+ spec:
+ containers:
+ - name: mic
+ image: "mcr.microsoft.com/k8s/aad-pod-identity/mic:1.5.3"
+ imagePullPolicy: Always
+ args:
+ - "--kubeconfig=/etc/kubernetes/kubeconfig/kubeconfig"
+ - "--cloudconfig=/etc/kubernetes/azure.json"
+ - "--logtostderr"
+ resources:
+ limits:
+ cpu: 200m
+ memory: 1024Mi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: kubeconfig
+ mountPath: /etc/kubernetes/kubeconfig
+ readOnly: true
+ - name: certificates
+ mountPath: /etc/kubernetes/certs
+ readOnly: true
+ - name: k8s-azure-file
+ mountPath: /etc/kubernetes/azure.json
+ readOnly: true
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ volumes:
+ - name: kubeconfig
+ hostPath:
+ path: /var/lib/kubelet
+ - name: certificates
+ hostPath:
+ path: /etc/kubernetes/certs
+ - name: k8s-azure-file
+ hostPath:
+ path: /etc/kubernetes/azure.json
+ nodeSelector:
+ beta.kubernetes.io/os: linux
diff --git a/terraform/system/azure/identity/main.tf b/terraform/system/azure/identity/main.tf
new file mode 100644
index 0000000..2fc4ea4
--- /dev/null
+++ b/terraform/system/azure/identity/main.tf
@@ -0,0 +1,27 @@
+provider "kubernetes" {
+ version = "~> 1.8"
+
+ config_path = var.kubeconfig
+}
+
+resource "null_resource" "deployment" {
+ provisioner "local-exec" {
+ when = "create"
+ command = "kubectl apply -f ${path.module}/deployment.yml"
+ environment = {
+ "KUBECONFIG" : var.kubeconfig,
+ }
+ }
+
+ provisioner "local-exec" {
+ when = "destroy"
+ command = "kubectl delete -f ${path.module}/deployment.yml"
+ environment = {
+ "KUBECONFIG" : var.kubeconfig,
+ }
+ }
+
+ triggers = {
+ template = filesha256("${path.module}/deployment.yml")
+ }
+}
diff --git a/terraform/system/azure/identity/outputs.tf b/terraform/system/azure/identity/outputs.tf
new file mode 100644
index 0000000..2647af4
--- /dev/null
+++ b/terraform/system/azure/identity/outputs.tf
@@ -0,0 +1,3 @@
+output "id" {
+ value = null_resource.deployment.id
+}
diff --git a/terraform/system/azure/identity/variables.tf b/terraform/system/azure/identity/variables.tf
new file mode 100644
index 0000000..cbd0f35
--- /dev/null
+++ b/terraform/system/azure/identity/variables.tf
@@ -0,0 +1,3 @@
+variable "kubeconfig" {
+ type = string
+}
diff --git a/terraform/system/azure/main.tf b/terraform/system/azure/main.tf
new file mode 100644
index 0000000..3f68756
--- /dev/null
+++ b/terraform/system/azure/main.tf
@@ -0,0 +1,69 @@
+provider "azurerm" {
+ version = "~> 1.36"
+}
+
+provider "http" {
+ version = "~> 1.1"
+}
+
+provider "kubernetes" {
+ version = "~> 1.9"
+
+ config_path = module.cluster.kubeconfig
+}
+
+data "http" "releases" {
+ url = "https://api.github.com/repos/convox/convox/releases"
+}
+
+locals {
+ current = jsondecode(data.http.releases.body).0.tag_name
+ release = coalesce(var.release, local.current)
+}
+
+data "azurerm_client_config" "current" {}
+
+resource "azurerm_resource_group" "rack" {
+ name = var.name
+ location = var.region
+}
+
+module "cluster" {
+ source = "../../cluster/azure"
+
+ providers = {
+ azurerm = azurerm
+ }
+
+ name = var.name
+ node_type = var.node_type
+ region = var.region
+ resource_group = azurerm_resource_group.rack.name
+}
+
+# module "identity" {
+# source = "./identity"
+
+# providers = {
+# kubernetes = kubernetes
+# }
+
+# kubeconfig = module.cluster.kubeconfig
+# }
+
+module "rack" {
+ source = "../../rack/azure"
+
+ providers = {
+ azurerm = azurerm
+ kubernetes = kubernetes
+ }
+
+ # identity = module.identity.id
+ kubeconfig = module.cluster.kubeconfig
+ name = var.name
+ region = var.region
+ release = local.release
+ resource_group = azurerm_resource_group.rack.name
+ workspace = module.cluster.workspace
+}
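local.current above resolves to the tag_name of the first (most recent) entry returned by the GitHub releases API. A minimal Go sketch of the same lookup, for illustration only:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    type release struct {
    	TagName string `json:"tag_name"`
    }

    func main() {
    	res, err := http.Get("https://api.github.com/repos/convox/convox/releases")
    	if err != nil {
    		panic(err)
    	}
    	defer res.Body.Close()

    	var releases []release
    	if err := json.NewDecoder(res.Body).Decode(&releases); err != nil {
    		panic(err)
    	}
    	if len(releases) == 0 {
    		panic("no releases found")
    	}
    	fmt.Println(releases[0].TagName) // the value local.current resolves to
    }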
diff --git a/terraform/system/azure/outputs.tf b/terraform/system/azure/outputs.tf
new file mode 100644
index 0000000..c229346
--- /dev/null
+++ b/terraform/system/azure/outputs.tf
@@ -0,0 +1,7 @@
+output "api" {
+ value = module.rack.api
+}
+
+output "endpoint" {
+ value = module.rack.endpoint
+}
diff --git a/terraform/system/azure/variables.tf b/terraform/system/azure/variables.tf
new file mode 100644
index 0000000..23ade21
--- /dev/null
+++ b/terraform/system/azure/variables.tf
@@ -0,0 +1,15 @@
+variable "name" {
+ type = "string"
+}
+
+variable "node_type" {
+ type = "string"
+}
+
+variable "region" {
+ type = "string"
+}
+
+variable "release" {
+ default = ""
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/LICENSE b/vendor/github.com/Azure/azure-pipeline-go/LICENSE
new file mode 100644
index 0000000..d1ca00f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
new file mode 100644
index 0000000..d7b866c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
@@ -0,0 +1,284 @@
+package pipeline
+
+import (
+ "context"
+ "github.com/mattn/go-ieproxy"
+ "net"
+ "net/http"
+ "os"
+ "time"
+)
+
+// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
+// requires that this Factory create a new instance of its Policy object.
+type Factory interface {
+ New(next Policy, po *PolicyOptions) Policy
+}
+
+// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
+type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc
+
+// New calls f(next,po).
+func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
+ return f(next, po)
+}
+
+// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
+// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
+// Response goes backward through the linked-list for additional processing.
+// NOTE: Request is passed by value so changes do not change the caller's version of
+// the request. However, Request has some fields that reference mutable objects (not strings).
+// These references are copied; a deep copy is not performed. Specifically, this means that
+// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
+// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
+type Policy interface {
+ Do(ctx context.Context, request Request) (Response, error)
+}
+
+// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
+type PolicyFunc func(ctx context.Context, request Request) (Response, error)
+
+// Do calls f(ctx, request).
+func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
+ return f(ctx, request)
+}
+
+// Options configures a Pipeline's behavior.
+type Options struct {
+ HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
+ Log LogOptions
+}
+
+// LogLevel tells a logger the minimum level to log. When code reports a log entry,
+// the LogLevel indicates the level of the log entry. The logger only records entries
+// whose level is at least the level it was told to log. See the Log* constants.
+// For example, if a logger is configured with LogError, then LogError, LogPanic,
+// and LogFatal entries will be logged; lower level entries are ignored.
+type LogLevel uint32
+
+const (
+ // LogNone tells a logger not to log any entries passed to it.
+ LogNone LogLevel = iota
+
+ // LogFatal tells a logger to log all LogFatal entries passed to it.
+ LogFatal
+
+ // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
+ LogPanic
+
+ // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
+ LogError
+
+ // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+ LogWarning
+
+ // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+ LogInfo
+
+ // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+ LogDebug
+)
+
+// LogOptions configures the pipeline's logging mechanism & level filtering.
+type LogOptions struct {
+ Log func(level LogLevel, message string)
+
+ // ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
+ // An application can return different values over its lifetime; this allows the application to dynamically
+ // alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
+ // you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
+ // Usually, the function will be implemented simply like this: return level <= LogWarning
+ ShouldLog func(level LogLevel) bool
+}
+
+type pipeline struct {
+ factories []Factory
+ options Options
+}
+
+// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
+// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
+// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
+// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
+// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
+//
+// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
+// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
+// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
+// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
+type Pipeline interface {
+ Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
+}
+
+// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
+func NewPipeline(factories []Factory, o Options) Pipeline {
+ if o.HTTPSender == nil {
+ o.HTTPSender = newDefaultHTTPClientFactory()
+ }
+ if o.Log.Log == nil {
+ o.Log.Log = func(LogLevel, string) {} // No-op logger
+ }
+ return &pipeline{factories: factories, options: o}
+}
+
+// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
+// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
+// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
+// ultimately sends the transformed HTTP request over the network.
+func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
+ response, err := p.newPolicies(methodFactory).Do(ctx, request)
+ request.close()
+ return response, err
+}
+
+func (p *pipeline) newPolicies(methodFactory Factory) Policy {
+ // The last Policy is the one that actually sends the request over the wire and gets the response.
+ // It is overridable via the Options' HTTPSender field.
+ po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
+ next := p.options.HTTPSender.New(nil, po)
+
+ // Walk over the slice of Factory objects in reverse (from wire to API)
+ markers := 0
+ for i := len(p.factories) - 1; i >= 0; i-- {
+ factory := p.factories[i]
+ if _, ok := factory.(methodFactoryMarker); ok {
+ markers++
+ if markers > 1 {
+ panic("MethodFactoryMarker can only appear once in the pipeline")
+ }
+ if methodFactory != nil {
+ // Replace MethodFactoryMarker with passed-in methodFactory
+ next = methodFactory.New(next, po)
+ }
+ } else {
+ // Use the slice's Factory to construct its Policy
+ next = factory.New(next, po)
+ }
+ }
+
+ // Each Factory has created its Policy
+ if markers == 0 && methodFactory != nil {
+ panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
+ }
+ return next // Return head of the Policy object linked-list
+}
+
+// A PolicyOptions represents optional information that can be used by a node in the
+// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
+// which passes it (if desired) to the Policy object it creates. Today, the Policy object
+// uses the options to perform logging. But, in the future, this could be used for more.
+type PolicyOptions struct {
+ pipeline *pipeline
+}
+
+// ShouldLog returns true if the specified log level should be logged.
+func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
+ if po.pipeline.options.Log.ShouldLog != nil {
+ return po.pipeline.options.Log.ShouldLog(level)
+ }
+ return false
+}
+
+// Log logs a string to the Pipeline's Logger.
+func (po *PolicyOptions) Log(level LogLevel, msg string) {
+ if !po.ShouldLog(level) {
+ return // Short circuit message formatting if we're not logging it
+ }
+
+ // We are logging it, ensure trailing newline
+ if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+ msg += "\n" // Ensure trailing newline
+ }
+ po.pipeline.options.Log.Log(level, msg)
+
+ // If logger doesn't handle fatal/panic, we'll do it here.
+ if level == LogFatal {
+ os.Exit(1)
+ } else if level == LogPanic {
+ panic(msg)
+ }
+}
+
+var pipelineHTTPClient = newDefaultHTTPClient()
+
+func newDefaultHTTPClient() *http.Client {
+ // We want the Transport to have a large connection pool
+ return &http.Client{
+ Transport: &http.Transport{
+ Proxy: ieproxy.GetProxyFunc(),
+ // We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
+ Dial /*Context*/ : (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).Dial, /*Context*/
+ MaxIdleConns: 0, // No limit
+ MaxIdleConnsPerHost: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ DisableKeepAlives: false,
+ DisableCompression: false,
+ MaxResponseHeaderBytes: 0,
+ //ResponseHeaderTimeout: time.Duration{},
+ //ExpectContinueTimeout: time.Duration{},
+ },
+ }
+}
+
+// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests using Go's default http.Client.
+func newDefaultHTTPClientFactory() Factory {
+ return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
+ return func(ctx context.Context, request Request) (Response, error) {
+ r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
+ if err != nil {
+ err = NewError(err, "HTTP request failed")
+ }
+ return NewHTTPResponse(r), err
+ }
+ })
+}
+
+var mfm = methodFactoryMarker{} // Singleton
+
+// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
+// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's
+// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
+func MethodFactoryMarker() Factory {
+ return mfm
+}
+
+type methodFactoryMarker struct {
+}
+
+func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
+ panic("methodFactoryMarker policy should have been replaced with a method policy")
+}
+
+// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog.
+// By default no implementation is provided here, because the pipeline may be used in many different
+// contexts, so the correct implementation is context-dependent.
+type LogSanitizer interface {
+ SanitizeLogMessage(raw string) string
+}
+
+var sanitizer LogSanitizer
+var enableForceLog bool = true
+
+// SetLogSanitizer can be called to supply a custom LogSanitizer.
+// There is no thread safety or locking on the underlying variable,
+// so call this function just once at startup of your application
+// (don't later try to change the sanitizer on the fly).
+func SetLogSanitizer(s LogSanitizer) {
+ sanitizer = s
+}
+
+// SetForceLogEnabled can be used to disable ForceLog.
+// There is no thread safety or locking on the underlying variable,
+// so call this function just once at startup of your application
+// (don't later try to change the setting on the fly).
+func SetForceLogEnabled(enable bool) {
+ enableForceLog = enable
+}
+
+
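For orientation, the Factory/Policy pieces above compose like middleware: each Factory contributes one Policy to a linked list that ends in the HTTP sender. A minimal sketch of a custom logging policy and a pipeline built from it, using only the API defined in this file (the target URL is a placeholder):

    package main

    import (
    	"context"
    	"fmt"
    	"net/url"

    	"github.com/Azure/azure-pipeline-go/pipeline"
    )

    // loggingFactory builds a Policy that logs each outgoing request URL,
    // then forwards the request to the next Policy in the chain.
    func loggingFactory() pipeline.Factory {
    	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
    		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
    			po.Log(pipeline.LogInfo, "sending "+request.URL.String())
    			return next.Do(ctx, request)
    		}
    	})
    }

    func main() {
    	p := pipeline.NewPipeline([]pipeline.Factory{loggingFactory()}, pipeline.Options{
    		Log: pipeline.LogOptions{
    			Log:       func(level pipeline.LogLevel, msg string) { fmt.Print(msg) },
    			ShouldLog: func(level pipeline.LogLevel) bool { return level <= pipeline.LogInfo },
    		},
    	})

    	u, _ := url.Parse("https://example.com/")
    	req, err := pipeline.NewRequest("GET", *u, nil)
    	if err != nil {
    		panic(err)
    	}

    	resp, err := p.Do(context.Background(), nil, req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Response().Body.Close()
    	fmt.Println(resp.Response().Status)
    }

Since Pipeline and Factory objects are goroutine-safe, the pipeline built here would typically be constructed once and reused for many requests.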
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
new file mode 100644
index 0000000..e7ce497
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
@@ -0,0 +1,14 @@
+package pipeline
+
+
+// ForceLog should rarely be used. It forcibly logs an entry to the
+// Windows Event Log (on Windows) or to the SysLog (on Linux).
+func ForceLog(level LogLevel, msg string) {
+ if !enableForceLog {
+ return
+ }
+ if sanitizer != nil {
+ msg = sanitizer.SanitizeLogMessage(msg)
+ }
+ forceLog(level, msg)
+}
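A short sketch of wiring a sanitizer in before any forced logging happens; the scrubber type and the secret here are illustrative:

    package main

    import (
    	"strings"

    	"github.com/Azure/azure-pipeline-go/pipeline"
    )

    // scrubber removes a known secret from forced log lines.
    type scrubber struct{ secret string }

    func (s scrubber) SanitizeLogMessage(raw string) string {
    	return strings.ReplaceAll(raw, s.secret, "REDACTED")
    }

    func main() {
    	pipeline.SetLogSanitizer(scrubber{secret: "hunter2"}) // call once at startup
    	pipeline.ForceLog(pipeline.LogWarning, "auth failed for token hunter2")
    	// The resulting syslog/event-log entry reads: "auth failed for token REDACTED"
    }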
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
new file mode 100644
index 0000000..819509a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
@@ -0,0 +1,33 @@
+// +build !windows,!nacl,!plan9
+
+package pipeline
+
+import (
+ "log"
+ "log/syslog"
+)
+
+// forceLog should rarely be used. It forcibly logs an entry to the
+// Windows Event Log (on Windows) or to the SysLog (on Linux)
+func forceLog(level LogLevel, msg string) {
+ if defaultLogger == nil {
+ return // Return fast if we failed to create the logger.
+ }
+ // We are logging it, ensure trailing newline
+ if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+ msg += "\n" // Ensure trailing newline
+ }
+ switch level {
+ case LogFatal:
+ defaultLogger.Fatal(msg)
+ case LogPanic:
+ defaultLogger.Panic(msg)
+ case LogError, LogWarning, LogInfo:
+ defaultLogger.Print(msg)
+ }
+}
+
+var defaultLogger = func() *log.Logger {
+ l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
+ return l
+}()
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
new file mode 100644
index 0000000..5fcf400
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
@@ -0,0 +1,61 @@
+package pipeline
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// forceLog should rarely be used. It forcibly logs an entry to the
+// Windows Event Log (on Windows) or to the SysLog (on Linux)
+func forceLog(level LogLevel, msg string) {
+ var el eventType
+ switch level {
+ case LogError, LogFatal, LogPanic:
+ el = elError
+ case LogWarning:
+ el = elWarning
+ case LogInfo:
+ el = elInfo
+ }
+ // We are logging it, ensure trailing newline
+ if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+ msg += "\n" // Ensure trailing newline
+ }
+ reportEvent(el, 0, msg)
+}
+
+type eventType int16
+
+const (
+ elSuccess eventType = 0
+ elError eventType = 1
+ elWarning eventType = 2
+ elInfo eventType = 4
+)
+
+var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
+ advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
+ registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
+
+ sourceName, _ := os.Executable()
+ sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
+ handle, _, _ := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
+ if handle == 0 { // Registration failed; logging becomes a no-op
+ return func(eventType eventType, eventID int32, msg string) {}
+ }
+ reportEvent := advAPI32.MustFindProc("ReportEventW")
+ return func(eventType eventType, eventID int32, msg string) {
+ s, _ := syscall.UTF16PtrFromString(msg)
+ _, _, _ = reportEvent.Call(
+ uintptr(handle), // HANDLE hEventLog
+ uintptr(eventType), // WORD wType
+ uintptr(0), // WORD wCategory
+ uintptr(eventID), // DWORD dwEventID
+ uintptr(0), // PSID lpUserSid
+ uintptr(1), // WORD wNumStrings
+ uintptr(0), // DWORD dwDataSize
+ uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
+ uintptr(0)) // LPVOID lpRawData
+ }
+}()
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
new file mode 100644
index 0000000..b5ab05f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
@@ -0,0 +1,161 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package pipeline implements an HTTP request/response middleware pipeline whose
+policy objects mutate an HTTP request's URL, query parameters, and/or headers before
+the request is sent over the wire.
+
+Not all policy objects mutate an HTTP request; some policy objects simply impact the
+flow of requests/responses by performing operations such as logging, retry policies,
+timeouts, failure injection, and deserialization of response payloads.
+
+Implementing the Policy Interface
+
+To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
+method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
+operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
+parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object
+in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
+object sends the HTTP request over the network (by calling the HTTPSender's Do method).
+
+When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
+(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
+or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
+to the code that initiated the original HTTP request.
+
+Here is a template for how to define a pipeline.Policy object:
+
+ type myPolicy struct {
+ next pipeline.Policy
+ // TODO: Add configuration/setting fields here (if desired)...
+ }
+
+ func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+ // TODO: Mutate/process the HTTP request here...
+ response, err := p.next.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
+ // TODO: Mutate/process the HTTP response here...
+ return response, err // Return response/error to previous Policy
+ }
+
+Implementing the Factory Interface
+
+Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
+method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
+passed the next Policy object in the linked list along with a pipeline.PolicyOptions object (which carries a reference
+to the owning pipeline.Pipeline object). The New method should create its corresponding Policy object passing it the
+next Policy and any other configuration/settings fields appropriate for the specific Policy object.
+
+Here is a template for how to define a pipeline.Factory object:
+
+ // NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
+ // this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
+ type myPolicyFactory struct {
+ // TODO: Add any configuration/setting fields if desired...
+ }
+
+ func (f *myPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+ return &myPolicy{next: next} // TODO: Also initialize any configuration/setting fields here (if desired)...
+ }
+
+Using your Factory and Policy objects via a Pipeline
+
+To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
+this slice to the pipeline.NewPipeline function along with a pipeline.Options value.
+
+ func NewPipeline(factories []pipeline.Factory, o pipeline.Options) Pipeline
+
+The Options struct's HTTPSender field accepts a Factory that performs the final send. For simple scenarios,
+leaving HTTPSender nil causes a standard Go http.Client object to be created and used to actually
+send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
+object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
+or other objects that can simulate the network requests for testing purposes.
+
+Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
+wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with a
+context.Context for cancelling the HTTP request (if desired).
+
+ type Pipeline interface {
+ Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
+ }
+
+Do iterates over the slice of Factory objects and tells each one to create its corresponding
+Policy object. After the linked-list of Policy objects has been created, Do calls the first
+Policy object passing it the Context & HTTP request parameters. These parameters now flow through
+all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
+The last Policy object sends the message over the network.
+
+When the network operation completes, the HTTP response and error return values pass
+back through the same Policy objects in reverse order. Most Policy objects ignore the
+response/error but some log the result, retry the operation (depending on the exact
+reason the operation failed), or deserialize the response's body. Your own Policy
+objects can do whatever they like when processing outgoing requests or incoming responses.
+
+Note that after an I/O request runs to completion, the Policy objects for that request
+are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe, allowing
+them to be created once and reused over many I/O operations. This allows for efficient use of
+memory and also makes them safely usable by multiple goroutines concurrently.
+
+Inserting a Method-Specific Factory into the Linked-List of Policy Objects
+
+While Pipeline and Factory objects can be reused over many different operations, it is
+common to have special behavior for a specific operation/method. For example, a method
+may need to deserialize the response's body to an instance of a specific data type.
+To accommodate this, the Pipeline's Do method takes an additional method-specific
+Factory object. The Do method tells this Factory to create a Policy object and
+injects this method-specific Policy object into the linked-list of Policy objects.
+
+When creating a Pipeline object, the slice of Factory objects passed must have 1
+(and only 1) entry marking where the method-specific Factory should be injected.
+The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
+
+ func MethodFactoryMarker() pipeline.Factory
+
+Creating an HTTP Request Object
+
+The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
+Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
+http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
+
+ func NewRequest(method string, url url.URL, body io.ReadSeeker) (request pipeline.Request, err error)
+
+The body, if not nil, must be a readable and seekable stream. A seekable stream is required so
+that upon retry, the final Policy object can seek the stream back to the beginning before retrying
+the network request and re-uploading the body. To report progress, wrap the body with the
+pipeline.NewRequestBodyProgress function; its ProgressReceiver callback is invoked periodically
+while bytes are being read from the body stream and sent over the network.
+
+Processing the HTTP Response
+
+When an HTTP response comes in from the network, a reference to Go's http.Response struct is
+embedded in a struct that implements the pipeline.Response interface:
+
+ type Response interface {
+ Response() *http.Response
+ }
+
+This interface is returned through all the Policy objects. Each Policy object can call the Response
+interface's Response method to examine (or mutate) the embedded http.Response object.
+
+A Policy object can internally define another struct (implementing the pipeline.Response interface)
+that embeds an http.Response and adds additional fields and return this structure to other Policy
+objects. This allows a Policy object to deserialize the body to some other struct and return the
+original http.Response and the additional struct back through the Policy chain. Other Policy objects
+can see the Response but cannot see the additional struct with the deserialized body. After all the
+Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
+The caller of this method can perform a type assertion attempting to get back to the struct type
+really returned by the Policy object. If the type assertion is successful, the caller now has
+access to both the http.Response and the deserialized struct object.*/
+package pipeline
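As a concrete sketch of the marker mechanism described above (the status-code check is illustrative, and the URL is a placeholder):

    package main

    import (
    	"context"
    	"fmt"
    	"net/url"

    	"github.com/Azure/azure-pipeline-go/pipeline"
    )

    func main() {
    	// The marker reserves the slot where each call's method-specific policy is injected.
    	factories := []pipeline.Factory{
    		pipeline.MethodFactoryMarker(),
    	}
    	p := pipeline.NewPipeline(factories, pipeline.Options{})

    	// A per-method factory that inspects the status code on the way back out.
    	methodFactory := pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
    		return func(ctx context.Context, req pipeline.Request) (pipeline.Response, error) {
    			resp, err := next.Do(ctx, req)
    			if err == nil && resp.Response().StatusCode >= 400 {
    				err = pipeline.NewError(nil, "unexpected status: "+resp.Response().Status)
    			}
    			return resp, err
    		}
    	})

    	u, _ := url.Parse("https://example.com/")
    	req, _ := pipeline.NewRequest("GET", *u, nil)
    	resp, err := p.Do(context.Background(), methodFactory, req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Response().Body.Close()
    	fmt.Println(resp.Response().Status)
    }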
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
new file mode 100644
index 0000000..4aaf066
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
@@ -0,0 +1,181 @@
+package pipeline
+
+import (
+ "fmt"
+ "runtime"
+)
+
+type causer interface {
+ Cause() error
+}
+
+func errorWithPC(msg string, pc uintptr) string {
+ s := ""
+ if fn := runtime.FuncForPC(pc); fn != nil {
+ file, line := fn.FileLine(pc)
+ s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
+ }
+ s += msg + "\n\n"
+ return s
+}
+
+func getPC(callersToSkip int) uintptr {
+ // Get the PC of Initialize method's caller.
+ pc := [1]uintptr{}
+ _ = runtime.Callers(callersToSkip, pc[:])
+ return pc[0]
+}
+
+// ErrorNode can be an embedded field in a private error object. This field
+// adds Program Counter support and a 'cause' (reference to a preceding error).
+// When initializing an error type with this embedded field, initialize the
+// ErrorNode field by calling ErrorNode{}.Initialize(cause).
+type ErrorNode struct {
+ pc uintptr // Represents a Program Counter that you can get symbols for.
+ cause error // Refers to the preceding error (or nil)
+}
+
+// Error returns a string with the PC's symbols or "" if the PC is invalid.
+// When defining a new error type, have its Error method call this one passing
+// it the string representation of the error.
+func (e *ErrorNode) Error(msg string) string {
+ s := errorWithPC(msg, e.pc)
+ if e.cause != nil {
+ s += e.cause.Error() + "\n"
+ }
+ return s
+}
+
+// Cause returns the error that preceded this error.
+func (e *ErrorNode) Cause() error { return e.cause }
+
+// Temporary returns true if the error occurred due to a temporary condition.
+func (e ErrorNode) Temporary() bool {
+ type temporary interface {
+ Temporary() bool
+ }
+
+ for err := e.cause; err != nil; {
+ if t, ok := err.(temporary); ok {
+ return t.Temporary()
+ }
+
+ if cause, ok := err.(causer); ok {
+ err = cause.Cause()
+ } else {
+ err = nil
+ }
+ }
+ return false
+}
+
+// Timeout returns true if the error occurred due to time expiring.
+func (e ErrorNode) Timeout() bool {
+ type timeout interface {
+ Timeout() bool
+ }
+
+ for err := e.cause; err != nil; {
+ if t, ok := err.(timeout); ok {
+ return t.Timeout()
+ }
+
+ if cause, ok := err.(causer); ok {
+ err = cause.Cause()
+ } else {
+ err = nil
+ }
+ }
+ return false
+}
+
+// Initialize is used to initialize an embedded ErrorNode field.
+// It captures the caller's program counter and saves the cause (preceding error).
+// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
+// value of 3 is very common; but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
+ pc := getPC(callersToSkip)
+ return ErrorNode{pc: pc, cause: cause}
+}
+
+// Cause walks all the preceding errors and returns the originating error.
+func Cause(err error) error {
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
+
+// ErrorNodeNoCause can be an embedded field in a private error object. This field
+// adds Program Counter support.
+// When initializing an error type with this embedded field, initialize the
+// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
+type ErrorNodeNoCause struct {
+ pc uintptr // Represents a Program Counter that you can get symbols for.
+}
+
+// Error returns a string with the PC's symbols or "" if the PC is invalid.
+// When defining a new error type, have its Error method call this one passing
+// it the string representation of the error.
+func (e *ErrorNodeNoCause) Error(msg string) string {
+ return errorWithPC(msg, e.pc)
+}
+
+// Temporary returns true if the error occurred due to a temporary condition.
+func (e ErrorNodeNoCause) Temporary() bool {
+ return false
+}
+
+// Timeout returns true if the error occurred due to time expiring.
+func (e ErrorNodeNoCause) Timeout() bool {
+ return false
+}
+
+// Initialize is used to initialize an embedded ErrorNode field.
+// It captures the caller's program counter.
+// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
+// value of 3 is very common; but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
+ pc := getPC(callersToSkip)
+ return ErrorNodeNoCause{pc: pc}
+}
+
+// NewError creates a simple string error (like errors.New). But, this
+// error also captures the caller's Program Counter and the preceding error (if provided).
+func NewError(cause error, msg string) error {
+ if cause != nil {
+ return &pcError{
+ ErrorNode: ErrorNode{}.Initialize(cause, 3),
+ msg: msg,
+ }
+ }
+ return &pcErrorNoCause{
+ ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
+ msg: msg,
+ }
+}
+
+// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
+type pcError struct {
+ ErrorNode
+ msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter
+// symbols and calls Error on the preceding error so you can see the full error chain.
+func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
+
+// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNode (PC).
+type pcErrorNoCause struct {
+ ErrorNodeNoCause
+ msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter symbols.
+func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
new file mode 100644
index 0000000..efa3c8e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
@@ -0,0 +1,82 @@
+package pipeline
+
+import "io"
+
+// ********** The following is common between the request body AND the response body.
+
+// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
+type ProgressReceiver func(bytesTransferred int64)
+
+// ********** The following are specific to the request body (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type requestBodyProgress struct {
+ requestBody io.ReadSeeker // Seeking is required to support retries
+ pr ProgressReceiver
+}
+
+// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
+func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
+ if pr == nil {
+ panic("pr must not be nil")
+ }
+ return &requestBodyProgress{requestBody: requestBody, pr: pr}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
+ n, err = rbp.requestBody.Read(p)
+ if err != nil {
+ return
+ }
+ // Invokes the user's callback method to report progress
+ position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
+ if err != nil {
+ panic(err)
+ }
+ rbp.pr(position)
+ return
+}
+
+func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+ return rbp.requestBody.Seek(offset, whence)
+}
+
+// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
+func (rbp *requestBodyProgress) Close() error {
+ if c, ok := rbp.requestBody.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
+// ********** The following are specific to the response body (a ReadCloser)
+
+// This struct is used when reading a body received from the network
+type responseBodyProgress struct {
+ responseBody io.ReadCloser
+ pr ProgressReceiver
+ offset int64
+}
+
+// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
+func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
+ if pr == nil {
+ panic("pr must not be nil")
+ }
+ return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
+ n, err = rbp.responseBody.Read(p)
+ rbp.offset += int64(n)
+
+ // Invokes the user's callback method to report progress
+ rbp.pr(rbp.offset)
+ return
+}
+
+func (rbp *responseBodyProgress) Close() error {
+ return rbp.responseBody.Close()
+}
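A minimal sketch of the request-body wrapper in isolation, draining it locally to stand in for the HTTP transport:

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"strings"

    	"github.com/Azure/azure-pipeline-go/pipeline"
    )

    func main() {
    	body := strings.NewReader("hello, progress reporting")

    	// Wrap the seekable body; each Read reports the absolute position so far.
    	wrapped := pipeline.NewRequestBodyProgress(body, func(bytesTransferred int64) {
    		fmt.Printf("uploaded %d bytes\n", bytesTransferred)
    	})

    	// Draining the wrapper stands in for the HTTP transport reading the body.
    	n, err := io.Copy(ioutil.Discard, wrapped)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("total:", n)
    }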
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
new file mode 100644
index 0000000..1fbe72b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
@@ -0,0 +1,147 @@
+package pipeline
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
+type Request struct {
+ *http.Request
+}
+
+// NewRequest initializes a new HTTP request object with any desired options.
+func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
+ // Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
+
+ // This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
+ request.Request = &http.Request{
+ Method: method,
+ URL: &url,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: url.Host,
+ }
+
+ if body != nil {
+ err = request.SetBody(body)
+ }
+ return
+}
+
+// SetBody sets the body and content length, assumes body is not nil.
+func (r Request) SetBody(body io.ReadSeeker) error {
+ size, err := body.Seek(0, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+
+ body.Seek(0, io.SeekStart)
+ r.ContentLength = size
+ r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
+
+ if size != 0 {
+ r.Body = &retryableRequestBody{body: body}
+ r.GetBody = func() (io.ReadCloser, error) {
+ _, err := body.Seek(0, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ return r.Body, nil
+ }
+ } else {
+ // in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
+ r.Body = http.NoBody
+ r.GetBody = func() (io.ReadCloser, error) {
+ return http.NoBody, nil
+ }
+
+ // close the user-provided empty body
+ if c, ok := body.(io.Closer); ok {
+ c.Close()
+ }
+ }
+
+ return nil
+}
+
+// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
+// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close,
+// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
+// Cancel, Response, and ctx fields. Copy panics if any of these fields are
+// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
+func (r Request) Copy() Request {
+ if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
+ panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
+ "TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
+ }
+ copy := *r.Request // Copy the request
+ urlCopy := *(r.Request.URL) // Copy the URL
+ copy.URL = &urlCopy
+ copy.Header = http.Header{} // Copy the header
+ for k, vs := range r.Header {
+ for _, value := range vs {
+ copy.Header.Add(k, value)
+ }
+ }
+ return Request{Request: &copy} // Return the copy
+}
+
+func (r Request) close() error {
+ if r.Body != nil && r.Body != http.NoBody {
+ c, ok := r.Body.(*retryableRequestBody)
+ if !ok {
+ panic("unexpected request body type (should be *retryableReadSeekerCloser)")
+ }
+ return c.realClose()
+ }
+ return nil
+}
+
+// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
+func (r Request) RewindBody() error {
+ if r.Body != nil && r.Body != http.NoBody {
+ s, ok := r.Body.(io.Seeker)
+ if !ok {
+ panic("unexpected request body type (should be io.Seeker)")
+ }
+
+ // Reset the stream back to the beginning
+ _, err := s.Seek(0, io.SeekStart)
+ return err
+ }
+ return nil
+}
+
+// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type retryableRequestBody struct {
+ body io.ReadSeeker // Seeking is required to support retries
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
+ return b.body.Read(p)
+}
+
+func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+ return b.body.Seek(offset, whence)
+}
+
+func (b *retryableRequestBody) Close() error {
+ // We don't want the underlying transport to close the request body on transient failures so this is a nop.
+ // The pipeline closes the request body upon success.
+ return nil
+}
+
+func (b *retryableRequestBody) realClose() error {
+ if c, ok := b.body.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
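A short sketch of the seekable-body contract that retry policies rely on (the URL is a placeholder):

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"

    	"github.com/Azure/azure-pipeline-go/pipeline"
    )

    func main() {
    	u, _ := url.Parse("https://example.com/upload")

    	// strings.Reader is an io.ReadSeeker, so it satisfies SetBody's contract.
    	req, err := pipeline.NewRequest("PUT", *u, strings.NewReader("payload"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(req.ContentLength) // 7, computed by SetBody via Seek

    	// After a failed attempt, a retry policy rewinds the body before resending.
    	if err := req.RewindBody(); err != nil {
    		panic(err)
    	}
    }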
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
new file mode 100644
index 0000000..f2dc164
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
@@ -0,0 +1,74 @@
+package pipeline
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
+// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
+// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
+// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
+// to the expected struct and returns the struct to its caller.
+type Response interface {
+ Response() *http.Response
+}
+
+// This is the default struct that has the http.Response.
+// A method can replace this struct with its own struct containing an http.Response
+// field and any other additional fields.
+type httpResponse struct {
+ response *http.Response
+}
+
+// NewHTTPResponse is typically called by a Policy object to return a Response object.
+func NewHTTPResponse(response *http.Response) Response {
+ return &httpResponse{response: response}
+}
+
+// This method satisfies the public Response interface's Response method
+func (r httpResponse) Response() *http.Response {
+ return r.response
+}
+
+// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
+// not nil, then these are also written into the Buffer.
+func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
+ // Write the request into the buffer.
+ fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
+ writeHeader(b, request.Header)
+ if response != nil {
+ fmt.Fprintln(b, " --------------------------------------------------------------------------------")
+ fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
+ writeHeader(b, response.Header)
+ }
+ if err != nil {
+ fmt.Fprintln(b, " --------------------------------------------------------------------------------")
+ fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
+ }
+}
+
+// writeHeader appends an HTTP request's or response's header into a Buffer.
+func writeHeader(b *bytes.Buffer, header map[string][]string) {
+ if len(header) == 0 {
+ b.WriteString(" (no headers)\n")
+ return
+ }
+ keys := make([]string, 0, len(header))
+ // Alphabetize the headers
+ for k := range header {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ // Redact the value of any Authorization header to prevent security information from persisting in logs
+ value := interface{}("REDACTED")
+ if !strings.EqualFold(k, "Authorization") {
+ value = header[k]
+ }
+ fmt.Fprintf(b, " %s: %+v\n", k, value)
+ }
+}
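A minimal sketch showing the Authorization redaction in practice, using a placeholder request and a canned response:

    package main

    import (
    	"bytes"
    	"fmt"
    	"net/http"

    	"github.com/Azure/azure-pipeline-go/pipeline"
    )

    func main() {
    	req, err := http.NewRequest("GET", "https://example.com/?a=1", nil)
    	if err != nil {
    		panic(err)
    	}
    	req.Header.Set("Authorization", "Bearer secret-token")

    	resp := &http.Response{
    		Status: "200 OK",
    		Header: http.Header{"Content-Type": {"text/plain"}},
    	}

    	var b bytes.Buffer
    	pipeline.WriteRequestWithResponse(&b, req, resp, nil)
    	fmt.Print(b.String()) // the Authorization value is printed as REDACTED
    }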
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
new file mode 100644
index 0000000..899f996
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
@@ -0,0 +1,9 @@
+package pipeline
+
+const (
+ // UserAgent is the string to be used in the user agent string when making requests.
+ UserAgent = "azure-pipeline-go/" + Version
+
+ // Version is the semantic version (see http://semver.org) of the pipeline package.
+ Version = "0.2.1"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
new file mode 100644
index 0000000..af39a91
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE
new file mode 100644
index 0000000..2d1d726
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE
@@ -0,0 +1,5 @@
+Microsoft Azure-SDK-for-Go
+Copyright 2014-2017 Microsoft
+
+This product includes software developed at
+the Microsoft Corporation (https://www.microsoft.com).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage/models.go
new file mode 100644
index 0000000..d81a72a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage/models.go
@@ -0,0 +1,623 @@
+// +build go1.9
+
+// Copyright 2019 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was auto-generated by:
+// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
+
+package storage
+
+import (
+ "context"
+
+ original "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
+)
+
+const (
+ DefaultBaseURI = original.DefaultBaseURI
+)
+
+type AccessTier = original.AccessTier
+
+const (
+ Cool AccessTier = original.Cool
+ Hot AccessTier = original.Hot
+)
+
+type AccountExpand = original.AccountExpand
+
+const (
+ AccountExpandGeoReplicationStats AccountExpand = original.AccountExpandGeoReplicationStats
+)
+
+type AccountStatus = original.AccountStatus
+
+const (
+ Available AccountStatus = original.Available
+ Unavailable AccountStatus = original.Unavailable
+)
+
+type Action = original.Action
+
+const (
+ Allow Action = original.Allow
+)
+
+type Action1 = original.Action1
+
+const (
+ Acquire Action1 = original.Acquire
+ Break Action1 = original.Break
+ Change Action1 = original.Change
+ Release Action1 = original.Release
+ Renew Action1 = original.Renew
+)
+
+type Bypass = original.Bypass
+
+const (
+ AzureServices Bypass = original.AzureServices
+ Logging Bypass = original.Logging
+ Metrics Bypass = original.Metrics
+ None Bypass = original.None
+)
+
+type DefaultAction = original.DefaultAction
+
+const (
+ DefaultActionAllow DefaultAction = original.DefaultActionAllow
+ DefaultActionDeny DefaultAction = original.DefaultActionDeny
+)
+
+type DirectoryServiceOptions = original.DirectoryServiceOptions
+
+const (
+ DirectoryServiceOptionsAADDS DirectoryServiceOptions = original.DirectoryServiceOptionsAADDS
+ DirectoryServiceOptionsAD DirectoryServiceOptions = original.DirectoryServiceOptionsAD
+ DirectoryServiceOptionsNone DirectoryServiceOptions = original.DirectoryServiceOptionsNone
+)
+
+type GeoReplicationStatus = original.GeoReplicationStatus
+
+const (
+ GeoReplicationStatusBootstrap GeoReplicationStatus = original.GeoReplicationStatusBootstrap
+ GeoReplicationStatusLive GeoReplicationStatus = original.GeoReplicationStatusLive
+ GeoReplicationStatusUnavailable GeoReplicationStatus = original.GeoReplicationStatusUnavailable
+)
+
+type HTTPProtocol = original.HTTPProtocol
+
+const (
+ HTTPS HTTPProtocol = original.HTTPS
+ Httpshttp HTTPProtocol = original.Httpshttp
+)
+
+type ImmutabilityPolicyState = original.ImmutabilityPolicyState
+
+const (
+ Locked ImmutabilityPolicyState = original.Locked
+ Unlocked ImmutabilityPolicyState = original.Unlocked
+)
+
+type ImmutabilityPolicyUpdateType = original.ImmutabilityPolicyUpdateType
+
+const (
+ Extend ImmutabilityPolicyUpdateType = original.Extend
+ Lock ImmutabilityPolicyUpdateType = original.Lock
+ Put ImmutabilityPolicyUpdateType = original.Put
+)
+
+type KeyPermission = original.KeyPermission
+
+const (
+ Full KeyPermission = original.Full
+ Read KeyPermission = original.Read
+)
+
+type KeySource = original.KeySource
+
+const (
+ MicrosoftKeyvault KeySource = original.MicrosoftKeyvault
+ MicrosoftStorage KeySource = original.MicrosoftStorage
+)
+
+type Kind = original.Kind
+
+const (
+ BlobStorage Kind = original.BlobStorage
+ BlockBlobStorage Kind = original.BlockBlobStorage
+ FileStorage Kind = original.FileStorage
+ Storage Kind = original.Storage
+ StorageV2 Kind = original.StorageV2
+)
+
+type LargeFileSharesState = original.LargeFileSharesState
+
+const (
+ Disabled LargeFileSharesState = original.Disabled
+ Enabled LargeFileSharesState = original.Enabled
+)
+
+type LeaseDuration = original.LeaseDuration
+
+const (
+ Fixed LeaseDuration = original.Fixed
+ Infinite LeaseDuration = original.Infinite
+)
+
+type LeaseState = original.LeaseState
+
+const (
+ LeaseStateAvailable LeaseState = original.LeaseStateAvailable
+ LeaseStateBreaking LeaseState = original.LeaseStateBreaking
+ LeaseStateBroken LeaseState = original.LeaseStateBroken
+ LeaseStateExpired LeaseState = original.LeaseStateExpired
+ LeaseStateLeased LeaseState = original.LeaseStateLeased
+)
+
+type LeaseStatus = original.LeaseStatus
+
+const (
+ LeaseStatusLocked LeaseStatus = original.LeaseStatusLocked
+ LeaseStatusUnlocked LeaseStatus = original.LeaseStatusUnlocked
+)
+
+type ListKeyExpand = original.ListKeyExpand
+
+const (
+ Kerb ListKeyExpand = original.Kerb
+)
+
+type Permissions = original.Permissions
+
+const (
+ A Permissions = original.A
+ C Permissions = original.C
+ D Permissions = original.D
+ L Permissions = original.L
+ P Permissions = original.P
+ R Permissions = original.R
+ U Permissions = original.U
+ W Permissions = original.W
+)
+
+type PrivateEndpointConnectionProvisioningState = original.PrivateEndpointConnectionProvisioningState
+
+const (
+ Creating PrivateEndpointConnectionProvisioningState = original.Creating
+ Deleting PrivateEndpointConnectionProvisioningState = original.Deleting
+ Failed PrivateEndpointConnectionProvisioningState = original.Failed
+ Succeeded PrivateEndpointConnectionProvisioningState = original.Succeeded
+)
+
+type PrivateEndpointServiceConnectionStatus = original.PrivateEndpointServiceConnectionStatus
+
+const (
+ Approved PrivateEndpointServiceConnectionStatus = original.Approved
+ Pending PrivateEndpointServiceConnectionStatus = original.Pending
+ Rejected PrivateEndpointServiceConnectionStatus = original.Rejected
+)
+
+type ProvisioningState = original.ProvisioningState
+
+const (
+ ProvisioningStateCreating ProvisioningState = original.ProvisioningStateCreating
+ ProvisioningStateResolvingDNS ProvisioningState = original.ProvisioningStateResolvingDNS
+ ProvisioningStateSucceeded ProvisioningState = original.ProvisioningStateSucceeded
+)
+
+type PublicAccess = original.PublicAccess
+
+const (
+ PublicAccessBlob PublicAccess = original.PublicAccessBlob
+ PublicAccessContainer PublicAccess = original.PublicAccessContainer
+ PublicAccessNone PublicAccess = original.PublicAccessNone
+)
+
+type Reason = original.Reason
+
+const (
+ AccountNameInvalid Reason = original.AccountNameInvalid
+ AlreadyExists Reason = original.AlreadyExists
+)
+
+type ReasonCode = original.ReasonCode
+
+const (
+ NotAvailableForSubscription ReasonCode = original.NotAvailableForSubscription
+ QuotaID ReasonCode = original.QuotaID
+)
+
+type Services = original.Services
+
+const (
+ B Services = original.B
+ F Services = original.F
+ Q Services = original.Q
+ T Services = original.T
+)
+
+type SignedResource = original.SignedResource
+
+const (
+ SignedResourceB SignedResource = original.SignedResourceB
+ SignedResourceC SignedResource = original.SignedResourceC
+ SignedResourceF SignedResource = original.SignedResourceF
+ SignedResourceS SignedResource = original.SignedResourceS
+)
+
+type SignedResourceTypes = original.SignedResourceTypes
+
+const (
+ SignedResourceTypesC SignedResourceTypes = original.SignedResourceTypesC
+ SignedResourceTypesO SignedResourceTypes = original.SignedResourceTypesO
+ SignedResourceTypesS SignedResourceTypes = original.SignedResourceTypesS
+)
+
+type SkuName = original.SkuName
+
+const (
+ PremiumLRS SkuName = original.PremiumLRS
+ PremiumZRS SkuName = original.PremiumZRS
+ StandardGRS SkuName = original.StandardGRS
+ StandardGZRS SkuName = original.StandardGZRS
+ StandardLRS SkuName = original.StandardLRS
+ StandardRAGRS SkuName = original.StandardRAGRS
+ StandardRAGZRS SkuName = original.StandardRAGZRS
+ StandardZRS SkuName = original.StandardZRS
+)
+
+type SkuTier = original.SkuTier
+
+const (
+ Premium SkuTier = original.Premium
+ Standard SkuTier = original.Standard
+)
+
+type State = original.State
+
+const (
+ StateDeprovisioning State = original.StateDeprovisioning
+ StateFailed State = original.StateFailed
+ StateNetworkSourceDeleted State = original.StateNetworkSourceDeleted
+ StateProvisioning State = original.StateProvisioning
+ StateSucceeded State = original.StateSucceeded
+)
+
+type UsageUnit = original.UsageUnit
+
+const (
+ Bytes UsageUnit = original.Bytes
+ BytesPerSecond UsageUnit = original.BytesPerSecond
+ Count UsageUnit = original.Count
+ CountsPerSecond UsageUnit = original.CountsPerSecond
+ Percent UsageUnit = original.Percent
+ Seconds UsageUnit = original.Seconds
+)
+
+type Account = original.Account
+type AccountCheckNameAvailabilityParameters = original.AccountCheckNameAvailabilityParameters
+type AccountCreateParameters = original.AccountCreateParameters
+type AccountKey = original.AccountKey
+type AccountListKeysResult = original.AccountListKeysResult
+type AccountListResult = original.AccountListResult
+type AccountListResultIterator = original.AccountListResultIterator
+type AccountListResultPage = original.AccountListResultPage
+type AccountProperties = original.AccountProperties
+type AccountPropertiesCreateParameters = original.AccountPropertiesCreateParameters
+type AccountPropertiesUpdateParameters = original.AccountPropertiesUpdateParameters
+type AccountRegenerateKeyParameters = original.AccountRegenerateKeyParameters
+type AccountSasParameters = original.AccountSasParameters
+type AccountUpdateParameters = original.AccountUpdateParameters
+type AccountsClient = original.AccountsClient
+type AccountsCreateFuture = original.AccountsCreateFuture
+type AccountsFailoverFuture = original.AccountsFailoverFuture
+type ActiveDirectoryProperties = original.ActiveDirectoryProperties
+type AzureEntityResource = original.AzureEntityResource
+type AzureFilesIdentityBasedAuthentication = original.AzureFilesIdentityBasedAuthentication
+type BaseClient = original.BaseClient
+type BlobContainer = original.BlobContainer
+type BlobContainersClient = original.BlobContainersClient
+type BlobServiceItems = original.BlobServiceItems
+type BlobServiceProperties = original.BlobServiceProperties
+type BlobServicePropertiesProperties = original.BlobServicePropertiesProperties
+type BlobServicesClient = original.BlobServicesClient
+type ChangeFeed = original.ChangeFeed
+type CheckNameAvailabilityResult = original.CheckNameAvailabilityResult
+type CloudError = original.CloudError
+type CloudErrorBody = original.CloudErrorBody
+type ContainerProperties = original.ContainerProperties
+type CorsRule = original.CorsRule
+type CorsRules = original.CorsRules
+type CustomDomain = original.CustomDomain
+type DateAfterCreation = original.DateAfterCreation
+type DateAfterModification = original.DateAfterModification
+type DeleteRetentionPolicy = original.DeleteRetentionPolicy
+type Dimension = original.Dimension
+type Encryption = original.Encryption
+type EncryptionService = original.EncryptionService
+type EncryptionServices = original.EncryptionServices
+type Endpoints = original.Endpoints
+type ErrorResponse = original.ErrorResponse
+type FileServiceItems = original.FileServiceItems
+type FileServiceProperties = original.FileServiceProperties
+type FileServicePropertiesProperties = original.FileServicePropertiesProperties
+type FileServicesClient = original.FileServicesClient
+type FileShare = original.FileShare
+type FileShareItem = original.FileShareItem
+type FileShareItems = original.FileShareItems
+type FileShareItemsIterator = original.FileShareItemsIterator
+type FileShareItemsPage = original.FileShareItemsPage
+type FileShareProperties = original.FileShareProperties
+type FileSharesClient = original.FileSharesClient
+type GeoReplicationStats = original.GeoReplicationStats
+type IPRule = original.IPRule
+type Identity = original.Identity
+type ImmutabilityPolicy = original.ImmutabilityPolicy
+type ImmutabilityPolicyProperties = original.ImmutabilityPolicyProperties
+type ImmutabilityPolicyProperty = original.ImmutabilityPolicyProperty
+type KeyVaultProperties = original.KeyVaultProperties
+type LeaseContainerRequest = original.LeaseContainerRequest
+type LeaseContainerResponse = original.LeaseContainerResponse
+type LegalHold = original.LegalHold
+type LegalHoldProperties = original.LegalHoldProperties
+type ListAccountSasResponse = original.ListAccountSasResponse
+type ListContainerItem = original.ListContainerItem
+type ListContainerItems = original.ListContainerItems
+type ListContainerItemsIterator = original.ListContainerItemsIterator
+type ListContainerItemsPage = original.ListContainerItemsPage
+type ListServiceSasResponse = original.ListServiceSasResponse
+type ManagementPoliciesClient = original.ManagementPoliciesClient
+type ManagementPolicy = original.ManagementPolicy
+type ManagementPolicyAction = original.ManagementPolicyAction
+type ManagementPolicyBaseBlob = original.ManagementPolicyBaseBlob
+type ManagementPolicyDefinition = original.ManagementPolicyDefinition
+type ManagementPolicyFilter = original.ManagementPolicyFilter
+type ManagementPolicyProperties = original.ManagementPolicyProperties
+type ManagementPolicyRule = original.ManagementPolicyRule
+type ManagementPolicySchema = original.ManagementPolicySchema
+type ManagementPolicySnapShot = original.ManagementPolicySnapShot
+type MetricSpecification = original.MetricSpecification
+type NetworkRuleSet = original.NetworkRuleSet
+type Operation = original.Operation
+type OperationDisplay = original.OperationDisplay
+type OperationListResult = original.OperationListResult
+type OperationProperties = original.OperationProperties
+type OperationsClient = original.OperationsClient
+type PrivateEndpoint = original.PrivateEndpoint
+type PrivateEndpointConnection = original.PrivateEndpointConnection
+type PrivateEndpointConnectionProperties = original.PrivateEndpointConnectionProperties
+type PrivateEndpointConnectionsClient = original.PrivateEndpointConnectionsClient
+type PrivateLinkResource = original.PrivateLinkResource
+type PrivateLinkResourceListResult = original.PrivateLinkResourceListResult
+type PrivateLinkResourceProperties = original.PrivateLinkResourceProperties
+type PrivateLinkResourcesClient = original.PrivateLinkResourcesClient
+type PrivateLinkServiceConnectionState = original.PrivateLinkServiceConnectionState
+type ProxyResource = original.ProxyResource
+type Resource = original.Resource
+type Restriction = original.Restriction
+type SKUCapability = original.SKUCapability
+type ServiceSasParameters = original.ServiceSasParameters
+type ServiceSpecification = original.ServiceSpecification
+type Sku = original.Sku
+type SkuListResult = original.SkuListResult
+type SkusClient = original.SkusClient
+type TagProperty = original.TagProperty
+type TrackedResource = original.TrackedResource
+type UpdateHistoryProperty = original.UpdateHistoryProperty
+type Usage = original.Usage
+type UsageListResult = original.UsageListResult
+type UsageName = original.UsageName
+type UsagesClient = original.UsagesClient
+type VirtualNetworkRule = original.VirtualNetworkRule
+
+func New(subscriptionID string) BaseClient {
+ return original.New(subscriptionID)
+}
+func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
+ return original.NewAccountListResultIterator(page)
+}
+func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
+ return original.NewAccountListResultPage(getNextPage)
+}
+func NewAccountsClient(subscriptionID string) AccountsClient {
+ return original.NewAccountsClient(subscriptionID)
+}
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
+ return original.NewAccountsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewBlobContainersClient(subscriptionID string) BlobContainersClient {
+ return original.NewBlobContainersClient(subscriptionID)
+}
+func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient {
+ return original.NewBlobContainersClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
+ return original.NewBlobServicesClient(subscriptionID)
+}
+func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
+ return original.NewBlobServicesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewFileServicesClient(subscriptionID string) FileServicesClient {
+ return original.NewFileServicesClient(subscriptionID)
+}
+func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
+ return original.NewFileServicesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
+ return original.NewFileShareItemsIterator(page)
+}
+func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
+ return original.NewFileShareItemsPage(getNextPage)
+}
+func NewFileSharesClient(subscriptionID string) FileSharesClient {
+ return original.NewFileSharesClient(subscriptionID)
+}
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
+ return original.NewFileSharesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator {
+ return original.NewListContainerItemsIterator(page)
+}
+func NewListContainerItemsPage(getNextPage func(context.Context, ListContainerItems) (ListContainerItems, error)) ListContainerItemsPage {
+ return original.NewListContainerItemsPage(getNextPage)
+}
+func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient {
+ return original.NewManagementPoliciesClient(subscriptionID)
+}
+func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient {
+ return original.NewManagementPoliciesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return original.NewOperationsClient(subscriptionID)
+}
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient {
+ return original.NewPrivateEndpointConnectionsClient(subscriptionID)
+}
+func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient {
+ return original.NewPrivateEndpointConnectionsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient {
+ return original.NewPrivateLinkResourcesClient(subscriptionID)
+}
+func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient {
+ return original.NewPrivateLinkResourcesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewSkusClient(subscriptionID string) SkusClient {
+ return original.NewSkusClient(subscriptionID)
+}
+func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
+ return original.NewSkusClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewUsagesClient(subscriptionID string) UsagesClient {
+ return original.NewUsagesClient(subscriptionID)
+}
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+ return original.NewUsagesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return original.NewWithBaseURI(baseURI, subscriptionID)
+}
+func PossibleAccessTierValues() []AccessTier {
+ return original.PossibleAccessTierValues()
+}
+func PossibleAccountExpandValues() []AccountExpand {
+ return original.PossibleAccountExpandValues()
+}
+func PossibleAccountStatusValues() []AccountStatus {
+ return original.PossibleAccountStatusValues()
+}
+func PossibleAction1Values() []Action1 {
+ return original.PossibleAction1Values()
+}
+func PossibleActionValues() []Action {
+ return original.PossibleActionValues()
+}
+func PossibleBypassValues() []Bypass {
+ return original.PossibleBypassValues()
+}
+func PossibleDefaultActionValues() []DefaultAction {
+ return original.PossibleDefaultActionValues()
+}
+func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions {
+ return original.PossibleDirectoryServiceOptionsValues()
+}
+func PossibleGeoReplicationStatusValues() []GeoReplicationStatus {
+ return original.PossibleGeoReplicationStatusValues()
+}
+func PossibleHTTPProtocolValues() []HTTPProtocol {
+ return original.PossibleHTTPProtocolValues()
+}
+func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState {
+ return original.PossibleImmutabilityPolicyStateValues()
+}
+func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType {
+ return original.PossibleImmutabilityPolicyUpdateTypeValues()
+}
+func PossibleKeyPermissionValues() []KeyPermission {
+ return original.PossibleKeyPermissionValues()
+}
+func PossibleKeySourceValues() []KeySource {
+ return original.PossibleKeySourceValues()
+}
+func PossibleKindValues() []Kind {
+ return original.PossibleKindValues()
+}
+func PossibleLargeFileSharesStateValues() []LargeFileSharesState {
+ return original.PossibleLargeFileSharesStateValues()
+}
+func PossibleLeaseDurationValues() []LeaseDuration {
+ return original.PossibleLeaseDurationValues()
+}
+func PossibleLeaseStateValues() []LeaseState {
+ return original.PossibleLeaseStateValues()
+}
+func PossibleLeaseStatusValues() []LeaseStatus {
+ return original.PossibleLeaseStatusValues()
+}
+func PossibleListKeyExpandValues() []ListKeyExpand {
+ return original.PossibleListKeyExpandValues()
+}
+func PossiblePermissionsValues() []Permissions {
+ return original.PossiblePermissionsValues()
+}
+func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState {
+ return original.PossiblePrivateEndpointConnectionProvisioningStateValues()
+}
+func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
+ return original.PossiblePrivateEndpointServiceConnectionStatusValues()
+}
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return original.PossibleProvisioningStateValues()
+}
+func PossiblePublicAccessValues() []PublicAccess {
+ return original.PossiblePublicAccessValues()
+}
+func PossibleReasonCodeValues() []ReasonCode {
+ return original.PossibleReasonCodeValues()
+}
+func PossibleReasonValues() []Reason {
+ return original.PossibleReasonValues()
+}
+func PossibleServicesValues() []Services {
+ return original.PossibleServicesValues()
+}
+func PossibleSignedResourceTypesValues() []SignedResourceTypes {
+ return original.PossibleSignedResourceTypesValues()
+}
+func PossibleSignedResourceValues() []SignedResource {
+ return original.PossibleSignedResourceValues()
+}
+func PossibleSkuNameValues() []SkuName {
+ return original.PossibleSkuNameValues()
+}
+func PossibleSkuTierValues() []SkuTier {
+ return original.PossibleSkuTierValues()
+}
+func PossibleStateValues() []State {
+ return original.PossibleStateValues()
+}
+func PossibleUsageUnitValues() []UsageUnit {
+ return original.PossibleUsageUnitValues()
+}
+func UserAgent() string {
+ return original.UserAgent() + " profiles/latest"
+}
+func Version() string {
+ return original.Version()
+}
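
Everything in this profile package is a type alias or thin wrapper over the
2019-06-01 storage API, so a consumer binds to profiles/latest and picks up
newer API versions on regeneration. A minimal sketch of wiring up one of the
aliased clients (the subscription ID, placeholder account name, and
environment-based credentials below are illustrative assumptions, not part of
this change):

    package main

    import (
    	"context"
    	"fmt"

    	storage "github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage"
    	"github.com/Azure/go-autorest/autorest/azure/auth"
    	"github.com/Azure/go-autorest/autorest/to"
    )

    func main() {
    	// Hypothetical subscription ID; substitute a real one.
    	client := storage.NewAccountsClient("00000000-0000-0000-0000-000000000000")

    	// Assumes AZURE_TENANT_ID/AZURE_CLIENT_ID/AZURE_CLIENT_SECRET are set.
    	authorizer, err := auth.NewAuthorizerFromEnvironment()
    	if err != nil {
    		panic(err)
    	}
    	client.Authorizer = authorizer

    	// The parameter and result types here are the aliases defined above.
    	result, err := client.CheckNameAvailability(context.Background(),
    		storage.AccountCheckNameAvailabilityParameters{
    			Name: to.StringPtr("examplestorageacct"), // hypothetical name
    			Type: to.StringPtr("Microsoft.Storage/storageAccounts"),
    		})
    	if err != nil {
    		panic(err)
    	}
    	if result.NameAvailable != nil {
    		fmt.Println("name available:", *result.NameAvailable)
    	}
    }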
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/client.go
new file mode 100644
index 0000000..54772e1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/client.go
@@ -0,0 +1,49 @@
+// Package operationalinsights implements the Azure ARM Operationalinsights service API version v1.
+//
+// Log Analytics Data Plane Client
+package operationalinsights
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Operationalinsights
+ DefaultBaseURI = "https://api.loganalytics.io/v1"
+)
+
+// BaseClient is the base client for Operationalinsights.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+}
+
+// New creates an instance of the BaseClient client.
+func New() BaseClient {
+ return NewWithBaseURI(DefaultBaseURI)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/models.go
new file mode 100644
index 0000000..20ac5aa
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/models.go
@@ -0,0 +1,95 @@
+package operationalinsights
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
+
+// Column is a column in a table.
+type Column struct {
+ // Name - The name of this column.
+ Name *string `json:"name,omitempty"`
+ // Type - The data type of this column.
+ Type *string `json:"type,omitempty"`
+}
+
+// ErrorDetail describes one error detail returned by the service.
+type ErrorDetail struct {
+ // Code - The error's code.
+ Code *string `json:"code,omitempty"`
+ // Message - A human readable error message.
+ Message *string `json:"message,omitempty"`
+ // Target - Indicates which property in the request is responsible for the error.
+ Target *string `json:"target,omitempty"`
+ // Value - Indicates which value in 'target' is responsible for the error.
+ Value *string `json:"value,omitempty"`
+ // Resources - Indicates resources which were responsible for the error.
+ Resources *[]string `json:"resources,omitempty"`
+ AdditionalProperties interface{} `json:"additionalProperties,omitempty"`
+}
+
+// ErrorInfo describes the code, message and details of an error.
+type ErrorInfo struct {
+ // Code - A machine readable error code.
+ Code *string `json:"code,omitempty"`
+ // Message - A human readable error message.
+ Message *string `json:"message,omitempty"`
+ // Details - error details.
+ Details *[]ErrorDetail `json:"details,omitempty"`
+ // Innererror - Inner error details if they exist.
+ Innererror *ErrorInfo `json:"innererror,omitempty"`
+ AdditionalProperties interface{} `json:"additionalProperties,omitempty"`
+}
+
+// ErrorResponse contains details when the response code indicates an error.
+type ErrorResponse struct {
+ // Error - The error details.
+ Error *ErrorInfo `json:"error,omitempty"`
+}
+
+// QueryBody is the Analytics query. Learn more about the [Analytics query
+// syntax](https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/).
+type QueryBody struct {
+ // Query - The query to execute.
+ Query *string `json:"query,omitempty"`
+ // Timespan - Optional. The timespan over which to query data. This is an ISO8601 time period value. This timespan is applied in addition to any that are specified in the query expression.
+ Timespan *string `json:"timespan,omitempty"`
+ // Workspaces - A list of workspaces that are included in the query.
+ Workspaces *[]string `json:"workspaces,omitempty"`
+}
+
+// QueryResults contains the tables, columns & rows resulting from a query.
+type QueryResults struct {
+ autorest.Response `json:"-"`
+ // Tables - The list of tables, columns and rows.
+ Tables *[]Table `json:"tables,omitempty"`
+}
+
+// Table contains the columns and rows for one table in a query response.
+type Table struct {
+ // Name - The name of the table.
+ Name *string `json:"name,omitempty"`
+ // Columns - The list of columns in this table.
+ Columns *[]Column `json:"columns,omitempty"`
+ // Rows - The resulting rows from this query.
+ Rows *[][]interface{} `json:"rows,omitempty"`
+}
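
Every collection in these generated models is a pointer (Tables is *[]Table,
Rows is *[][]interface{}), so callers have to nil-check at each level before
ranging. A small sketch of walking a QueryResults value defensively (the
printResults helper is illustrative, not part of this package):

    package example

    import (
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
    )

    // printResults dumps every table and row in a query response, guarding
    // each pointer-valued field before dereferencing it.
    func printResults(results operationalinsights.QueryResults) {
    	if results.Tables == nil {
    		return
    	}
    	for _, table := range *results.Tables {
    		if table.Name != nil {
    			fmt.Println("table:", *table.Name)
    		}
    		if table.Rows == nil {
    			continue
    		}
    		for _, row := range *table.Rows {
    			// Each row is a []interface{} with one value per column.
    			fmt.Println(row)
    		}
    	}
    }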
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/query.go b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/query.go
new file mode 100644
index 0000000..6e99939
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/query.go
@@ -0,0 +1,121 @@
+package operationalinsights
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// QueryClient is the Log Analytics Data Plane Client.
+type QueryClient struct {
+ BaseClient
+}
+
+// NewQueryClient creates an instance of the QueryClient client.
+func NewQueryClient() QueryClient {
+ return NewQueryClientWithBaseURI(DefaultBaseURI)
+}
+
+// NewQueryClientWithBaseURI creates an instance of the QueryClient client.
+func NewQueryClientWithBaseURI(baseURI string) QueryClient {
+ return QueryClient{NewWithBaseURI(baseURI)}
+}
+
+// Execute executes an Analytics query for data. [Here](https://dev.loganalytics.io/documentation/Using-the-API) is an
+// example of using POST with an Analytics query.
+// Parameters:
+// workspaceID - ID of the workspace. This is Workspace ID from the Properties blade in the Azure portal.
+// body - the Analytics query. Learn more about the [Analytics query
+// syntax](https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/)
+func (client QueryClient) Execute(ctx context.Context, workspaceID string, body QueryBody) (result QueryResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueryClient.Execute")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: body,
+ Constraints: []validation.Constraint{{Target: "body.Query", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("operationalinsights.QueryClient", "Execute", err.Error())
+ }
+
+ req, err := client.ExecutePreparer(ctx, workspaceID, body)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "operationalinsights.QueryClient", "Execute", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ExecuteSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "operationalinsights.QueryClient", "Execute", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ExecuteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "operationalinsights.QueryClient", "Execute", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ExecutePreparer prepares the Execute request.
+func (client QueryClient) ExecutePreparer(ctx context.Context, workspaceID string, body QueryBody) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "workspaceId": autorest.Encode("path", workspaceID),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/workspaces/{workspaceId}/query", pathParameters),
+ autorest.WithJSON(body))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExecuteSender sends the Execute request. The method will close the
+// http.Response Body if it receives an error.
+func (client QueryClient) ExecuteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ExecuteResponder handles the response to the Execute request. The method always
+// closes the http.Response Body.
+func (client QueryClient) ExecuteResponder(resp *http.Response) (result QueryResults, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
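
For completeness, a sketch of calling Execute end to end. The workspace ID and
KQL query are placeholders, and the environment-based authorizer scoped to the
https://api.loganalytics.io resource is an assumption about how callers will
authenticate, not something this diff prescribes:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
    	"github.com/Azure/go-autorest/autorest/azure/auth"
    )

    func main() {
    	client := operationalinsights.NewQueryClient()

    	// The token must target the data-plane resource, not management.azure.com.
    	authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource("https://api.loganalytics.io")
    	if err != nil {
    		panic(err)
    	}
    	client.Authorizer = authorizer

    	query := "Heartbeat | take 5" // hypothetical KQL query
    	timespan := "PT1H"            // optional ISO8601 period: the last hour
    	workspaceID := "00000000-0000-0000-0000-000000000000" // placeholder

    	results, err := client.Execute(context.Background(), workspaceID,
    		operationalinsights.QueryBody{Query: &query, Timespan: &timespan})
    	if err != nil {
    		panic(err)
    	}
    	if results.Tables != nil {
    		fmt.Println("tables returned:", len(*results.Tables))
    	}
    }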
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/version.go
new file mode 100644
index 0000000..da792d0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/version.go
@@ -0,0 +1,30 @@
+package operationalinsights
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " operationalinsights/v1"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/accounts.go
new file mode 100644
index 0000000..2ac4600
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/accounts.go
@@ -0,0 +1,1311 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// AccountsClient is the client for the Azure Storage Management API.
+type AccountsClient struct {
+ BaseClient
+}
+
+// NewAccountsClient creates an instance of the AccountsClient client.
+func NewAccountsClient(subscriptionID string) AccountsClient {
+ return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client.
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
+ return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CheckNameAvailability checks that the storage account name is valid and is not already in use.
+// Parameters:
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) CheckNameAvailability(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "CheckNameAvailability", err.Error())
+ }
+
+ req, err := client.CheckNameAvailabilityPreparer(ctx, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckNameAvailabilitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckNameAvailabilityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
+func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters),
+ autorest.WithJSON(accountName),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Create asynchronously creates a new storage account with the specified parameters. If an account is already created
+// and a subsequent create request is issued with different properties, the account properties will be updated. If an
+// account is already created and a subsequent create or update request is issued with the exact same set of
+// properties, the request will succeed.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide for the created account.
+func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (result AccountsCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Identity", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Identity.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.NetBiosDomainName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.ForestName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainGUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainSid", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.AzureStorageSid", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a storage account in Microsoft Azure.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Failover triggers a failover request for a storage account in case of availability issues. For RA-GRS accounts,
+// the failover occurs from the storage account's primary cluster to the secondary cluster, which becomes the
+// primary after the failover.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) Failover(ctx context.Context, resourceGroupName string, accountName string) (result AccountsFailoverFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Failover")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Failover", err.Error())
+ }
+
+ req, err := client.FailoverPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.FailoverSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// FailoverPreparer prepares the Failover request.
+func (client AccountsClient) FailoverPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// FailoverSender sends the Failover request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) FailoverSender(req *http.Request) (future AccountsFailoverFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// FailoverResponder handles the response to the Failover request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) FailoverResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
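+
+// Illustrative usage (editor's sketch, not part of the generated code): Failover is a
+// long-running operation, so callers typically block on the returned future. The
+// resource group and account names below are placeholders, and "client" is assumed to
+// be an authorized AccountsClient:
+//
+//   future, err := client.Failover(ctx, "myResourceGroup", "myaccount")
+//   if err != nil {
+//       return err
+//   }
+//   // WaitForCompletionRef polls until the failover completes or ctx is cancelled.
+//   if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//       return err
+//   }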
+
+// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name,
+// location, and account status. The ListKeys operation should be used to retrieve storage keys.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// expand - may be used to expand the properties within the account's properties. By default, data is not
+// included when fetching properties. Currently only geoReplicationStats is supported.
+func (client AccountsClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (result Account, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.GetProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "GetProperties", err.Error())
+ }
+
+ req, err := client.GetPropertiesPreparer(ctx, resourceGroupName, accountName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetPropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetPropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPropertiesPreparer prepares the GetProperties request.
+func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetPropertiesSender sends the GetProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetPropertiesResponder handles the response to the GetProperties request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
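+
+// Illustrative usage (editor's sketch): fetching an account together with its
+// geo-replication statistics via the optional $expand parameter. The identifiers are
+// placeholders, "client" is assumed to be an authorized AccountsClient, and the
+// expand value is passed as a literal rather than a named constant:
+//
+//   account, err := client.GetProperties(ctx, "myResourceGroup", "myaccount",
+//       storage.AccountExpand("geoReplicationStats"))
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println(*account.Name)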
+
+// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use
+// the ListKeys operation for this.
+func (client AccountsClient) List(ctx context.Context) (result AccountListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client AccountsClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) {
+ req, err := lastResults.accountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountsClient) ListComplete(ctx context.Context) (result AccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
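+
+// Illustrative usage (editor's sketch): ListComplete hides the paging performed by
+// listNextResults behind an iterator; "client" is assumed to be an authorized
+// AccountsClient:
+//
+//   iter, err := client.ListComplete(ctx)
+//   for err == nil && iter.NotDone() {
+//       fmt.Println(*iter.Value().Name)
+//       err = iter.NextWithContext(ctx) // fetches the next page when needed
+//   }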
+
+// ListAccountSAS lists the SAS credentials of a storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide to list SAS credentials for the storage account.
+func (client AccountsClient) ListAccountSAS(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListAccountSAS")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListAccountSAS", err.Error())
+ }
+
+ req, err := client.ListAccountSASPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListAccountSASSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListAccountSASResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListAccountSASPreparer prepares the ListAccountSAS request.
+func (client AccountsClient) ListAccountSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListAccountSASSender sends the ListAccountSAS request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
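+
+// Illustrative usage (editor's sketch): only SharedAccessExpiryTime is validated as
+// required above; a real request would also set the signed services, resource types,
+// and permissions. The names below are placeholders, and date.Time comes from
+// github.com/Azure/go-autorest/autorest/date:
+//
+//   expiry := date.Time{Time: time.Now().Add(48 * time.Hour)}
+//   sas, err := client.ListAccountSAS(ctx, "myResourceGroup", "myaccount",
+//       storage.AccountSasParameters{SharedAccessExpiryTime: &expiry})
+//   if err == nil && sas.AccountSasToken != nil {
+//       fmt.Println(*sas.AccountSasToken)
+//   }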
+
+// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys
+// are not returned; use the ListKeys operation for this.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AccountListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListByResourceGroup", err.Error())
+ }
+
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListKeys lists the access keys or Kerberos keys (if Active Directory is enabled) for the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// expand - specifies the type of the key to be listed. The only possible value is kerb.
+func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (result AccountListKeysResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error())
+ }
+
+ req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
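+
+// Illustrative usage (editor's sketch): listing and printing the account keys. Per the
+// parameter doc above, an empty ListKeyExpand omits Kerberos keys while "kerb" would
+// include them; identifiers are placeholders:
+//
+//   keys, err := client.ListKeys(ctx, "myResourceGroup", "myaccount", "")
+//   if err != nil {
+//       return err
+//   }
+//   for _, k := range *keys.Keys {
+//       fmt.Println(*k.KeyName, *k.Value)
+//   }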
+
+// ListServiceSAS lists the service SAS credentials of a specific resource.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide to list service SAS credentials.
+func (client AccountsClient) ListServiceSAS(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListServiceSAS")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Identifier", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "ListServiceSAS", err.Error())
+ }
+
+ req, err := client.ListServiceSASPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListServiceSASSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListServiceSASResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListServiceSASPreparer prepares the ListServiceSAS request.
+func (client AccountsClient) ListServiceSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListServiceSASSender sends the ListServiceSAS request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// RegenerateKey regenerates one of the access keys or Kerberos keys for the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// regenerateKey - specifies the name of the key to be regenerated -- key1, key2, kerb1, kerb2.
+func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RegenerateKey")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: regenerateKey,
+ Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "RegenerateKey", err.Error())
+ }
+
+ req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, regenerateKey)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RegenerateKeySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RegenerateKeyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RegenerateKeyPreparer prepares the RegenerateKey request.
+func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters),
+ autorest.WithJSON(regenerateKey),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RegenerateKeySender sends the RegenerateKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
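+
+// Illustrative usage (editor's sketch): rotating the primary access key. "key1" is one
+// of the key names listed in the parameter doc above, and to.StringPtr comes from
+// github.com/Azure/go-autorest/autorest/to:
+//
+//   keys, err := client.RegenerateKey(ctx, "myResourceGroup", "myaccount",
+//       storage.AccountRegenerateKeyParameters{KeyName: to.StringPtr("key1")})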
+
+// RevokeUserDelegationKeys revokes the user delegation keys for the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) RevokeUserDelegationKeys(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RevokeUserDelegationKeys")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "RevokeUserDelegationKeys", err.Error())
+ }
+
+ req, err := client.RevokeUserDelegationKeysPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RevokeUserDelegationKeysSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RevokeUserDelegationKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RevokeUserDelegationKeysPreparer prepares the RevokeUserDelegationKeys request.
+func (client AccountsClient) RevokeUserDelegationKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RevokeUserDelegationKeysSender sends the RevokeUserDelegationKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) RevokeUserDelegationKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RevokeUserDelegationKeysResponder handles the response to the RevokeUserDelegationKeys request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) RevokeUserDelegationKeysResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Update can be used to update the SKU, encryption, access tier, or tags for a storage account. It can also be
+// used to map the account to a custom domain. Only one custom domain is supported per storage account; replacing
+// or changing the custom domain is not supported. To replace an old custom domain, the old value must be
+// cleared/unregistered before a new value can be set. Updating multiple properties is supported. This call does
+// not change the storage keys for the account; to change the storage account keys, use the regenerate keys
+// operation. The location and name of the storage account cannot be changed after creation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the parameters to provide for the updated account.
+func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.AccountsClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
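+
+// Illustrative usage (editor's sketch): patching only the tags of an existing account.
+// Fields of AccountUpdateParameters that are left nil are not modified; the tag key and
+// value are placeholders:
+//
+//   updated, err := client.Update(ctx, "myResourceGroup", "myaccount",
+//       storage.AccountUpdateParameters{
+//           Tags: map[string]*string{"env": to.StringPtr("staging")},
+//       })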
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobcontainers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobcontainers.go
new file mode 100644
index 0000000..402a013
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobcontainers.go
@@ -0,0 +1,1463 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// BlobContainersClient is the client for the Azure Storage Management API.
+type BlobContainersClient struct {
+ BaseClient
+}
+
+// NewBlobContainersClient creates an instance of the BlobContainersClient client.
+func NewBlobContainersClient(subscriptionID string) BlobContainersClient {
+ return NewBlobContainersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBlobContainersClientWithBaseURI creates an instance of the BlobContainersClient client.
+func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient {
+ return BlobContainersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ClearLegalHold clears legal hold tags. Clearing the same or a non-existent tag results in an idempotent
+// operation; only the tags specified in the request are cleared.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// legalHold - the LegalHold property that will be cleared from a blob container.
+func (client BlobContainersClient) ClearLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ClearLegalHold")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: legalHold,
+ Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "ClearLegalHold", err.Error())
+ }
+
+ req, err := client.ClearLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ClearLegalHoldSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ClearLegalHoldResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ClearLegalHoldPreparer prepares the ClearLegalHold request.
+func (client BlobContainersClient) ClearLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ legalHold.HasLegalHold = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold", pathParameters),
+ autorest.WithJSON(legalHold),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ClearLegalHoldSender sends the ClearLegalHold request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ClearLegalHoldSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ClearLegalHoldResponder handles the response to the ClearLegalHold request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ClearLegalHoldResponder(resp *http.Response) (result LegalHold, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
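+
+// Illustrative usage (editor's sketch): clearing two legal-hold tags from a container.
+// Note that the preparer above nils out the read-only HasLegalHold field before the
+// request is sent; all names below are placeholders:
+//
+//   tags := []string{"audit", "litigation"}
+//   lh, err := client.ClearLegalHold(ctx, "myResourceGroup", "myaccount", "mycontainer",
+//       storage.LegalHold{Tags: &tags})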
+
+// Create creates a new container under the specified account as described by the request body. The container resource
+// includes metadata and properties for that container. It does not include a list of the blobs contained by the
+// container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// blobContainer - properties of the blob container to create.
+func (client BlobContainersClient) Create(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: blobContainer,
+ Constraints: []validation.Constraint{{Target: "blobContainer.ContainerProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy.ImmutabilityPolicyProperty", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ }}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client BlobContainersClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithJSON(blobContainer),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) CreateResponder(resp *http.Response) (result BlobContainer, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
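+
+// Illustrative usage (editor's sketch): creating a container with default properties.
+// A zero-value BlobContainer satisfies the validation above because the
+// ContainerProperties constraint only applies when the field is non-nil:
+//
+//   container, err := client.Create(ctx, "myResourceGroup", "myaccount", "mycontainer",
+//       storage.BlobContainer{})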
+
+// CreateOrUpdateImmutabilityPolicy creates or updates an unlocked immutability policy. ETag in If-Match is honored if
+// given but not required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// parameters - the ImmutabilityPolicy properties that will be created or updated on a blob container.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.CreateOrUpdateImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.CreateOrUpdateImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, parameters, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdateImmutabilityPolicyPreparer prepares the CreateOrUpdateImmutabilityPolicy request.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "immutabilityPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateImmutabilityPolicySender sends the CreateOrUpdateImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateImmutabilityPolicyResponder handles the response to the CreateOrUpdateImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
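+
+// Usage sketch (illustrative): creates or refreshes a 7-day unlocked policy.
+// The struct shape is inferred from the validation constraints above, which
+// require ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays:
+//
+//   days := int32(7)
+//   policy, err := client.CreateOrUpdateImmutabilityPolicy(ctx, "my-rg", "mystorageacct", "mycontainer",
+//       &ImmutabilityPolicy{ImmutabilityPolicyProperty: &ImmutabilityPolicyProperty{
+//           ImmutabilityPeriodSinceCreationInDays: &days,
+//       }}, "") // empty ifMatch: create if absent, update otherwise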
+
+// Delete deletes the specified container under its account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+func (client BlobContainersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, containerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client BlobContainersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
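+
+// Usage sketch (illustrative): Delete returns a bare autorest.Response, which
+// embeds *http.Response, so the status code is inspected directly:
+//
+//   resp, err := client.Delete(ctx, "my-rg", "mystorageacct", "mycontainer")
+//   if err == nil && resp.StatusCode == http.StatusNoContent {
+//       // 204 conventionally indicates the container did not exist
+//   }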
+
+// DeleteImmutabilityPolicy aborts an unlocked immutability policy. The response of delete has
+// immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this operation. Deleting a locked
+// immutability policy is not allowed; the only way is to delete the container after deleting all blobs inside the
+// container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to delete. This header is required for
+// this operation; a value of "*" can be used to apply the operation only if the immutability policy already
+// exists.
+func (client BlobContainersClient) DeleteImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.DeleteImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "DeleteImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.DeleteImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request.
+func (client BlobContainersClient) DeleteImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "immutabilityPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteImmutabilityPolicySender sends the DeleteImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) DeleteImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) DeleteImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
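+
+// Usage sketch (illustrative): because If-Match is required here, the current
+// ETag is fetched first; the Etag field name on ImmutabilityPolicy is an
+// assumption based on this package's generated models:
+//
+//   current, err := client.GetImmutabilityPolicy(ctx, "my-rg", "mystorageacct", "mycontainer", "")
+//   if err == nil && current.Etag != nil {
+//       _, err = client.DeleteImmutabilityPolicy(ctx, "my-rg", "mystorageacct", "mycontainer", *current.Etag)
+//   }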
+
+// ExtendImmutabilityPolicy extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. This
+// is the only action allowed on a Locked policy. ETag in If-Match is required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to extend. This header is required for
+// this operation; a value of "*" can be used to apply the operation only if the immutability policy already
+// exists.
+// parameters - the ImmutabilityPolicy Properties that will be extended for a blob container.
+func (client BlobContainersClient) ExtendImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ExtendImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "ExtendImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.ExtendImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ExtendImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ExtendImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ExtendImmutabilityPolicyPreparer prepares the ExtendImmutabilityPolicy request.
+func (client BlobContainersClient) ExtendImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExtendImmutabilityPolicySender sends the ExtendImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ExtendImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ExtendImmutabilityPolicyResponder handles the response to the ExtendImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ExtendImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
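+
+// Usage sketch (illustrative): extends a locked policy to 30 days; etag must be
+// the ETag of the locked policy, and the struct shape follows the validation
+// constraints above:
+//
+//   days := int32(30)
+//   extended, err := client.ExtendImmutabilityPolicy(ctx, "my-rg", "mystorageacct", "mycontainer", etag,
+//       &ImmutabilityPolicy{ImmutabilityPolicyProperty: &ImmutabilityPolicyProperty{
+//           ImmutabilityPeriodSinceCreationInDays: &days,
+//       }})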
+
+// Get gets properties of a specified container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+func (client BlobContainersClient) Get(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, containerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client BlobContainersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) GetResponder(resp *http.Response) (result BlobContainer, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
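+
+// Usage sketch (illustrative):
+//
+//   container, err := client.Get(ctx, "my-rg", "mystorageacct", "mycontainer")
+//   // container carries the ARM-side properties of the blob container as
+//   // defined by this package's BlobContainer model.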
+
+// GetImmutabilityPolicy gets the existing immutability policy along with the corresponding ETag in response headers
+// and body.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) GetImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.GetImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "GetImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.GetImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetImmutabilityPolicyPreparer prepares the GetImmutabilityPolicy request.
+func (client BlobContainersClient) GetImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "immutabilityPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetImmutabilityPolicySender sends the GetImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) GetImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetImmutabilityPolicyResponder handles the response to the GetImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) GetImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
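+
+// Usage sketch (illustrative): ifMatch may be left empty for a plain read; the
+// returned policy carries the ETag required by the delete, extend, and lock
+// operations on this client:
+//
+//   policy, err := client.GetImmutabilityPolicy(ctx, "my-rg", "mystorageacct", "mycontainer", "")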
+
+// Lease implements the Lease Container operation, which establishes and manages a lock on a container for delete
+// operations. The lock duration can be 15 to 60 seconds, or can be infinite.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// parameters - lease Container request body.
+func (client BlobContainersClient) Lease(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (result LeaseContainerResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Lease")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Lease", err.Error())
+ }
+
+ req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, containerName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.LeaseSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.LeaseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// LeasePreparer prepares the Lease request.
+func (client BlobContainersClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// LeaseSender sends the Lease request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) LeaseSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// LeaseResponder handles the response to the Lease request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) LeaseResponder(resp *http.Response) (result LeaseContainerResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
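+
+// Usage sketch (illustrative): acquiring a 60-second lease. The
+// LeaseContainerRequest field names here are assumptions based on the Lease
+// Container request schema; check this package's models before relying on them:
+//
+//   duration := int32(60)
+//   lease, err := client.Lease(ctx, "my-rg", "mystorageacct", "mycontainer",
+//       &LeaseContainerRequest{Action: "Acquire", LeaseDuration: &duration})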
+
+// List lists all containers and does not support prefix-based listing the way the data plane API does. Note that
+// the Storage Resource Provider (SRP) does not currently return a continuation token.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// maxpagesize - optional. Specifies the maximum number of containers that can be included in the list.
+// filter - optional. When specified, only container names starting with the filter will be listed.
+func (client BlobContainersClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListContainerItemsPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List")
+ defer func() {
+ sc := -1
+ if result.lci.Response.Response != nil {
+ sc = result.lci.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lci.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lci, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client BlobContainersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(maxpagesize) > 0 {
+ queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) ListResponder(resp *http.Response) (result ListContainerItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client BlobContainersClient) listNextResults(ctx context.Context, lastResults ListContainerItems) (result ListContainerItems, err error) {
+ req, err := lastResults.listContainerItemsPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client BlobContainersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListContainerItemsIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter)
+ return
+}
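+
+// Usage sketch (illustrative): ListComplete hides the page handling above
+// behind an iterator; NotDone, Value, and NextWithContext are the standard
+// methods on generated iterators:
+//
+//   it, err := client.ListComplete(ctx, "my-rg", "mystorageacct", "", "")
+//   for err == nil && it.NotDone() {
+//       item := it.Value() // one ListContainerItem per container
+//       _ = item
+//       err = it.NextWithContext(ctx)
+//   }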
+
+// LockImmutabilityPolicy sets the ImmutabilityPolicy to the Locked state. The only action allowed on a Locked
+// policy is the ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to lock. This header is required for this
+// operation; a value of "*" can be used to apply the operation only if the immutability policy already exists.
+func (client BlobContainersClient) LockImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.LockImmutabilityPolicy")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "LockImmutabilityPolicy", err.Error())
+ }
+
+ req, err := client.LockImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.LockImmutabilityPolicySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.LockImmutabilityPolicyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// LockImmutabilityPolicyPreparer prepares the LockImmutabilityPolicy request.
+func (client BlobContainersClient) LockImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// LockImmutabilityPolicySender sends the LockImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) LockImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// LockImmutabilityPolicyResponder handles the response to the LockImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) LockImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
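+
+// Usage sketch (illustrative): locking is irreversible, so the current
+// policy's ETag must be supplied explicitly:
+//
+//   locked, err := client.LockImmutabilityPolicy(ctx, "my-rg", "mystorageacct", "mycontainer", etag)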
+
+// SetLegalHold sets legal hold tags. Setting the same tag again is idempotent. SetLegalHold follows an append
+// pattern and does not clear existing tags that are not specified in the request.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// legalHold - the LegalHold property that will be set on the blob container.
+func (client BlobContainersClient) SetLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.SetLegalHold")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: legalHold,
+ Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "SetLegalHold", err.Error())
+ }
+
+ req, err := client.SetLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetLegalHoldSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetLegalHoldResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// SetLegalHoldPreparer prepares the SetLegalHold request.
+func (client BlobContainersClient) SetLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ legalHold.HasLegalHold = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold", pathParameters),
+ autorest.WithJSON(legalHold),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetLegalHoldSender sends the SetLegalHold request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) SetLegalHoldSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// SetLegalHoldResponder handles the response to the SetLegalHold request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) SetLegalHoldResponder(resp *http.Response) (result LegalHold, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
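+
+// Usage sketch (illustrative): Tags as *[]string follows the generated-model
+// convention for arrays and the non-nil constraint validated above:
+//
+//   tags := []string{"audit-2019"}
+//   hold, err := client.SetLegalHold(ctx, "my-rg", "mystorageacct", "mycontainer", LegalHold{Tags: &tags})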
+
+// Update updates container properties as specified in the request body. Properties not mentioned in the request
+// are left unchanged. Update fails if the specified container doesn't already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// blobContainer - properties to update for the blob container.
+func (client BlobContainersClient) Update(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: containerName,
+ Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobContainersClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client BlobContainersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "containerName": autorest.Encode("path", containerName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters),
+ autorest.WithJSON(blobContainer),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) UpdateResponder(resp *http.Response) (result BlobContainer, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
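+
+// exampleBlobContainersUpdate is an illustrative sketch, not AutoRest output:
+// it shows how a caller might exercise Update. The subscription, resource
+// group, account, and container names are placeholders, and a real client
+// would set an Authorizer before issuing requests.
+func exampleBlobContainersUpdate() error {
+	client := NewBlobContainersClient("00000000-0000-0000-0000-000000000000")
+	// Per the doc comment above, properties not mentioned in the request body
+	// remain unchanged; populate only the fields you want to modify.
+	container, err := client.Update(context.Background(), "example-rg", "examplestorage", "example-container", BlobContainer{})
+	if err != nil {
+		return err
+	}
+	_ = container // updated BlobContainer; its embedded Response carries the raw *http.Response
+	return nil
+}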
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobservices.go
new file mode 100644
index 0000000..8f29d93
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobservices.go
@@ -0,0 +1,341 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// BlobServicesClient is the client for the Azure Storage Management API.
+type BlobServicesClient struct {
+ BaseClient
+}
+
+// NewBlobServicesClient creates an instance of the BlobServicesClient client.
+func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
+ return NewBlobServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBlobServicesClientWithBaseURI creates an instance of the BlobServicesClient client.
+func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
+ return BlobServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of a storage account’s Blob service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client BlobServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "BlobServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
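+
+// exampleGetBlobServiceProperties is an illustrative sketch, not generated
+// code: it reads the Blob service properties for a placeholder account. A
+// real caller would configure client.Authorizer first.
+func exampleGetBlobServiceProperties() (BlobServiceProperties, error) {
+	client := NewBlobServicesClient("00000000-0000-0000-0000-000000000000")
+	return client.GetServiceProperties(context.Background(), "example-rg", "examplestorage")
+}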
+
+// List lists the blob services of the storage account. It returns a collection of one object named default.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
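+
+// exampleListBlobServices is an illustrative sketch, not generated code: as
+// the doc comment above notes, the collection holds a single item named
+// "default". The Value field on BlobServiceItems follows the usual AutoRest
+// model shape and is assumed here.
+func exampleListBlobServices() error {
+	client := NewBlobServicesClient("00000000-0000-0000-0000-000000000000")
+	items, err := client.List(context.Background(), "example-rg", "examplestorage")
+	if err != nil {
+		return err
+	}
+	if items.Value != nil && len(*items.Value) > 0 {
+		_ = (*items.Value)[0] // the "default" blob service
+	}
+	return nil
+}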
+
+// SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage
+// Analytics and CORS (Cross-Origin Resource Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of a storage account’s Blob service, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules.
+func (client BlobServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (result BlobServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }},
+ {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client BlobServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "BlobServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) SetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
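+
+// exampleSetBlobServiceProperties is an illustrative sketch, not generated
+// code: it enables blob soft delete for seven days. The model field names
+// (BlobServicePropertiesProperties, DeleteRetentionPolicy) mirror the
+// validation constraints above, where Days must fall in [1, 365].
+func exampleSetBlobServiceProperties() error {
+	client := NewBlobServicesClient("00000000-0000-0000-0000-000000000000")
+	enabled := true
+	days := int32(7)
+	params := BlobServiceProperties{
+		BlobServicePropertiesProperties: &BlobServicePropertiesProperties{
+			DeleteRetentionPolicy: &DeleteRetentionPolicy{Enabled: &enabled, Days: &days},
+		},
+	}
+	_, err := client.SetServiceProperties(context.Background(), "example-rg", "examplestorage", params)
+	return err
+}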
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
new file mode 100644
index 0000000..98630b5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
@@ -0,0 +1,51 @@
+// Package storage implements the Azure ARM Storage service API version 2019-06-01.
+//
+// The Azure Storage Management API.
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Storage
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Storage.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
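+
+// exampleSovereignCloudClient is an illustrative sketch, not generated code:
+// NewWithBaseURI lets a caller target a management endpoint other than the
+// default public cloud, e.g. the US Government cloud shown here.
+func exampleSovereignCloudClient() BaseClient {
+	return NewWithBaseURI("https://management.usgovcloudapi.net", "00000000-0000-0000-0000-000000000000")
+}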
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileservices.go
new file mode 100644
index 0000000..ff5cf18
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileservices.go
@@ -0,0 +1,335 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FileServicesClient is the client for the Azure Storage Management API.
+type FileServicesClient struct {
+ BaseClient
+}
+
+// NewFileServicesClient creates an instance of the FileServicesClient client.
+func NewFileServicesClient(subscriptionID string) FileServicesClient {
+ return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client.
+func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
+ return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "FileServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all file services in the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
+ {Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "FileServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
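+
+// exampleSetFileServiceProperties is an illustrative sketch, not generated
+// code: it enables share soft delete. The field names mirror the validation
+// constraints above (FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days,
+// valid range [1, 365]); the policy type is assumed to be the shared
+// DeleteRetentionPolicy model.
+func exampleSetFileServiceProperties() error {
+	client := NewFileServicesClient("00000000-0000-0000-0000-000000000000")
+	enabled := true
+	days := int32(14)
+	params := FileServiceProperties{
+		FileServicePropertiesProperties: &FileServicePropertiesProperties{
+			ShareDeleteRetentionPolicy: &DeleteRetentionPolicy{Enabled: &enabled, Days: &days},
+		},
+	}
+	_, err := client.SetServiceProperties(context.Background(), "example-rg", "examplestorage", params)
+	return err
+}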
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileshares.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileshares.go
new file mode 100644
index 0000000..eb5ca77
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileshares.go
@@ -0,0 +1,590 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FileSharesClient is the client for the Azure Storage Management API.
+type FileSharesClient struct {
+ BaseClient
+}
+
+// NewFileSharesClient creates an instance of the FileSharesClient client.
+func NewFileSharesClient(subscriptionID string) FileSharesClient {
+ return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client.
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
+ return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new share under the specified account as described by the request body. The share resource
+// includes metadata and properties for that share. It does not include a list of the files contained by the share.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// fileShare - properties of the file share to create.
+func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: fileShare,
+ Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(5120), Chain: nil},
+ {Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithJSON(fileShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
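+
+// exampleCreateFileShare is an illustrative sketch, not generated code: it
+// creates a share with a 100 GiB quota. ShareQuota lives on the embedded
+// FileShareProperties, as the validation above shows; its valid range is
+// [1, 5120].
+func exampleCreateFileShare() error {
+	client := NewFileSharesClient("00000000-0000-0000-0000-000000000000")
+	quota := int32(100)
+	share := FileShare{
+		FileShareProperties: &FileShareProperties{ShareQuota: &quota},
+	}
+	_, err := client.Create(context.Background(), "example-rg", "examplestorage", "example-share", share)
+	return err
+}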
+
+// Delete deletes the specified share under its account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
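+
+// exampleDeleteFileShare is an illustrative sketch, not generated code:
+// Delete returns a bare autorest.Response, and per DeleteResponder above
+// either 200 or 204 counts as success.
+func exampleDeleteFileShare() error {
+	client := NewFileSharesClient("00000000-0000-0000-0000-000000000000")
+	resp, err := client.Delete(context.Background(), "example-rg", "examplestorage", "example-share")
+	if err != nil {
+		return err
+	}
+	_ = resp.StatusCode // http.StatusOK or http.StatusNoContent
+	return nil
+}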
+
+// Get gets properties of a specified share.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all shares.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// maxpagesize - optional. Specifies the maximum number of shares that can be included in the list.
+// filter - optional. When specified, only share names starting with the filter will be listed.
+func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result FileShareItemsPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
+ defer func() {
+ sc := -1
+ if result.fsi.Response.Response != nil {
+ sc = result.fsi.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.fsi.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.fsi, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(maxpagesize) > 0 {
+ queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) {
+ req, err := lastResults.fileShareItemsPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result FileShareItemsIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter)
+ return
+}
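+
+// Illustrative usage sketch: draining the paged List results. The iterator
+// accessors assumed here (NotDone, Value, NextWithContext) mirror the
+// AccountListResultIterator pattern defined in this package's models.go.
+//
+//   iter, err := client.ListComplete(ctx, "myResourceGroup", "mystorageaccount", "", "")
+//   for err == nil && iter.NotDone() {
+//       item := iter.Value() // FileShareItem
+//       _ = item
+//       err = iter.NextWithContext(ctx)
+//   }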
+
+// Update updates share properties as specified in the request body. Properties not mentioned in the request will not
+// be changed. Update fails if the specified share does not already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// fileShare - properties to update for the file share.
+func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithJSON(fileShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
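+
+// Illustrative usage sketch: Update issues a PATCH, so only fields present in
+// the request body change. The FileShareProperties/ShareQuota field names are
+// assumptions about this package's models; to.Int32Ptr comes from
+// github.com/Azure/go-autorest/autorest/to.
+//
+//   updated, err := client.Update(ctx, "myResourceGroup", "mystorageaccount", "myshare",
+//       FileShare{FileShareProperties: &FileShareProperties{ShareQuota: to.Int32Ptr(1024)}})
+//   _, _ = updated, err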
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/managementpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/managementpolicies.go
new file mode 100644
index 0000000..c91d84c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/managementpolicies.go
@@ -0,0 +1,328 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ManagementPoliciesClient is the client for the Azure Storage Management API.
+type ManagementPoliciesClient struct {
+ BaseClient
+}
+
+// NewManagementPoliciesClient creates an instance of the ManagementPoliciesClient client.
+func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient {
+ return NewManagementPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagementPoliciesClientWithBaseURI creates an instance of the ManagementPoliciesClient client.
+func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient {
+ return ManagementPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate sets the management policy of the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// properties - the management policy to set on the storage account.
+func (client ManagementPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (result ManagementPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: properties,
+ Constraints: []validation.Constraint{{Target: "properties.ManagementPolicyProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("storage.ManagementPoliciesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ManagementPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "managementPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagementPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ManagementPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
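+
+// Illustrative usage sketch: per the validation constraints above, the policy
+// passed to CreateOrUpdate must carry a non-nil Policy with non-nil Rules. The
+// nested type names below are assumptions about this package's models.
+//
+//   policy, err := client.CreateOrUpdate(ctx, "myResourceGroup", "mystorageaccount",
+//       ManagementPolicy{ManagementPolicyProperties: &ManagementPolicyProperties{
+//           Policy: &ManagementPolicySchema{Rules: &[]ManagementPolicyRule{ /* at least one rule */ }},
+//       }})
+//   _, _ = policy, err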
+
+// Delete deletes the management policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client ManagementPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ManagementPoliciesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagementPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "managementPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagementPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the management policy associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client ManagementPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result ManagementPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.ManagementPoliciesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagementPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "managementPolicyName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagementPoliciesClient) GetResponder(resp *http.Response) (result ManagementPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
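+
+// Illustrative usage sketch: each storage account has a single "default"
+// management policy, so Get and Delete take no policy name. The constructor is
+// the one defined above; resource names and the authorizer are placeholders.
+//
+//   mpClient := NewManagementPoliciesClient("<subscriptionID>")
+//   mpClient.Authorizer = authorizer
+//   policy, err := mpClient.Get(ctx, "myResourceGroup", "mystorageaccount")
+//   if err == nil {
+//       _ = policy
+//       _, err = mpClient.Delete(ctx, "myResourceGroup", "mystorageaccount")
+//   }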
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/models.go
new file mode 100644
index 0000000..c83dfc8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/models.go
@@ -0,0 +1,3271 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
+
+// AccessTier enumerates the values for access tier.
+type AccessTier string
+
+const (
+ // Cool ...
+ Cool AccessTier = "Cool"
+ // Hot ...
+ Hot AccessTier = "Hot"
+)
+
+// PossibleAccessTierValues returns an array of possible values for the AccessTier const type.
+func PossibleAccessTierValues() []AccessTier {
+ return []AccessTier{Cool, Hot}
+}
+
+// AccountExpand enumerates the values for account expand.
+type AccountExpand string
+
+const (
+ // AccountExpandGeoReplicationStats ...
+ AccountExpandGeoReplicationStats AccountExpand = "geoReplicationStats"
+)
+
+// PossibleAccountExpandValues returns an array of possible values for the AccountExpand const type.
+func PossibleAccountExpandValues() []AccountExpand {
+ return []AccountExpand{AccountExpandGeoReplicationStats}
+}
+
+// AccountStatus enumerates the values for account status.
+type AccountStatus string
+
+const (
+ // Available ...
+ Available AccountStatus = "available"
+ // Unavailable ...
+ Unavailable AccountStatus = "unavailable"
+)
+
+// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type.
+func PossibleAccountStatusValues() []AccountStatus {
+ return []AccountStatus{Available, Unavailable}
+}
+
+// Action enumerates the values for action.
+type Action string
+
+const (
+ // Allow ...
+ Allow Action = "Allow"
+)
+
+// PossibleActionValues returns an array of possible values for the Action const type.
+func PossibleActionValues() []Action {
+ return []Action{Allow}
+}
+
+// Action1 enumerates the values for action 1.
+type Action1 string
+
+const (
+ // Acquire ...
+ Acquire Action1 = "Acquire"
+ // Break ...
+ Break Action1 = "Break"
+ // Change ...
+ Change Action1 = "Change"
+ // Release ...
+ Release Action1 = "Release"
+ // Renew ...
+ Renew Action1 = "Renew"
+)
+
+// PossibleAction1Values returns an array of possible values for the Action1 const type.
+func PossibleAction1Values() []Action1 {
+ return []Action1{Acquire, Break, Change, Release, Renew}
+}
+
+// Bypass enumerates the values for bypass.
+type Bypass string
+
+const (
+ // AzureServices ...
+ AzureServices Bypass = "AzureServices"
+ // Logging ...
+ Logging Bypass = "Logging"
+ // Metrics ...
+ Metrics Bypass = "Metrics"
+ // None ...
+ None Bypass = "None"
+)
+
+// PossibleBypassValues returns an array of possible values for the Bypass const type.
+func PossibleBypassValues() []Bypass {
+ return []Bypass{AzureServices, Logging, Metrics, None}
+}
+
+// DefaultAction enumerates the values for default action.
+type DefaultAction string
+
+const (
+ // DefaultActionAllow ...
+ DefaultActionAllow DefaultAction = "Allow"
+ // DefaultActionDeny ...
+ DefaultActionDeny DefaultAction = "Deny"
+)
+
+// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type.
+func PossibleDefaultActionValues() []DefaultAction {
+ return []DefaultAction{DefaultActionAllow, DefaultActionDeny}
+}
+
+// DirectoryServiceOptions enumerates the values for directory service options.
+type DirectoryServiceOptions string
+
+const (
+ // DirectoryServiceOptionsAADDS ...
+ DirectoryServiceOptionsAADDS DirectoryServiceOptions = "AADDS"
+ // DirectoryServiceOptionsAD ...
+ DirectoryServiceOptionsAD DirectoryServiceOptions = "AD"
+ // DirectoryServiceOptionsNone ...
+ DirectoryServiceOptionsNone DirectoryServiceOptions = "None"
+)
+
+// PossibleDirectoryServiceOptionsValues returns an array of possible values for the DirectoryServiceOptions const type.
+func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions {
+ return []DirectoryServiceOptions{DirectoryServiceOptionsAADDS, DirectoryServiceOptionsAD, DirectoryServiceOptionsNone}
+}
+
+// GeoReplicationStatus enumerates the values for geo replication status.
+type GeoReplicationStatus string
+
+const (
+ // GeoReplicationStatusBootstrap ...
+ GeoReplicationStatusBootstrap GeoReplicationStatus = "Bootstrap"
+ // GeoReplicationStatusLive ...
+ GeoReplicationStatusLive GeoReplicationStatus = "Live"
+ // GeoReplicationStatusUnavailable ...
+ GeoReplicationStatusUnavailable GeoReplicationStatus = "Unavailable"
+)
+
+// PossibleGeoReplicationStatusValues returns an array of possible values for the GeoReplicationStatus const type.
+func PossibleGeoReplicationStatusValues() []GeoReplicationStatus {
+ return []GeoReplicationStatus{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusUnavailable}
+}
+
+// HTTPProtocol enumerates the values for http protocol.
+type HTTPProtocol string
+
+const (
+ // HTTPS ...
+ HTTPS HTTPProtocol = "https"
+ // Httpshttp ...
+ Httpshttp HTTPProtocol = "https,http"
+)
+
+// PossibleHTTPProtocolValues returns an array of possible values for the HTTPProtocol const type.
+func PossibleHTTPProtocolValues() []HTTPProtocol {
+ return []HTTPProtocol{HTTPS, Httpshttp}
+}
+
+// ImmutabilityPolicyState enumerates the values for immutability policy state.
+type ImmutabilityPolicyState string
+
+const (
+ // Locked ...
+ Locked ImmutabilityPolicyState = "Locked"
+ // Unlocked ...
+ Unlocked ImmutabilityPolicyState = "Unlocked"
+)
+
+// PossibleImmutabilityPolicyStateValues returns an array of possible values for the ImmutabilityPolicyState const type.
+func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState {
+ return []ImmutabilityPolicyState{Locked, Unlocked}
+}
+
+// ImmutabilityPolicyUpdateType enumerates the values for immutability policy update type.
+type ImmutabilityPolicyUpdateType string
+
+const (
+ // Extend ...
+ Extend ImmutabilityPolicyUpdateType = "extend"
+ // Lock ...
+ Lock ImmutabilityPolicyUpdateType = "lock"
+ // Put ...
+ Put ImmutabilityPolicyUpdateType = "put"
+)
+
+// PossibleImmutabilityPolicyUpdateTypeValues returns an array of possible values for the ImmutabilityPolicyUpdateType const type.
+func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType {
+ return []ImmutabilityPolicyUpdateType{Extend, Lock, Put}
+}
+
+// KeyPermission enumerates the values for key permission.
+type KeyPermission string
+
+const (
+ // Full ...
+ Full KeyPermission = "Full"
+ // Read ...
+ Read KeyPermission = "Read"
+)
+
+// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type.
+func PossibleKeyPermissionValues() []KeyPermission {
+ return []KeyPermission{Full, Read}
+}
+
+// KeySource enumerates the values for key source.
+type KeySource string
+
+const (
+ // MicrosoftKeyvault ...
+ MicrosoftKeyvault KeySource = "Microsoft.Keyvault"
+ // MicrosoftStorage ...
+ MicrosoftStorage KeySource = "Microsoft.Storage"
+)
+
+// PossibleKeySourceValues returns an array of possible values for the KeySource const type.
+func PossibleKeySourceValues() []KeySource {
+ return []KeySource{MicrosoftKeyvault, MicrosoftStorage}
+}
+
+// Kind enumerates the values for kind.
+type Kind string
+
+const (
+ // BlobStorage ...
+ BlobStorage Kind = "BlobStorage"
+ // BlockBlobStorage ...
+ BlockBlobStorage Kind = "BlockBlobStorage"
+ // FileStorage ...
+ FileStorage Kind = "FileStorage"
+ // Storage ...
+ Storage Kind = "Storage"
+ // StorageV2 ...
+ StorageV2 Kind = "StorageV2"
+)
+
+// PossibleKindValues returns an array of possible values for the Kind const type.
+func PossibleKindValues() []Kind {
+ return []Kind{BlobStorage, BlockBlobStorage, FileStorage, Storage, StorageV2}
+}
+
+// LargeFileSharesState enumerates the values for large file shares state.
+type LargeFileSharesState string
+
+const (
+ // Disabled ...
+ Disabled LargeFileSharesState = "Disabled"
+ // Enabled ...
+ Enabled LargeFileSharesState = "Enabled"
+)
+
+// PossibleLargeFileSharesStateValues returns an array of possible values for the LargeFileSharesState const type.
+func PossibleLargeFileSharesStateValues() []LargeFileSharesState {
+ return []LargeFileSharesState{Disabled, Enabled}
+}
+
+// LeaseDuration enumerates the values for lease duration.
+type LeaseDuration string
+
+const (
+ // Fixed ...
+ Fixed LeaseDuration = "Fixed"
+ // Infinite ...
+ Infinite LeaseDuration = "Infinite"
+)
+
+// PossibleLeaseDurationValues returns an array of possible values for the LeaseDuration const type.
+func PossibleLeaseDurationValues() []LeaseDuration {
+ return []LeaseDuration{Fixed, Infinite}
+}
+
+// LeaseState enumerates the values for lease state.
+type LeaseState string
+
+const (
+ // LeaseStateAvailable ...
+ LeaseStateAvailable LeaseState = "Available"
+ // LeaseStateBreaking ...
+ LeaseStateBreaking LeaseState = "Breaking"
+ // LeaseStateBroken ...
+ LeaseStateBroken LeaseState = "Broken"
+ // LeaseStateExpired ...
+ LeaseStateExpired LeaseState = "Expired"
+ // LeaseStateLeased ...
+ LeaseStateLeased LeaseState = "Leased"
+)
+
+// PossibleLeaseStateValues returns an array of possible values for the LeaseState const type.
+func PossibleLeaseStateValues() []LeaseState {
+ return []LeaseState{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased}
+}
+
+// LeaseStatus enumerates the values for lease status.
+type LeaseStatus string
+
+const (
+ // LeaseStatusLocked ...
+ LeaseStatusLocked LeaseStatus = "Locked"
+ // LeaseStatusUnlocked ...
+ LeaseStatusUnlocked LeaseStatus = "Unlocked"
+)
+
+// PossibleLeaseStatusValues returns an array of possible values for the LeaseStatus const type.
+func PossibleLeaseStatusValues() []LeaseStatus {
+ return []LeaseStatus{LeaseStatusLocked, LeaseStatusUnlocked}
+}
+
+// ListKeyExpand enumerates the values for list key expand.
+type ListKeyExpand string
+
+const (
+ // Kerb ...
+ Kerb ListKeyExpand = "kerb"
+)
+
+// PossibleListKeyExpandValues returns an array of possible values for the ListKeyExpand const type.
+func PossibleListKeyExpandValues() []ListKeyExpand {
+ return []ListKeyExpand{Kerb}
+}
+
+// Permissions enumerates the values for permissions.
+type Permissions string
+
+const (
+ // A ...
+ A Permissions = "a"
+ // C ...
+ C Permissions = "c"
+ // D ...
+ D Permissions = "d"
+ // L ...
+ L Permissions = "l"
+ // P ...
+ P Permissions = "p"
+ // R ...
+ R Permissions = "r"
+ // U ...
+ U Permissions = "u"
+ // W ...
+ W Permissions = "w"
+)
+
+// PossiblePermissionsValues returns an array of possible values for the Permissions const type.
+func PossiblePermissionsValues() []Permissions {
+ return []Permissions{A, C, D, L, P, R, U, W}
+}
+
+// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection
+// provisioning state.
+type PrivateEndpointConnectionProvisioningState string
+
+const (
+ // Creating ...
+ Creating PrivateEndpointConnectionProvisioningState = "Creating"
+ // Deleting ...
+ Deleting PrivateEndpointConnectionProvisioningState = "Deleting"
+ // Failed ...
+ Failed PrivateEndpointConnectionProvisioningState = "Failed"
+ // Succeeded ...
+ Succeeded PrivateEndpointConnectionProvisioningState = "Succeeded"
+)
+
+// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type.
+func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState {
+ return []PrivateEndpointConnectionProvisioningState{Creating, Deleting, Failed, Succeeded}
+}
+
+// PrivateEndpointServiceConnectionStatus enumerates the values for private endpoint service connection status.
+type PrivateEndpointServiceConnectionStatus string
+
+const (
+ // Approved ...
+ Approved PrivateEndpointServiceConnectionStatus = "Approved"
+ // Pending ...
+ Pending PrivateEndpointServiceConnectionStatus = "Pending"
+ // Rejected ...
+ Rejected PrivateEndpointServiceConnectionStatus = "Rejected"
+)
+
+// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type.
+func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
+ return []PrivateEndpointServiceConnectionStatus{Approved, Pending, Rejected}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+ // ProvisioningStateCreating ...
+ ProvisioningStateCreating ProvisioningState = "Creating"
+ // ProvisioningStateResolvingDNS ...
+ ProvisioningStateResolvingDNS ProvisioningState = "ResolvingDNS"
+ // ProvisioningStateSucceeded ...
+ ProvisioningStateSucceeded ProvisioningState = "Succeeded"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateResolvingDNS, ProvisioningStateSucceeded}
+}
+
+// PublicAccess enumerates the values for public access.
+type PublicAccess string
+
+const (
+ // PublicAccessBlob ...
+ PublicAccessBlob PublicAccess = "Blob"
+ // PublicAccessContainer ...
+ PublicAccessContainer PublicAccess = "Container"
+ // PublicAccessNone ...
+ PublicAccessNone PublicAccess = "None"
+)
+
+// PossiblePublicAccessValues returns an array of possible values for the PublicAccess const type.
+func PossiblePublicAccessValues() []PublicAccess {
+ return []PublicAccess{PublicAccessBlob, PublicAccessContainer, PublicAccessNone}
+}
+
+// Reason enumerates the values for reason.
+type Reason string
+
+const (
+ // AccountNameInvalid ...
+ AccountNameInvalid Reason = "AccountNameInvalid"
+ // AlreadyExists ...
+ AlreadyExists Reason = "AlreadyExists"
+)
+
+// PossibleReasonValues returns an array of possible values for the Reason const type.
+func PossibleReasonValues() []Reason {
+ return []Reason{AccountNameInvalid, AlreadyExists}
+}
+
+// ReasonCode enumerates the values for reason code.
+type ReasonCode string
+
+const (
+ // NotAvailableForSubscription ...
+ NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription"
+ // QuotaID ...
+ QuotaID ReasonCode = "QuotaId"
+)
+
+// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type.
+func PossibleReasonCodeValues() []ReasonCode {
+ return []ReasonCode{NotAvailableForSubscription, QuotaID}
+}
+
+// Services enumerates the values for services.
+type Services string
+
+const (
+ // B ...
+ B Services = "b"
+ // F ...
+ F Services = "f"
+ // Q ...
+ Q Services = "q"
+ // T ...
+ T Services = "t"
+)
+
+// PossibleServicesValues returns an array of possible values for the Services const type.
+func PossibleServicesValues() []Services {
+ return []Services{B, F, Q, T}
+}
+
+// SignedResource enumerates the values for signed resource.
+type SignedResource string
+
+const (
+ // SignedResourceB ...
+ SignedResourceB SignedResource = "b"
+ // SignedResourceC ...
+ SignedResourceC SignedResource = "c"
+ // SignedResourceF ...
+ SignedResourceF SignedResource = "f"
+ // SignedResourceS ...
+ SignedResourceS SignedResource = "s"
+)
+
+// PossibleSignedResourceValues returns an array of possible values for the SignedResource const type.
+func PossibleSignedResourceValues() []SignedResource {
+ return []SignedResource{SignedResourceB, SignedResourceC, SignedResourceF, SignedResourceS}
+}
+
+// SignedResourceTypes enumerates the values for signed resource types.
+type SignedResourceTypes string
+
+const (
+ // SignedResourceTypesC ...
+ SignedResourceTypesC SignedResourceTypes = "c"
+ // SignedResourceTypesO ...
+ SignedResourceTypesO SignedResourceTypes = "o"
+ // SignedResourceTypesS ...
+ SignedResourceTypesS SignedResourceTypes = "s"
+)
+
+// PossibleSignedResourceTypesValues returns an array of possible values for the SignedResourceTypes const type.
+func PossibleSignedResourceTypesValues() []SignedResourceTypes {
+ return []SignedResourceTypes{SignedResourceTypesC, SignedResourceTypesO, SignedResourceTypesS}
+}
+
+// SkuName enumerates the values for sku name.
+type SkuName string
+
+const (
+ // PremiumLRS ...
+ PremiumLRS SkuName = "Premium_LRS"
+ // PremiumZRS ...
+ PremiumZRS SkuName = "Premium_ZRS"
+ // StandardGRS ...
+ StandardGRS SkuName = "Standard_GRS"
+ // StandardGZRS ...
+ StandardGZRS SkuName = "Standard_GZRS"
+ // StandardLRS ...
+ StandardLRS SkuName = "Standard_LRS"
+ // StandardRAGRS ...
+ StandardRAGRS SkuName = "Standard_RAGRS"
+ // StandardRAGZRS ...
+ StandardRAGZRS SkuName = "Standard_RAGZRS"
+ // StandardZRS ...
+ StandardZRS SkuName = "Standard_ZRS"
+)
+
+// PossibleSkuNameValues returns an array of possible values for the SkuName const type.
+func PossibleSkuNameValues() []SkuName {
+ return []SkuName{PremiumLRS, PremiumZRS, StandardGRS, StandardGZRS, StandardLRS, StandardRAGRS, StandardRAGZRS, StandardZRS}
+}
+
+// SkuTier enumerates the values for sku tier.
+type SkuTier string
+
+const (
+ // Premium ...
+ Premium SkuTier = "Premium"
+ // Standard ...
+ Standard SkuTier = "Standard"
+)
+
+// PossibleSkuTierValues returns an array of possible values for the SkuTier const type.
+func PossibleSkuTierValues() []SkuTier {
+ return []SkuTier{Premium, Standard}
+}
+
+// State enumerates the values for state.
+type State string
+
+const (
+ // StateDeprovisioning ...
+ StateDeprovisioning State = "deprovisioning"
+ // StateFailed ...
+ StateFailed State = "failed"
+ // StateNetworkSourceDeleted ...
+ StateNetworkSourceDeleted State = "networkSourceDeleted"
+ // StateProvisioning ...
+ StateProvisioning State = "provisioning"
+ // StateSucceeded ...
+ StateSucceeded State = "succeeded"
+)
+
+// PossibleStateValues returns an array of possible values for the State const type.
+func PossibleStateValues() []State {
+ return []State{StateDeprovisioning, StateFailed, StateNetworkSourceDeleted, StateProvisioning, StateSucceeded}
+}
+
+// UsageUnit enumerates the values for usage unit.
+type UsageUnit string
+
+const (
+ // Bytes ...
+ Bytes UsageUnit = "Bytes"
+ // BytesPerSecond ...
+ BytesPerSecond UsageUnit = "BytesPerSecond"
+ // Count ...
+ Count UsageUnit = "Count"
+ // CountsPerSecond ...
+ CountsPerSecond UsageUnit = "CountsPerSecond"
+ // Percent ...
+ Percent UsageUnit = "Percent"
+ // Seconds ...
+ Seconds UsageUnit = "Seconds"
+)
+
+// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type.
+func PossibleUsageUnitValues() []UsageUnit {
+ return []UsageUnit{Bytes, BytesPerSecond, Count, CountsPerSecond, Percent, Seconds}
+}
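+
+// Illustrative usage sketch: the Possible*Values helpers above make it easy to
+// validate caller-supplied strings before a round trip, e.g.:
+//
+//   func isValidSkuName(s string) bool {
+//       for _, v := range PossibleSkuNameValues() {
+//           if string(v) == s {
+//               return true
+//           }
+//       }
+//       return false
+//   }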
+
+// Account the storage account.
+type Account struct {
+ autorest.Response `json:"-"`
+ // Sku - READ-ONLY; Gets the SKU.
+ Sku *Sku `json:"sku,omitempty"`
+ // Kind - READ-ONLY; Gets the Kind. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+ // Identity - The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // AccountProperties - Properties of the storage account.
+ *AccountProperties `json:"properties,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Location - The geo-location where the resource lives.
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex: Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Account.
+func (a Account) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if a.Identity != nil {
+ objectMap["identity"] = a.Identity
+ }
+ if a.AccountProperties != nil {
+ objectMap["properties"] = a.AccountProperties
+ }
+ if a.Tags != nil {
+ objectMap["tags"] = a.Tags
+ }
+ if a.Location != nil {
+ objectMap["location"] = a.Location
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Account struct.
+func (a *Account) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ a.Sku = &sku
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ a.Kind = kind
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ a.Identity = &identity
+ }
+ case "properties":
+ if v != nil {
+ var accountProperties AccountProperties
+ err = json.Unmarshal(*v, &accountProperties)
+ if err != nil {
+ return err
+ }
+ a.AccountProperties = &accountProperties
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ a.Tags = tags
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ a.Location = &location
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ a.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ a.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ a.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// AccountCheckNameAvailabilityParameters the parameters used to check the availability of the storage
+// account name.
+type AccountCheckNameAvailabilityParameters struct {
+ // Name - The storage account name.
+ Name *string `json:"name,omitempty"`
+ // Type - The type of resource, Microsoft.Storage/storageAccounts
+ Type *string `json:"type,omitempty"`
+}
+
+// AccountCreateParameters the parameters used when creating a storage account.
+type AccountCreateParameters struct {
+ // Sku - Required. Gets or sets the SKU name.
+ Sku *Sku `json:"sku,omitempty"`
+ // Kind - Required. Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+ // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed.
+ Location *string `json:"location,omitempty"`
+ // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters.
+ Tags map[string]*string `json:"tags"`
+ // Identity - The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // AccountPropertiesCreateParameters - The parameters used to create the storage account.
+ *AccountPropertiesCreateParameters `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountCreateParameters.
+func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if acp.Sku != nil {
+ objectMap["sku"] = acp.Sku
+ }
+ if acp.Kind != "" {
+ objectMap["kind"] = acp.Kind
+ }
+ if acp.Location != nil {
+ objectMap["location"] = acp.Location
+ }
+ if acp.Tags != nil {
+ objectMap["tags"] = acp.Tags
+ }
+ if acp.Identity != nil {
+ objectMap["identity"] = acp.Identity
+ }
+ if acp.AccountPropertiesCreateParameters != nil {
+ objectMap["properties"] = acp.AccountPropertiesCreateParameters
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct.
+func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ acp.Sku = &sku
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ acp.Kind = kind
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ acp.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ acp.Tags = tags
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ acp.Identity = &identity
+ }
+ case "properties":
+ if v != nil {
+ var accountPropertiesCreateParameters AccountPropertiesCreateParameters
+ err = json.Unmarshal(*v, &accountPropertiesCreateParameters)
+ if err != nil {
+ return err
+ }
+ acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters
+ }
+ }
+ }
+
+ return nil
+}
+
+// AccountKey an access key for the storage account.
+type AccountKey struct {
+ // KeyName - READ-ONLY; Name of the key.
+ KeyName *string `json:"keyName,omitempty"`
+ // Value - READ-ONLY; Base 64-encoded value of the key.
+ Value *string `json:"value,omitempty"`
+ // Permissions - READ-ONLY; Permissions for the key -- read-only or full permissions. Possible values include: 'Read', 'Full'
+ Permissions KeyPermission `json:"permissions,omitempty"`
+}
+
+// AccountListKeysResult the response from the ListKeys operation.
+type AccountListKeysResult struct {
+ autorest.Response `json:"-"`
+ // Keys - READ-ONLY; Gets the list of storage account keys and their properties for the specified storage account.
+ Keys *[]AccountKey `json:"keys,omitempty"`
+}
+
+// AccountListResult the response from the List Storage Accounts operation.
+type AccountListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; Gets the list of storage accounts and their properties.
+ Value *[]Account `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of storage accounts. Returned when the total number of requested storage accounts exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// AccountListResultIterator provides access to a complete listing of Account values.
+type AccountListResultIterator struct {
+ i int
+ page AccountListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request, the iterator does not advance and the error is returned.
+func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request, the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AccountListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AccountListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AccountListResultIterator) Response() AccountListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AccountListResultIterator) Value() Account {
+ if !iter.page.NotDone() {
+ return Account{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewAccountListResultIterator creates a new instance of the AccountListResultIterator type.
+func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
+ return AccountListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr AccountListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// accountListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.NextLink)))
+}
+
+// AccountListResultPage contains a page of Account values.
+type AccountListResultPage struct {
+ fn func(context.Context, AccountListResult) (AccountListResult, error)
+ alr AccountListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AccountListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AccountListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AccountListResultPage) Response() AccountListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AccountListResultPage) Values() []Account {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// NewAccountListResultPage creates a new instance of the AccountListResultPage type.
+func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
+ return AccountListResultPage{fn: getNextPage}
+}
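+
+// Illustrative usage sketch (not generated code): walking results page by page
+// rather than value by value, assuming a generated AccountsClient.List call
+// that returns an AccountListResultPage:
+//
+//   page, err := accountsClient.List(ctx)
+//   for err == nil && page.NotDone() {
+//       for _, acct := range page.Values() {
+//           _ = acct // use each Account on the current page here
+//       }
+//       err = page.NextWithContext(ctx)
+//   }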
+
+// AccountProperties properties of the storage account.
+type AccountProperties struct {
+ // ProvisioningState - READ-ONLY; Gets the status of the storage account at the time the operation was called. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateResolvingDNS', 'ProvisioningStateSucceeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // PrimaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint.
+ PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"`
+ // PrimaryLocation - READ-ONLY; Gets the location of the primary data center for the storage account.
+ PrimaryLocation *string `json:"primaryLocation,omitempty"`
+ // StatusOfPrimary - READ-ONLY; Gets the status indicating whether the primary location of the storage account is available or unavailable. Possible values include: 'Available', 'Unavailable'
+ StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"`
+ // LastGeoFailoverTime - READ-ONLY; Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS.
+ LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"`
+ // SecondaryLocation - READ-ONLY; Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS.
+ SecondaryLocation *string `json:"secondaryLocation,omitempty"`
+ // StatusOfSecondary - READ-ONLY; Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'Available', 'Unavailable'
+ StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"`
+ // CreationTime - READ-ONLY; Gets the creation date and time of the storage account in UTC.
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // CustomDomain - READ-ONLY; Gets the custom domain the user assigned to this storage account.
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+ // SecondaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS.
+ SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"`
+ // Encryption - READ-ONLY; Gets the encryption settings on the account. If unspecified, the account is unencrypted.
+ Encryption *Encryption `json:"encryption,omitempty"`
+ // AccessTier - READ-ONLY; Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool'
+ AccessTier AccessTier `json:"accessTier,omitempty"`
+ // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files.
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"`
+ // EnableHTTPSTrafficOnly - Allows https traffic only to the storage service if set to true.
+ EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+ // NetworkRuleSet - READ-ONLY; Network rule set
+ NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+ // IsHnsEnabled - Account HierarchicalNamespace is enabled if set to true.
+ IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"`
+ // GeoReplicationStats - READ-ONLY; Geo Replication Stats
+ GeoReplicationStats *GeoReplicationStats `json:"geoReplicationStats,omitempty"`
+ // FailoverInProgress - READ-ONLY; If the failover is in progress, the value will be true; otherwise, it will be null.
+ FailoverInProgress *bool `json:"failoverInProgress,omitempty"`
+ // LargeFileSharesState - Allow large file shares if set to Enabled. It cannot be disabled once it is enabled. Possible values include: 'Disabled', 'Enabled'
+ LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"`
+ // PrivateEndpointConnections - READ-ONLY; List of private endpoint connections associated with the specified storage account.
+ PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"`
+}
+
+// AccountPropertiesCreateParameters the parameters used to create the storage account.
+type AccountPropertiesCreateParameters struct {
+ // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+ // Encryption - Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled.
+ Encryption *Encryption `json:"encryption,omitempty"`
+ // NetworkRuleSet - Network rule set
+ NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+ // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool'
+ AccessTier AccessTier `json:"accessTier,omitempty"`
+ // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files.
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"`
+ // EnableHTTPSTrafficOnly - Allows https traffic only to the storage service if set to true. The default value is true since API version 2019-04-01.
+ EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+ // IsHnsEnabled - Account HierarchicalNamespace is enabled if set to true.
+ IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"`
+ // LargeFileSharesState - Allow large file shares if set to Enabled. It cannot be disabled once it is enabled. Possible values include: 'Disabled', 'Enabled'
+ LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"`
+}
+
+// AccountPropertiesUpdateParameters the parameters used when updating a storage account.
+type AccountPropertiesUpdateParameters struct {
+ // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+ // Encryption - Provides the encryption settings on the account. The default setting is unencrypted.
+ Encryption *Encryption `json:"encryption,omitempty"`
+ // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool'
+ AccessTier AccessTier `json:"accessTier,omitempty"`
+ // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files.
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"`
+ // EnableHTTPSTrafficOnly - Allows https traffic only to the storage service if set to true.
+ EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+ // NetworkRuleSet - Network rule set
+ NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+ // LargeFileSharesState - Allow large file shares if set to Enabled. It cannot be disabled once it is enabled. Possible values include: 'Disabled', 'Enabled'
+ LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"`
+}
+
+// AccountRegenerateKeyParameters the parameters used to regenerate the storage account key.
+type AccountRegenerateKeyParameters struct {
+ // KeyName - The name of the storage account key to be regenerated; possible values are key1, key2, kerb1, kerb2.
+ KeyName *string `json:"keyName,omitempty"`
+}
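+
+// Illustrative usage sketch (not generated code), assuming the generated
+// AccountsClient.RegenerateKey method; the group and account names are
+// placeholders:
+//
+//   keys, err := accountsClient.RegenerateKey(ctx, "myResourceGroup", "mystorageaccount",
+//       AccountRegenerateKeyParameters{KeyName: to.StringPtr("key1")})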
+
+// AccountSasParameters the parameters to list SAS credentials of a storage account.
+type AccountSasParameters struct {
+ // Services - The signed services accessible with the account SAS: Blob (b), Queue (q), Table (t), File (f). Possible values include: 'B', 'Q', 'T', 'F'
+ Services Services `json:"signedServices,omitempty"`
+ // ResourceTypes - The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. Possible values include: 'SignedResourceTypesS', 'SignedResourceTypesC', 'SignedResourceTypesO'
+ ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"`
+ // Permissions - The signed permissions for the account SAS: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'R', 'D', 'W', 'L', 'A', 'C', 'U', 'P'
+ Permissions Permissions `json:"signedPermission,omitempty"`
+ // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests.
+ IPAddressOrRange *string `json:"signedIp,omitempty"`
+ // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'Httpshttp', 'HTTPS'
+ Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
+ // SharedAccessStartTime - The time at which the SAS becomes valid.
+ SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
+ // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid.
+ SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
+ // KeyToSign - The key to sign the account SAS token with.
+ KeyToSign *string `json:"keyToSign,omitempty"`
+}
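+
+// Illustrative usage sketch (not generated code): requesting a read-only
+// account SAS for the Blob service over HTTPS, valid for 24 hours. It assumes
+// the generated AccountsClient.ListAccountSAS method; the enum constants (B,
+// SignedResourceTypesO, R, HTTPS) are the ones named in the field comments
+// above:
+//
+//   expiry := date.Time{Time: time.Now().UTC().Add(24 * time.Hour)}
+//   sas, err := accountsClient.ListAccountSAS(ctx, "myResourceGroup", "mystorageaccount",
+//       AccountSasParameters{
+//           Services:               B,
+//           ResourceTypes:          SignedResourceTypesO,
+//           Permissions:            R,
+//           Protocols:              HTTPS,
+//           SharedAccessExpiryTime: &expiry,
+//       })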
+
+// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AccountsCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AccountsCreateFuture) Result(client AccountsClient) (a Account, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent {
+ a, err = client.CreateResponder(a.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
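+
+// Illustrative usage sketch (not generated code): blocking until the
+// long-running create finishes, assuming a generated AccountsClient.Create
+// call that returns an AccountsCreateFuture; createParams is hypothetical:
+//
+//   future, err := accountsClient.Create(ctx, "myResourceGroup", "mystorageaccount", createParams)
+//   if err != nil {
+//       return err
+//   }
+//   if err := future.WaitForCompletionRef(ctx, accountsClient.Client); err != nil {
+//       return err
+//   }
+//   account, err := future.Result(accountsClient)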
+
+// AccountsFailoverFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AccountsFailoverFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AccountsFailoverFuture) Result(client AccountsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.AccountsFailoverFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("storage.AccountsFailoverFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// AccountUpdateParameters the parameters that can be provided when updating the storage account
+// properties.
+type AccountUpdateParameters struct {
+ // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, nor can accounts of those SKU names be updated to any other value.
+ Sku *Sku `json:"sku,omitempty"`
+ // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters.
+ Tags map[string]*string `json:"tags"`
+ // Identity - The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // AccountPropertiesUpdateParameters - The parameters used when updating a storage account.
+ *AccountPropertiesUpdateParameters `json:"properties,omitempty"`
+ // Kind - Optional. Indicates the type of storage account. Currently only the StorageV2 value is supported by the server. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+}
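+
+// Illustrative sketch (not generated code): building update parameters. The
+// embedded *AccountPropertiesUpdateParameters is flattened under "properties"
+// by the custom MarshalJSON below; the field values are placeholders:
+//
+//   params := AccountUpdateParameters{
+//       Tags: map[string]*string{"env": to.StringPtr("prod")},
+//       AccountPropertiesUpdateParameters: &AccountPropertiesUpdateParameters{
+//           EnableHTTPSTrafficOnly: to.BoolPtr(true),
+//           AccessTier:             Hot,
+//       },
+//   }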
+
+// MarshalJSON is the custom marshaler for AccountUpdateParameters.
+func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if aup.Sku != nil {
+ objectMap["sku"] = aup.Sku
+ }
+ if aup.Tags != nil {
+ objectMap["tags"] = aup.Tags
+ }
+ if aup.Identity != nil {
+ objectMap["identity"] = aup.Identity
+ }
+ if aup.AccountPropertiesUpdateParameters != nil {
+ objectMap["properties"] = aup.AccountPropertiesUpdateParameters
+ }
+ if aup.Kind != "" {
+ objectMap["kind"] = aup.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct.
+func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ aup.Sku = &sku
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ aup.Tags = tags
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ aup.Identity = &identity
+ }
+ case "properties":
+ if v != nil {
+ var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters
+ err = json.Unmarshal(*v, &accountPropertiesUpdateParameters)
+ if err != nil {
+ return err
+ }
+ aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ aup.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// ActiveDirectoryProperties settings properties for Active Directory (AD).
+type ActiveDirectoryProperties struct {
+ // DomainName - Specifies the primary domain that the AD DNS server is authoritative for.
+ DomainName *string `json:"domainName,omitempty"`
+ // NetBiosDomainName - Specifies the NetBIOS domain name.
+ NetBiosDomainName *string `json:"netBiosDomainName,omitempty"`
+ // ForestName - Specifies the Active Directory forest to get.
+ ForestName *string `json:"forestName,omitempty"`
+ // DomainGUID - Specifies the domain GUID.
+ DomainGUID *string `json:"domainGuid,omitempty"`
+ // DomainSid - Specifies the security identifier (SID).
+ DomainSid *string `json:"domainSid,omitempty"`
+ // AzureStorageSid - Specifies the security identifier (SID) for Azure Storage.
+ AzureStorageSid *string `json:"azureStorageSid,omitempty"`
+}
+
+// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag.
+type AzureEntityResource struct {
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// AzureFilesIdentityBasedAuthentication settings for Azure Files identity based authentication.
+type AzureFilesIdentityBasedAuthentication struct {
+ // DirectoryServiceOptions - Indicates the directory service used. Possible values include: 'DirectoryServiceOptionsNone', 'DirectoryServiceOptionsAADDS', 'DirectoryServiceOptionsAD'
+ DirectoryServiceOptions DirectoryServiceOptions `json:"directoryServiceOptions,omitempty"`
+ // ActiveDirectoryProperties - Required if DirectoryServiceOptions is set to AD.
+ ActiveDirectoryProperties *ActiveDirectoryProperties `json:"activeDirectoryProperties,omitempty"`
+}
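+
+// Illustrative sketch (not generated code): enabling Active Directory
+// authentication for Azure Files; every value below is a placeholder:
+//
+//   auth := AzureFilesIdentityBasedAuthentication{
+//       DirectoryServiceOptions: DirectoryServiceOptionsAD,
+//       ActiveDirectoryProperties: &ActiveDirectoryProperties{
+//           DomainName:        to.StringPtr("corp.example.com"),
+//           NetBiosDomainName: to.StringPtr("CORP"),
+//           ForestName:        to.StringPtr("corp.example.com"),
+//           DomainGUID:        to.StringPtr("11111111-2222-3333-4444-555555555555"),
+//           DomainSid:         to.StringPtr("S-1-5-21-1111111111-2222222222-3333333333"),
+//           AzureStorageSid:   to.StringPtr("S-1-5-21-1111111111-2222222222-3333333333-1001"),
+//       },
+//   }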
+
+// BlobContainer properties of the blob container, including Id, resource name, resource type, Etag.
+type BlobContainer struct {
+ autorest.Response `json:"-"`
+ // ContainerProperties - Properties of the blob container.
+ *ContainerProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobContainer.
+func (bc BlobContainer) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bc.ContainerProperties != nil {
+ objectMap["properties"] = bc.ContainerProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for BlobContainer struct.
+func (bc *BlobContainer) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var containerProperties ContainerProperties
+ err = json.Unmarshal(*v, &containerProperties)
+ if err != nil {
+ return err
+ }
+ bc.ContainerProperties = &containerProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ bc.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ bc.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ bc.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ bc.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// BlobServiceItems the list of blob services returned.
+type BlobServiceItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of blob services returned.
+ Value *[]BlobServiceProperties `json:"value,omitempty"`
+}
+
+// BlobServiceProperties the properties of a storage account’s Blob service.
+type BlobServiceProperties struct {
+ autorest.Response `json:"-"`
+ // BlobServicePropertiesProperties - The properties of a storage account’s Blob service.
+ *BlobServicePropertiesProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobServiceProperties.
+func (bsp BlobServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bsp.BlobServicePropertiesProperties != nil {
+ objectMap["properties"] = bsp.BlobServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for BlobServiceProperties struct.
+func (bsp *BlobServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var blobServiceProperties BlobServicePropertiesProperties
+ err = json.Unmarshal(*v, &blobServiceProperties)
+ if err != nil {
+ return err
+ }
+ bsp.BlobServicePropertiesProperties = &blobServiceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ bsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ bsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ bsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// BlobServicePropertiesProperties the properties of a storage account’s Blob service.
+type BlobServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service.
+ Cors *CorsRules `json:"cors,omitempty"`
+ // DefaultServiceVersion - DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions.
+ DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty"`
+ // DeleteRetentionPolicy - The blob service properties for blob soft delete.
+ DeleteRetentionPolicy *DeleteRetentionPolicy `json:"deleteRetentionPolicy,omitempty"`
+ // AutomaticSnapshotPolicyEnabled - Automatic Snapshot is enabled if set to true.
+ AutomaticSnapshotPolicyEnabled *bool `json:"automaticSnapshotPolicyEnabled,omitempty"`
+ // ChangeFeed - The blob service properties for change feed events.
+ ChangeFeed *ChangeFeed `json:"changeFeed,omitempty"`
+ // ContainerDeleteRetentionPolicy - The blob service properties for container soft delete.
+ ContainerDeleteRetentionPolicy *DeleteRetentionPolicy `json:"containerDeleteRetentionPolicy,omitempty"`
+}
+
+// ChangeFeed the blob service properties for change feed events.
+type ChangeFeed struct {
+ // Enabled - Indicates whether change feed event logging is enabled for the Blob service.
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// CheckNameAvailabilityResult the CheckNameAvailability operation response.
+type CheckNameAvailabilityResult struct {
+ autorest.Response `json:"-"`
+ // NameAvailable - READ-ONLY; Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used.
+ NameAvailable *bool `json:"nameAvailable,omitempty"`
+ // Reason - READ-ONLY; Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'AccountNameInvalid', 'AlreadyExists'
+ Reason Reason `json:"reason,omitempty"`
+ // Message - READ-ONLY; Gets an error message explaining the Reason value in more detail.
+ Message *string `json:"message,omitempty"`
+}
+
+// CloudError an error response from the Storage service.
+type CloudError struct {
+ Error *CloudErrorBody `json:"error,omitempty"`
+}
+
+// CloudErrorBody an error response from the Storage service.
+type CloudErrorBody struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ // Target - The target of the particular error. For example, the name of the property in error.
+ Target *string `json:"target,omitempty"`
+ // Details - A list of additional details about the error.
+ Details *[]CloudErrorBody `json:"details,omitempty"`
+}
+
+// ContainerProperties the properties of a container.
+type ContainerProperties struct {
+ // PublicAccess - Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone'
+ PublicAccess PublicAccess `json:"publicAccess,omitempty"`
+ // LastModifiedTime - READ-ONLY; Returns the date and time the container was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // LeaseStatus - READ-ONLY; The lease status of the container. Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked'
+ LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"`
+ // LeaseState - READ-ONLY; Lease state of the container. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken'
+ LeaseState LeaseState `json:"leaseState,omitempty"`
+ // LeaseDuration - READ-ONLY; Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. Possible values include: 'Infinite', 'Fixed'
+ LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"`
+ // Metadata - A name-value pair to associate with the container as metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ImmutabilityPolicy - READ-ONLY; The ImmutabilityPolicy property of the container.
+ ImmutabilityPolicy *ImmutabilityPolicyProperties `json:"immutabilityPolicy,omitempty"`
+ // LegalHold - READ-ONLY; The LegalHold property of the container.
+ LegalHold *LegalHoldProperties `json:"legalHold,omitempty"`
+ // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+ HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+ // HasImmutabilityPolicy - READ-ONLY; The hasImmutabilityPolicy public property is set to true by SRP if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public property is set to false by SRP if ImmutabilityPolicy has not been created for this container.
+ HasImmutabilityPolicy *bool `json:"hasImmutabilityPolicy,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ContainerProperties.
+func (cp ContainerProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cp.PublicAccess != "" {
+ objectMap["publicAccess"] = cp.PublicAccess
+ }
+ if cp.Metadata != nil {
+ objectMap["metadata"] = cp.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// CorsRule specifies a CORS rule for the Blob service.
+type CorsRule struct {
+ // AllowedOrigins - Required if CorsRule element is present. A list of origin domains that will be allowed via CORS, or "*" to allow all domains.
+ AllowedOrigins *[]string `json:"allowedOrigins,omitempty"`
+ // AllowedMethods - Required if CorsRule element is present. A list of HTTP methods that are allowed to be executed by the origin.
+ AllowedMethods *[]string `json:"allowedMethods,omitempty"`
+ // MaxAgeInSeconds - Required if CorsRule element is present. The number of seconds that the client/browser should cache a preflight response.
+ MaxAgeInSeconds *int32 `json:"maxAgeInSeconds,omitempty"`
+ // ExposedHeaders - Required if CorsRule element is present. A list of response headers to expose to CORS clients.
+ ExposedHeaders *[]string `json:"exposedHeaders,omitempty"`
+ // AllowedHeaders - Required if CorsRule element is present. A list of headers allowed to be part of the cross-origin request.
+ AllowedHeaders *[]string `json:"allowedHeaders,omitempty"`
+}
+
+// CorsRules sets the CORS rules. You can include up to five CorsRule elements in the request.
+type CorsRules struct {
+ // CorsRules - The List of CORS rules. You can include up to five CorsRule elements in the request.
+ CorsRules *[]CorsRule `json:"corsRules,omitempty"`
+}
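+
+// Illustrative sketch (not generated code): a single GET-only CORS rule
+// wrapped in CorsRules, suitable for BlobServicePropertiesProperties.Cors;
+// the origin is a placeholder:
+//
+//   cors := CorsRules{CorsRules: &[]CorsRule{{
+//       AllowedOrigins:  &[]string{"https://contoso.example"},
+//       AllowedMethods:  &[]string{"GET"},
+//       AllowedHeaders:  &[]string{"*"},
+//       ExposedHeaders:  &[]string{"*"},
+//       MaxAgeInSeconds: to.Int32Ptr(3600),
+//   }}}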
+
+// CustomDomain the custom domain assigned to this storage account. This can be set via Update.
+type CustomDomain struct {
+ // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source.
+ Name *string `json:"name,omitempty"`
+ // UseSubDomainName - Indicates whether indirect CNAME validation is enabled. Default value is false. This should only be set on updates.
+ UseSubDomainName *bool `json:"useSubDomainName,omitempty"`
+}
+
+// DateAfterCreation object to define the number of days after creation.
+type DateAfterCreation struct {
+ // DaysAfterCreationGreaterThan - Value indicating the age in days after creation
+ DaysAfterCreationGreaterThan *float64 `json:"daysAfterCreationGreaterThan,omitempty"`
+}
+
+// DateAfterModification object to define the number of days after last modification.
+type DateAfterModification struct {
+ // DaysAfterModificationGreaterThan - Value indicating the age in days after last modification
+ DaysAfterModificationGreaterThan *float64 `json:"daysAfterModificationGreaterThan,omitempty"`
+}
+
+// DeleteRetentionPolicy the blob service properties for soft delete.
+type DeleteRetentionPolicy struct {
+ // Enabled - Indicates whether DeleteRetentionPolicy is enabled for the Blob service.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Days - Indicates the number of days that the deleted blob should be retained. The minimum specified value can be 1 and the maximum value can be 365.
+ Days *int32 `json:"days,omitempty"`
+}
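+
+// Illustrative sketch (not generated code): enabling seven-day blob soft
+// delete through BlobServicePropertiesProperties:
+//
+//   props := BlobServicePropertiesProperties{
+//       DeleteRetentionPolicy: &DeleteRetentionPolicy{
+//           Enabled: to.BoolPtr(true),
+//           Days:    to.Int32Ptr(7),
+//       },
+//   }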
+
+// Dimension dimension of blobs, such as blob type or access tier.
+type Dimension struct {
+ // Name - Name of the dimension.
+ Name *string `json:"name,omitempty"`
+ // DisplayName - Display name of dimension.
+ DisplayName *string `json:"displayName,omitempty"`
+}
+
+// Encryption the encryption settings on the storage account.
+type Encryption struct {
+ // Services - List of services which support encryption.
+ Services *EncryptionServices `json:"services,omitempty"`
+ // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include: 'MicrosoftStorage', 'MicrosoftKeyvault'
+ KeySource KeySource `json:"keySource,omitempty"`
+ // KeyVaultProperties - Properties provided by key vault.
+ KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"`
+}
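+
+// Illustrative sketch (not generated code): customer-managed key encryption
+// backed by Key Vault; the key and vault values are placeholders:
+//
+//   enc := Encryption{
+//       Services:  &EncryptionServices{Blob: &EncryptionService{Enabled: to.BoolPtr(true)}},
+//       KeySource: MicrosoftKeyvault,
+//       KeyVaultProperties: &KeyVaultProperties{
+//           KeyName:     to.StringPtr("mykey"),
+//           KeyVersion:  to.StringPtr("1234567890abcdef"),
+//           KeyVaultURI: to.StringPtr("https://myvault.vault.azure.net"),
+//       },
+//   }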
+
+// EncryptionService a service that allows server-side encryption to be used.
+type EncryptionService struct {
+ // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored.
+ Enabled *bool `json:"enabled,omitempty"`
+ // LastEnabledTime - READ-ONLY; Gets a rough estimate of the date/time when the encryption was last enabled by the user. Only returned when encryption is enabled. There might be some unencrypted blobs which were written after this time, as it is just a rough estimate.
+ LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"`
+}
+
+// EncryptionServices a list of services that support encryption.
+type EncryptionServices struct {
+ // Blob - The encryption function of the blob storage service.
+ Blob *EncryptionService `json:"blob,omitempty"`
+ // File - The encryption function of the file storage service.
+ File *EncryptionService `json:"file,omitempty"`
+ // Table - READ-ONLY; The encryption function of the table storage service.
+ Table *EncryptionService `json:"table,omitempty"`
+ // Queue - READ-ONLY; The encryption function of the queue storage service.
+ Queue *EncryptionService `json:"queue,omitempty"`
+}
+
+// Endpoints the URIs that are used to perform a retrieval of a public blob, queue, table, web or dfs
+// object.
+type Endpoints struct {
+ // Blob - READ-ONLY; Gets the blob endpoint.
+ Blob *string `json:"blob,omitempty"`
+ // Queue - READ-ONLY; Gets the queue endpoint.
+ Queue *string `json:"queue,omitempty"`
+ // Table - READ-ONLY; Gets the table endpoint.
+ Table *string `json:"table,omitempty"`
+ // File - READ-ONLY; Gets the file endpoint.
+ File *string `json:"file,omitempty"`
+ // Web - READ-ONLY; Gets the web endpoint.
+ Web *string `json:"web,omitempty"`
+ // Dfs - READ-ONLY; Gets the dfs endpoint.
+ Dfs *string `json:"dfs,omitempty"`
+}
+
+// ErrorResponse an error response from the storage resource provider.
+type ErrorResponse struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+}
+
+// FileServiceItems the list of file services returned.
+type FileServiceItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of file services returned.
+ Value *[]FileServiceProperties `json:"value,omitempty"`
+}
+
+// FileServiceProperties the properties of File services in storage account.
+type FileServiceProperties struct {
+ autorest.Response `json:"-"`
+ // FileServicePropertiesProperties - The properties of File services in storage account.
+ *FileServicePropertiesProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileServiceProperties.
+func (fsp FileServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsp.FileServicePropertiesProperties != nil {
+ objectMap["properties"] = fsp.FileServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileServiceProperties struct.
+func (fsp *FileServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileServiceProperties FileServicePropertiesProperties
+ err = json.Unmarshal(*v, &fileServiceProperties)
+ if err != nil {
+ return err
+ }
+ fsp.FileServicePropertiesProperties = &fileServiceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileServicePropertiesProperties the properties of File services in storage account.
+type FileServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the File service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the File service.
+ Cors *CorsRules `json:"cors,omitempty"`
+ // ShareDeleteRetentionPolicy - The file service properties for share soft delete.
+ ShareDeleteRetentionPolicy *DeleteRetentionPolicy `json:"shareDeleteRetentionPolicy,omitempty"`
+}
+
+// FileShare properties of the file share, including Id, resource name, resource type, Etag.
+type FileShare struct {
+ autorest.Response `json:"-"`
+ // FileShareProperties - Properties of the file share.
+ *FileShareProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShare.
+func (fs FileShare) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fs.FileShareProperties != nil {
+ objectMap["properties"] = fs.FileShareProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileShare struct.
+func (fs *FileShare) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileShareProperties FileShareProperties
+ err = json.Unmarshal(*v, &fileShareProperties)
+ if err != nil {
+ return err
+ }
+ fs.FileShareProperties = &fileShareProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fs.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fs.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fs.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fs.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileShareItem the file share properties to be listed out.
+type FileShareItem struct {
+ // FileShareProperties - The file share properties to be listed out.
+ *FileShareProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShareItem.
+func (fsi FileShareItem) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsi.FileShareProperties != nil {
+ objectMap["properties"] = fsi.FileShareProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileShareItem struct.
+func (fsi *FileShareItem) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileShareProperties FileShareProperties
+ err = json.Unmarshal(*v, &fileShareProperties)
+ if err != nil {
+ return err
+ }
+ fsi.FileShareProperties = &fileShareProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fsi.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fsi.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fsi.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsi.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileShareItems response schema. Contains the list of shares returned and, if paging is requested
+// or required, a URL to the next page of shares.
+type FileShareItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of file shares returned.
+ Value *[]FileShareItem `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query next page of shares. Returned when total number of requested shares exceed maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// FileShareItemsIterator provides access to a complete listing of FileShareItem values.
+type FileShareItemsIterator struct {
+ i int
+ page FileShareItemsPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *FileShareItemsIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *FileShareItemsIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter FileShareItemsIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter FileShareItemsIterator) Response() FileShareItems {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter FileShareItemsIterator) Value() FileShareItem {
+ if !iter.page.NotDone() {
+ return FileShareItem{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewFileShareItemsIterator creates a new instance of the FileShareItemsIterator type.
+func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
+ return FileShareItemsIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (fsi FileShareItems) IsEmpty() bool {
+ return fsi.Value == nil || len(*fsi.Value) == 0
+}
+
+// fileShareItemsPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (fsi FileShareItems) fileShareItemsPreparer(ctx context.Context) (*http.Request, error) {
+ if fsi.NextLink == nil || len(to.String(fsi.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(fsi.NextLink)))
+}
+
+// FileShareItemsPage contains a page of FileShareItem values.
+type FileShareItemsPage struct {
+ fn func(context.Context, FileShareItems) (FileShareItems, error)
+ fsi FileShareItems
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *FileShareItemsPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.fsi)
+ if err != nil {
+ return err
+ }
+ page.fsi = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *FileShareItemsPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page FileShareItemsPage) NotDone() bool {
+ return !page.fsi.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page FileShareItemsPage) Response() FileShareItems {
+ return page.fsi
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page FileShareItemsPage) Values() []FileShareItem {
+ if page.fsi.IsEmpty() {
+ return nil
+ }
+ return *page.fsi.Value
+}
+
+// NewFileShareItemsPage creates a new instance of the FileShareItemsPage type.
+func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
+ return FileShareItemsPage{fn: getNextPage}
+}
+
+// FileShareProperties the properties of the file share.
+type FileShareProperties struct {
+ // LastModifiedTime - READ-ONLY; Returns the date and time the share was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // Metadata - A name-value pair to associate with the share as metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ShareQuota - The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120).
+ ShareQuota *int32 `json:"shareQuota,omitempty"`
+}
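+
+// Illustrative usage sketch (not generated code): creating a 100 GiB share
+// with metadata, assuming a generated FileSharesClient.Create method; the
+// names are placeholders:
+//
+//   share, err := fileSharesClient.Create(ctx, "myResourceGroup", "mystorageaccount", "myshare",
+//       FileShare{FileShareProperties: &FileShareProperties{
+//           ShareQuota: to.Int32Ptr(100),
+//           Metadata:   map[string]*string{"team": to.StringPtr("storage")},
+//       }})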
+
+// MarshalJSON is the custom marshaler for FileShareProperties.
+func (fsp FileShareProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsp.Metadata != nil {
+ objectMap["metadata"] = fsp.Metadata
+ }
+ if fsp.ShareQuota != nil {
+ objectMap["shareQuota"] = fsp.ShareQuota
+ }
+ return json.Marshal(objectMap)
+}
+
+// GeoReplicationStats statistics related to replication for storage account's Blob, Table, Queue and File
+// services. It is only available when geo-redundant replication is enabled for the storage account.
+type GeoReplicationStats struct {
+ // Status - READ-ONLY; The status of the secondary location. Possible values are: - Live: Indicates that the secondary location is active and operational. - Bootstrap: Indicates initial synchronization from the primary location to the secondary location is in progress. This typically occurs when replication is first enabled. - Unavailable: Indicates that the secondary location is temporarily unavailable. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable'
+ Status GeoReplicationStatus `json:"status,omitempty"`
+ // LastSyncTime - READ-ONLY; All primary writes preceding this UTC date/time value are guaranteed to be available for read operations. Primary writes following this point in time may or may not be available for reads. The element may be the default value if LastSyncTime is not available, which can happen if the secondary is offline or still bootstrapping.
+ LastSyncTime *date.Time `json:"lastSyncTime,omitempty"`
+ // CanFailover - READ-ONLY; A boolean flag which indicates whether or not account failover is supported for the account.
+ CanFailover *bool `json:"canFailover,omitempty"`
+}
+
+// Identity identity for the resource.
+type Identity struct {
+ // PrincipalID - READ-ONLY; The principal ID of resource identity.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // TenantID - READ-ONLY; The tenant ID of resource.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Type - The identity type.
+ Type *string `json:"type,omitempty"`
+}
+
+// ImmutabilityPolicy the ImmutabilityPolicy property of a blob container, including Id, resource name,
+// resource type, Etag.
+type ImmutabilityPolicy struct {
+ autorest.Response `json:"-"`
+ // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container.
+ *ImmutabilityPolicyProperty `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImmutabilityPolicy.
+func (IP ImmutabilityPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if IP.ImmutabilityPolicyProperty != nil {
+ objectMap["properties"] = IP.ImmutabilityPolicyProperty
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicy struct.
+func (IP *ImmutabilityPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var immutabilityPolicyProperty ImmutabilityPolicyProperty
+ err = json.Unmarshal(*v, &immutabilityPolicyProperty)
+ if err != nil {
+ return err
+ }
+ IP.ImmutabilityPolicyProperty = &immutabilityPolicyProperty
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ IP.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ IP.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ IP.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ IP.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ImmutabilityPolicyProperties the properties of an ImmutabilityPolicy of a blob container.
+type ImmutabilityPolicyProperties struct {
+ // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container.
+ *ImmutabilityPolicyProperty `json:"properties,omitempty"`
+ // Etag - READ-ONLY; ImmutabilityPolicy Etag.
+ Etag *string `json:"etag,omitempty"`
+ // UpdateHistory - READ-ONLY; The ImmutabilityPolicy update history of the blob container.
+ UpdateHistory *[]UpdateHistoryProperty `json:"updateHistory,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperties.
+func (ipp ImmutabilityPolicyProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ipp.ImmutabilityPolicyProperty != nil {
+ objectMap["properties"] = ipp.ImmutabilityPolicyProperty
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicyProperties struct.
+func (ipp *ImmutabilityPolicyProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var immutabilityPolicyProperty ImmutabilityPolicyProperty
+ err = json.Unmarshal(*v, &immutabilityPolicyProperty)
+ if err != nil {
+ return err
+ }
+ ipp.ImmutabilityPolicyProperty = &immutabilityPolicyProperty
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ ipp.Etag = &etag
+ }
+ case "updateHistory":
+ if v != nil {
+ var updateHistory []UpdateHistoryProperty
+ err = json.Unmarshal(*v, &updateHistory)
+ if err != nil {
+ return err
+ }
+ ipp.UpdateHistory = &updateHistory
+ }
+ }
+ }
+
+ return nil
+}
+
+// ImmutabilityPolicyProperty the properties of an ImmutabilityPolicy of a blob container.
+type ImmutabilityPolicyProperty struct {
+ // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days.
+ ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"`
+ // State - READ-ONLY; The ImmutabilityPolicy state of a blob container. Possible values include: 'Locked', 'Unlocked'
+ State ImmutabilityPolicyState `json:"state,omitempty"`
+}
+
+// IPRule IP rule with specific IP or IP range in CIDR format.
+type IPRule struct {
+ // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed.
+ IPAddressOrRange *string `json:"value,omitempty"`
+ // Action - The action of IP ACL rule. Possible values include: 'Allow'
+ Action Action `json:"action,omitempty"`
+}
+
+// KeyVaultProperties properties of key vault.
+type KeyVaultProperties struct {
+ // KeyName - The name of KeyVault key.
+ KeyName *string `json:"keyname,omitempty"`
+ // KeyVersion - The version of KeyVault key.
+ KeyVersion *string `json:"keyversion,omitempty"`
+ // KeyVaultURI - The Uri of KeyVault.
+ KeyVaultURI *string `json:"keyvaulturi,omitempty"`
+}
+
+// LeaseContainerRequest lease Container request schema.
+type LeaseContainerRequest struct {
+ // Action - Specifies the lease action. Can be one of the available actions. Possible values include: 'Acquire', 'Renew', 'Change', 'Release', 'Break'
+ Action Action1 `json:"action,omitempty"`
+ // LeaseID - Identifies the lease. Can be specified in any valid GUID string format.
+ LeaseID *string `json:"leaseId,omitempty"`
+ // BreakPeriod - Optional. For a break action, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60.
+ BreakPeriod *int32 `json:"breakPeriod,omitempty"`
+ // LeaseDuration - Required for acquire. Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires.
+ LeaseDuration *int32 `json:"leaseDuration,omitempty"`
+ // ProposedLeaseID - Optional for acquire, required for change. Proposed lease ID, in a GUID string format.
+ ProposedLeaseID *string `json:"proposedLeaseId,omitempty"`
+}
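+
+// Illustrative sketch (not part of the generated SDK): acquiring an
+// infinite lease, per the field documentation above. The variable names and
+// values are hypothetical.
+//
+//   duration := int32(-1)
+//   req := LeaseContainerRequest{
+//       Action:        Acquire,
+//       LeaseDuration: &duration,
+//   }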
+
+// LeaseContainerResponse lease Container response schema.
+type LeaseContainerResponse struct {
+ autorest.Response `json:"-"`
+ // LeaseID - Returned unique lease ID that must be included with any request to delete the container, or to renew, change, or release the lease.
+ LeaseID *string `json:"leaseId,omitempty"`
+ // LeaseTimeSeconds - Approximate time remaining in the lease period, in seconds.
+ LeaseTimeSeconds *string `json:"leaseTimeSeconds,omitempty"`
+}
+
+// LegalHold the LegalHold property of a blob container.
+type LegalHold struct {
+ autorest.Response `json:"-"`
+ // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+ HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+ // Tags - Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case at SRP.
+ Tags *[]string `json:"tags,omitempty"`
+}
+
+// LegalHoldProperties the LegalHold property of a blob container.
+type LegalHoldProperties struct {
+ // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+ HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+ // Tags - The list of LegalHold tags of a blob container.
+ Tags *[]TagProperty `json:"tags,omitempty"`
+}
+
+// ListAccountSasResponse the List SAS credentials operation response.
+type ListAccountSasResponse struct {
+ autorest.Response `json:"-"`
+ // AccountSasToken - READ-ONLY; List SAS credentials of storage account.
+ AccountSasToken *string `json:"accountSasToken,omitempty"`
+}
+
+// ListContainerItem the blob container properties to be listed out.
+type ListContainerItem struct {
+ // ContainerProperties - The blob container properties to be listed out.
+ *ContainerProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListContainerItem.
+func (lci ListContainerItem) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lci.ContainerProperties != nil {
+ objectMap["properties"] = lci.ContainerProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ListContainerItem struct.
+func (lci *ListContainerItem) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var containerProperties ContainerProperties
+ err = json.Unmarshal(*v, &containerProperties)
+ if err != nil {
+ return err
+ }
+ lci.ContainerProperties = &containerProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ lci.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ lci.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ lci.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ lci.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ListContainerItems the response schema. Contains the list of containers returned, and if paging is
+// requested or required, a URL to the next page of containers.
+type ListContainerItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of blob containers returned.
+ Value *[]ListContainerItem `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query the next page of containers. Returned when the total number of requested containers exceeds the maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListContainerItemsIterator provides access to a complete listing of ListContainerItem values.
+type ListContainerItemsIterator struct {
+ i int
+ page ListContainerItemsPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListContainerItemsIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListContainerItemsIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListContainerItemsIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListContainerItemsIterator) Response() ListContainerItems {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListContainerItemsIterator) Value() ListContainerItem {
+ if !iter.page.NotDone() {
+ return ListContainerItem{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListContainerItemsIterator creates a new instance of the ListContainerItemsIterator type.
+func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator {
+ return ListContainerItemsIterator{page: page}
+}
+
+// IsEmpty returns true if the ListContainerItems contains no values.
+func (lci ListContainerItems) IsEmpty() bool {
+ return lci.Value == nil || len(*lci.Value) == 0
+}
+
+// listContainerItemsPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lci ListContainerItems) listContainerItemsPreparer(ctx context.Context) (*http.Request, error) {
+ if lci.NextLink == nil || len(to.String(lci.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lci.NextLink)))
+}
+
+// ListContainerItemsPage contains a page of ListContainerItem values.
+type ListContainerItemsPage struct {
+ fn func(context.Context, ListContainerItems) (ListContainerItems, error)
+ lci ListContainerItems
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListContainerItemsPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lci)
+ if err != nil {
+ return err
+ }
+ page.lci = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListContainerItemsPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListContainerItemsPage) NotDone() bool {
+ return !page.lci.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListContainerItemsPage) Response() ListContainerItems {
+ return page.lci
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListContainerItemsPage) Values() []ListContainerItem {
+ if page.lci.IsEmpty() {
+ return nil
+ }
+ return *page.lci.Value
+}
+
+// NewListContainerItemsPage creates a new instance of the ListContainerItemsPage type.
+func NewListContainerItemsPage(getNextPage func(context.Context, ListContainerItems) (ListContainerItems, error)) ListContainerItemsPage {
+ return ListContainerItemsPage{fn: getNextPage}
+}
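+
+// Illustrative sketch (not part of the generated SDK): a typical loop over
+// the pager above, assuming ctx is a context.Context and firstPage already
+// holds the first set of results (for example, as returned by a List call).
+//
+//   iter := NewListContainerItemsIterator(firstPage)
+//   for iter.NotDone() {
+//       item := iter.Value()
+//       _ = item // process the ListContainerItem
+//       if err := iter.NextWithContext(ctx); err != nil {
+//           break
+//       }
+//   }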
+
+// ListServiceSasResponse the List service SAS credentials operation response.
+type ListServiceSasResponse struct {
+ autorest.Response `json:"-"`
+ // ServiceSasToken - READ-ONLY; List service SAS credentials of specific resource.
+ ServiceSasToken *string `json:"serviceSasToken,omitempty"`
+}
+
+// ManagementPolicy the Get Storage Account ManagementPolicies operation response.
+type ManagementPolicy struct {
+ autorest.Response `json:"-"`
+ // ManagementPolicyProperties - Returns the Storage Account Data Policies Rules.
+ *ManagementPolicyProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagementPolicy.
+func (mp ManagementPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mp.ManagementPolicyProperties != nil {
+ objectMap["properties"] = mp.ManagementPolicyProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagementPolicy struct.
+func (mp *ManagementPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var managementPolicyProperties ManagementPolicyProperties
+ err = json.Unmarshal(*v, &managementPolicyProperties)
+ if err != nil {
+ return err
+ }
+ mp.ManagementPolicyProperties = &managementPolicyProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagementPolicyAction actions are applied to the filtered blobs when the execution condition is met.
+type ManagementPolicyAction struct {
+ // BaseBlob - The management policy action for base blob
+ BaseBlob *ManagementPolicyBaseBlob `json:"baseBlob,omitempty"`
+ // Snapshot - The management policy action for snapshot
+ Snapshot *ManagementPolicySnapShot `json:"snapshot,omitempty"`
+}
+
+// ManagementPolicyBaseBlob management policy action for base blob.
+type ManagementPolicyBaseBlob struct {
+ // TierToCool - The function to tier blobs to cool storage. Supports blobs currently at Hot tier
+ TierToCool *DateAfterModification `json:"tierToCool,omitempty"`
+ // TierToArchive - The function to tier blobs to archive storage. Supports blobs currently at Hot or Cool tier
+ TierToArchive *DateAfterModification `json:"tierToArchive,omitempty"`
+ // Delete - The function to delete the blob
+ Delete *DateAfterModification `json:"delete,omitempty"`
+}
+
+// ManagementPolicyDefinition an object that defines the Lifecycle rule. Each definition is made up of a
+// filter set and an action set.
+type ManagementPolicyDefinition struct {
+ // Actions - An object that defines the action set.
+ Actions *ManagementPolicyAction `json:"actions,omitempty"`
+ // Filters - An object that defines the filter set.
+ Filters *ManagementPolicyFilter `json:"filters,omitempty"`
+}
+
+// ManagementPolicyFilter filters limit rule actions to a subset of blobs within the storage account. If
+// multiple filters are defined, a logical AND is performed on all filters.
+type ManagementPolicyFilter struct {
+ // PrefixMatch - An array of strings for prefixes to be matched.
+ PrefixMatch *[]string `json:"prefixMatch,omitempty"`
+ // BlobTypes - An array of predefined enum values. Only blockBlob is supported.
+ BlobTypes *[]string `json:"blobTypes,omitempty"`
+}
+
+// ManagementPolicyProperties the Storage Account ManagementPolicy properties.
+type ManagementPolicyProperties struct {
+ // LastModifiedTime - READ-ONLY; Returns the date and time the ManagementPolicy was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // Policy - The Storage Account ManagementPolicy, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
+ Policy *ManagementPolicySchema `json:"policy,omitempty"`
+}
+
+// ManagementPolicyRule an object that wraps the Lifecycle rule. Each rule is uniquely defined by name.
+type ManagementPolicyRule struct {
+ // Enabled - Rule is enabled if set to true.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Name - A rule name can contain any combination of alphanumeric characters. Rule name is case-sensitive. It must be unique within a policy.
+ Name *string `json:"name,omitempty"`
+ // Type - The valid value is Lifecycle
+ Type *string `json:"type,omitempty"`
+ // Definition - An object that defines the Lifecycle rule.
+ Definition *ManagementPolicyDefinition `json:"definition,omitempty"`
+}
+
+// ManagementPolicySchema the Storage Account ManagementPolicies Rules. See more details in:
+// https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
+type ManagementPolicySchema struct {
+ // Rules - The Storage Account ManagementPolicies Rules. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
+ Rules *[]ManagementPolicyRule `json:"rules,omitempty"`
+}
+
+// ManagementPolicySnapShot management policy action for snapshot.
+type ManagementPolicySnapShot struct {
+ // Delete - The function to delete the blob snapshot
+ Delete *DateAfterCreation `json:"delete,omitempty"`
+}
+
+// MetricSpecification metric specification of operation.
+type MetricSpecification struct {
+ // Name - Name of metric specification.
+ Name *string `json:"name,omitempty"`
+ // DisplayName - Display name of metric specification.
+ DisplayName *string `json:"displayName,omitempty"`
+ // DisplayDescription - Display description of metric specification.
+ DisplayDescription *string `json:"displayDescription,omitempty"`
+ // Unit - Unit could be Bytes or Count.
+ Unit *string `json:"unit,omitempty"`
+ // Dimensions - Dimensions of blobs, including blob type and access tier.
+ Dimensions *[]Dimension `json:"dimensions,omitempty"`
+ // AggregationType - Aggregation type could be Average.
+ AggregationType *string `json:"aggregationType,omitempty"`
+ // FillGapWithZero - The property that decides whether to fill gaps with zero.
+ FillGapWithZero *bool `json:"fillGapWithZero,omitempty"`
+ // Category - The category this metric specification belongs to; could be Capacity.
+ Category *string `json:"category,omitempty"`
+ // ResourceIDDimensionNameOverride - Account Resource Id.
+ ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"`
+}
+
+// NetworkRuleSet network rule set
+type NetworkRuleSet struct {
+ // Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (for example, "Logging, Metrics"), or None to bypass none of that traffic. Possible values include: 'None', 'Logging', 'Metrics', 'AzureServices'
+ Bypass Bypass `json:"bypass,omitempty"`
+ // VirtualNetworkRules - Sets the virtual network rules
+ VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"`
+ // IPRules - Sets the IP ACL rules
+ IPRules *[]IPRule `json:"ipRules,omitempty"`
+ // DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny'
+ DefaultAction DefaultAction `json:"defaultAction,omitempty"`
+}
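+
+// Illustrative sketch (not part of the generated SDK): a deny-by-default
+// rule set that still admits one IPv4 range and trusted Azure services,
+// using the enum values documented on the fields above. The CIDR is a
+// placeholder.
+//
+//   cidr := "203.0.113.0/24"
+//   rules := NetworkRuleSet{
+//       Bypass:        AzureServices,
+//       IPRules:       &[]IPRule{{IPAddressOrRange: &cidr, Action: Allow}},
+//       DefaultAction: DefaultActionDeny,
+//   }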
+
+// Operation storage REST API operation definition.
+type Operation struct {
+ // Name - Operation name: {provider}/{resource}/{operation}
+ Name *string `json:"name,omitempty"`
+ // Display - Display metadata associated with the operation.
+ Display *OperationDisplay `json:"display,omitempty"`
+ // Origin - The origin of operations.
+ Origin *string `json:"origin,omitempty"`
+ // OperationProperties - Properties of operation, including metric specifications.
+ *OperationProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Operation.
+func (o Operation) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if o.Name != nil {
+ objectMap["name"] = o.Name
+ }
+ if o.Display != nil {
+ objectMap["display"] = o.Display
+ }
+ if o.Origin != nil {
+ objectMap["origin"] = o.Origin
+ }
+ if o.OperationProperties != nil {
+ objectMap["properties"] = o.OperationProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Operation struct.
+func (o *Operation) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ o.Name = &name
+ }
+ case "display":
+ if v != nil {
+ var display OperationDisplay
+ err = json.Unmarshal(*v, &display)
+ if err != nil {
+ return err
+ }
+ o.Display = &display
+ }
+ case "origin":
+ if v != nil {
+ var origin string
+ err = json.Unmarshal(*v, &origin)
+ if err != nil {
+ return err
+ }
+ o.Origin = &origin
+ }
+ case "properties":
+ if v != nil {
+ var operationProperties OperationProperties
+ err = json.Unmarshal(*v, &operationProperties)
+ if err != nil {
+ return err
+ }
+ o.OperationProperties = &operationProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// OperationDisplay display metadata associated with the operation.
+type OperationDisplay struct {
+ // Provider - Service provider: Microsoft Storage.
+ Provider *string `json:"provider,omitempty"`
+ // Resource - Resource on which the operation is performed, etc.
+ Resource *string `json:"resource,omitempty"`
+ // Operation - Type of operation: get, read, delete, etc.
+ Operation *string `json:"operation,omitempty"`
+ // Description - Description of the operation.
+ Description *string `json:"description,omitempty"`
+}
+
+// OperationListResult result of the request to list Storage operations. It contains a list of operations
+// and a URL link to get the next set of results.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of Storage operations supported by the Storage resource provider.
+ Value *[]Operation `json:"value,omitempty"`
+}
+
+// OperationProperties properties of operation, including metric specifications.
+type OperationProperties struct {
+ // ServiceSpecification - One property of operation, including metric specifications.
+ ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"`
+}
+
+// PrivateEndpoint the Private Endpoint resource.
+type PrivateEndpoint struct {
+ // ID - READ-ONLY; The ARM identifier for Private Endpoint
+ ID *string `json:"id,omitempty"`
+}
+
+// PrivateEndpointConnection the Private Endpoint Connection resource.
+type PrivateEndpointConnection struct {
+ autorest.Response `json:"-"`
+ // PrivateEndpointConnectionProperties - Resource properties.
+ *PrivateEndpointConnectionProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateEndpointConnection.
+func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pec.PrivateEndpointConnectionProperties != nil {
+ objectMap["properties"] = pec.PrivateEndpointConnectionProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct.
+func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var privateEndpointConnectionProperties PrivateEndpointConnectionProperties
+ err = json.Unmarshal(*v, &privateEndpointConnectionProperties)
+ if err != nil {
+ return err
+ }
+ pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ pec.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ pec.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ pec.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// PrivateEndpointConnectionProperties properties of the private endpoint connection.
+type PrivateEndpointConnectionProperties struct {
+ // PrivateEndpoint - The resource of the private endpoint.
+ PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"`
+ // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider.
+ PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"`
+ // ProvisioningState - The provisioning state of the private endpoint connection resource. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Failed'
+ ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// PrivateLinkResource a private link resource
+type PrivateLinkResource struct {
+ // PrivateLinkResourceProperties - Resource properties.
+ *PrivateLinkResourceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateLinkResource.
+func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if plr.PrivateLinkResourceProperties != nil {
+ objectMap["properties"] = plr.PrivateLinkResourceProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct.
+func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var privateLinkResourceProperties PrivateLinkResourceProperties
+ err = json.Unmarshal(*v, &privateLinkResourceProperties)
+ if err != nil {
+ return err
+ }
+ plr.PrivateLinkResourceProperties = &privateLinkResourceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ plr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ plr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ plr.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// PrivateLinkResourceListResult a list of private link resources
+type PrivateLinkResourceListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Array of private link resources
+ Value *[]PrivateLinkResource `json:"value,omitempty"`
+}
+
+// PrivateLinkResourceProperties properties of a private link resource.
+type PrivateLinkResourceProperties struct {
+ // GroupID - READ-ONLY; The private link resource group id.
+ GroupID *string `json:"groupId,omitempty"`
+ // RequiredMembers - READ-ONLY; The private link resource required member names.
+ RequiredMembers *[]string `json:"requiredMembers,omitempty"`
+ // RequiredZoneNames - The private link resource private DNS zone names.
+ RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"`
+}
+
+// PrivateLinkServiceConnectionState a collection of information about the state of the connection between
+// service consumer and provider.
+type PrivateLinkServiceConnectionState struct {
+ // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'Pending', 'Approved', 'Rejected'
+ Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"`
+ // Description - The reason for approval/rejection of the connection.
+ Description *string `json:"description,omitempty"`
+ // ActionRequired - A message indicating if changes on the service provider require any updates on the consumer.
+ ActionRequired *string `json:"actionRequired,omitempty"`
+}
+
+// ProxyResource the resource model definition for an ARM proxy resource. It has everything other than the
+// required location and tags.
+type ProxyResource struct {
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// Resource ...
+type Resource struct {
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// Restriction the restriction because of which the SKU cannot be used.
+type Restriction struct {
+ // Type - READ-ONLY; The type of restrictions. As of now the only possible value for this is location.
+ Type *string `json:"type,omitempty"`
+ // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location, this would be the different locations where the SKU is restricted.
+ Values *[]string `json:"values,omitempty"`
+ // ReasonCode - The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". QuotaId is set when the SKU has the requiredQuotas parameter, as the subscription does not belong to that quota. "NotAvailableForSubscription" is related to capacity at the datacenter. Possible values include: 'QuotaID', 'NotAvailableForSubscription'
+ ReasonCode ReasonCode `json:"reasonCode,omitempty"`
+}
+
+// ServiceSasParameters the parameters to list service SAS credentials of a specific resource.
+type ServiceSasParameters struct {
+ // CanonicalizedResource - The canonical path to the signed resource.
+ CanonicalizedResource *string `json:"canonicalizedResource,omitempty"`
+ // Resource - The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). Possible values include: 'SignedResourceB', 'SignedResourceC', 'SignedResourceF', 'SignedResourceS'
+ Resource SignedResource `json:"signedResource,omitempty"`
+ // Permissions - The signed permissions for the service SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'R', 'D', 'W', 'L', 'A', 'C', 'U', 'P'
+ Permissions Permissions `json:"signedPermission,omitempty"`
+ // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests.
+ IPAddressOrRange *string `json:"signedIp,omitempty"`
+ // Protocols - The protocol permitted for a request made with the service SAS. Possible values include: 'Httpshttp', 'HTTPS'
+ Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
+ // SharedAccessStartTime - The time at which the SAS becomes valid.
+ SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
+ // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid.
+ SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
+ // Identifier - A unique value up to 64 characters in length that correlates to an access policy specified for the container, queue, or table.
+ Identifier *string `json:"signedIdentifier,omitempty"`
+ // PartitionKeyStart - The start of partition key.
+ PartitionKeyStart *string `json:"startPk,omitempty"`
+ // PartitionKeyEnd - The end of partition key.
+ PartitionKeyEnd *string `json:"endPk,omitempty"`
+ // RowKeyStart - The start of row key.
+ RowKeyStart *string `json:"startRk,omitempty"`
+ // RowKeyEnd - The end of row key.
+ RowKeyEnd *string `json:"endRk,omitempty"`
+ // KeyToSign - The key to sign the SAS token with.
+ KeyToSign *string `json:"keyToSign,omitempty"`
+ // CacheControl - The response header override for cache control.
+ CacheControl *string `json:"rscc,omitempty"`
+ // ContentDisposition - The response header override for content disposition.
+ ContentDisposition *string `json:"rscd,omitempty"`
+ // ContentEncoding - The response header override for content encoding.
+ ContentEncoding *string `json:"rsce,omitempty"`
+ // ContentLanguage - The response header override for content language.
+ ContentLanguage *string `json:"rscl,omitempty"`
+ // ContentType - The response header override for content type.
+ ContentType *string `json:"rsct,omitempty"`
+}
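+
+// Illustrative sketch (not part of the generated SDK): minimal parameters
+// for a read-only container service SAS, using the enum values documented
+// on the fields above. The canonicalized path is a placeholder.
+//
+//   canonicalizedResource := "/blob/mystorageaccount/mycontainer"
+//   params := ServiceSasParameters{
+//       CanonicalizedResource: &canonicalizedResource,
+//       Resource:              SignedResourceC,
+//       Permissions:           R,
+//   }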
+
+// ServiceSpecification one property of operation, including metric specifications.
+type ServiceSpecification struct {
+ // MetricSpecifications - Metric specifications of operation.
+ MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"`
+}
+
+// Sku the SKU of the storage account.
+type Sku struct {
+ // Name - Gets or sets the SKU name. Required for account creation; optional for update. Note that in older versions, SKU name was called accountType. Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS'
+ Name SkuName `json:"name,omitempty"`
+ // Tier - READ-ONLY; Gets the SKU tier. This is based on the SKU name. Possible values include: 'Standard', 'Premium'
+ Tier SkuTier `json:"tier,omitempty"`
+ // ResourceType - READ-ONLY; The type of the resource, usually 'storageAccounts'.
+ ResourceType *string `json:"resourceType,omitempty"`
+ // Kind - READ-ONLY; Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage'
+ Kind Kind `json:"kind,omitempty"`
+ // Locations - READ-ONLY; The set of locations in which the SKU is available. These will be supported and registered Azure geo regions (e.g. West US, East US, Southeast Asia, etc.).
+ Locations *[]string `json:"locations,omitempty"`
+ // Capabilities - READ-ONLY; The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc.
+ Capabilities *[]SKUCapability `json:"capabilities,omitempty"`
+ // Restrictions - The restrictions because of which the SKU cannot be used. This is empty if there are no restrictions.
+ Restrictions *[]Restriction `json:"restrictions,omitempty"`
+}
+
+// SKUCapability the capability information in the specified SKU, including file encryption, network ACLs,
+// change notification, etc.
+type SKUCapability struct {
+ // Name - READ-ONLY; The name of the capability. The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc.
+ Name *string `json:"name,omitempty"`
+ // Value - READ-ONLY; A string value to indicate states of given capability. Possibly 'true' or 'false'.
+ Value *string `json:"value,omitempty"`
+}
+
+// SkuListResult the response from the List Storage SKUs operation.
+type SkuListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; Get the list result of storage SKUs and their properties.
+ Value *[]Sku `json:"value,omitempty"`
+}
+
+// TagProperty a tag of the LegalHold of a blob container.
+type TagProperty struct {
+ // Tag - READ-ONLY; The tag value.
+ Tag *string `json:"tag,omitempty"`
+ // Timestamp - READ-ONLY; Returns the date and time the tag was added.
+ Timestamp *date.Time `json:"timestamp,omitempty"`
+ // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who added the tag.
+ ObjectIdentifier *string `json:"objectIdentifier,omitempty"`
+ // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who added the tag.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Upn - READ-ONLY; Returns the User Principal Name of the user who added the tag.
+ Upn *string `json:"upn,omitempty"`
+}
+
+// TrackedResource the resource model definition for an ARM tracked top-level resource.
+type TrackedResource struct {
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Location - The geo-location where the resource lives
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for TrackedResource.
+func (tr TrackedResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if tr.Tags != nil {
+ objectMap["tags"] = tr.Tags
+ }
+ if tr.Location != nil {
+ objectMap["location"] = tr.Location
+ }
+ return json.Marshal(objectMap)
+}
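+
+// Note: as with the other custom marshalers in this file, the READ-ONLY ID,
+// Name, and Type fields are deliberately omitted above, so server-populated
+// values are never echoed back in request bodies.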
+
+// UpdateHistoryProperty an update history of the ImmutabilityPolicy of a blob container.
+type UpdateHistoryProperty struct {
+ // Update - READ-ONLY; The ImmutabilityPolicy update type of a blob container. Possible values include: 'Put', 'Lock', 'Extend'
+ Update ImmutabilityPolicyUpdateType `json:"update,omitempty"`
+ // ImmutabilityPeriodSinceCreationInDays - READ-ONLY; The immutability period for the blobs in the container since the policy creation, in days.
+ ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"`
+ // Timestamp - READ-ONLY; Returns the date and time the ImmutabilityPolicy was updated.
+ Timestamp *date.Time `json:"timestamp,omitempty"`
+ // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who updated the ImmutabilityPolicy.
+ ObjectIdentifier *string `json:"objectIdentifier,omitempty"`
+ // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who updated the ImmutabilityPolicy.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Upn - READ-ONLY; Returns the User Principal Name of the user who updated the ImmutabilityPolicy.
+ Upn *string `json:"upn,omitempty"`
+}
+
+// Usage describes Storage Resource Usage.
+type Usage struct {
+ // Unit - READ-ONLY; Gets the unit of measurement. Possible values include: 'Count', 'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
+ Unit UsageUnit `json:"unit,omitempty"`
+ // CurrentValue - READ-ONLY; Gets the current count of the allocated resources in the subscription.
+ CurrentValue *int32 `json:"currentValue,omitempty"`
+ // Limit - READ-ONLY; Gets the maximum count of the resources that can be allocated in the subscription.
+ Limit *int32 `json:"limit,omitempty"`
+ // Name - READ-ONLY; Gets the name of the type of usage.
+ Name *UsageName `json:"name,omitempty"`
+}
+
+// UsageListResult the response from the List Usages operation.
+type UsageListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Gets or sets the list of Storage Resource Usages.
+ Value *[]Usage `json:"value,omitempty"`
+}
+
+// UsageName the usage names that can be used; currently limited to StorageAccount.
+type UsageName struct {
+ // Value - READ-ONLY; Gets a string describing the resource name.
+ Value *string `json:"value,omitempty"`
+ // LocalizedValue - READ-ONLY; Gets a localized string describing the resource name.
+ LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// VirtualNetworkRule virtual Network rule.
+type VirtualNetworkRule struct {
+ // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
+ VirtualNetworkResourceID *string `json:"id,omitempty"`
+ // Action - The action of virtual network rule. Possible values include: 'Allow'
+ Action Action `json:"action,omitempty"`
+ // State - Gets the state of virtual network rule. Possible values include: 'StateProvisioning', 'StateDeprovisioning', 'StateSucceeded', 'StateFailed', 'StateNetworkSourceDeleted'
+ State State `json:"state,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/operations.go
new file mode 100644
index 0000000..cd01bf1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/operations.go
@@ -0,0 +1,109 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the Azure Storage Management API.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available Storage REST API operations.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Storage/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
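+
+// Illustrative sketch (not part of the generated SDK): listing the
+// available operations. The subscription ID is a placeholder, and an
+// autorest.Authorizer would normally be set on the client first.
+//
+//   client := NewOperationsClient("<subscriptionID>")
+//   res, err := client.List(context.Background())
+//   if err == nil && res.Value != nil {
+//       for _, op := range *res.Value {
+//           _ = op.Name // operation names of the form {provider}/{resource}/{operation}
+//       }
+//   }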
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privateendpointconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privateendpointconnections.go
new file mode 100644
index 0000000..4cad9cb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privateendpointconnections.go
@@ -0,0 +1,332 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PrivateEndpointConnectionsClient is the Azure Storage Management API.
+type PrivateEndpointConnectionsClient struct {
+ BaseClient
+}
+
+// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client.
+func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient {
+ return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client.
+func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient {
+ return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Delete deletes the specified private endpoint connection associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Storage
+// Account
+func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the specified private endpoint connection associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Storage
+// Account
+func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
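+
+// Illustrative sketch (not part of the generated SDK): fetching a single
+// private endpoint connection. The client setup mirrors the constructors
+// above; ctx and all argument values are placeholders.
+//
+//   client := NewPrivateEndpointConnectionsClient("<subscriptionID>")
+//   pec, err := client.Get(ctx, "myResourceGroup", "mystorageaccount", "myPrivateEndpoint")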
+
+// Put updates the state of the specified private endpoint connection associated with the storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Storage
+// Account
+// properties - the private endpoint connection properties.
+func (client PrivateEndpointConnectionsClient) Put(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (result PrivateEndpointConnection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Put")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: properties,
+ Constraints: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Put", err.Error())
+ }
+
+ req, err := client.PutPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName, properties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PutSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PutResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// PutPreparer prepares the Put request.
+func (client PrivateEndpointConnectionsClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
+ autorest.WithJSON(properties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PutSender sends the Put request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateEndpointConnectionsClient) PutSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// PutResponder handles the response to the Put request. The method always
+// closes the http.Response Body.
+func (client PrivateEndpointConnectionsClient) PutResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
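
A quick orientation sketch for driving this generated client, illustrative only: the subscription, resource group, account, and connection names are placeholders, and authenticating via go-autorest's auth package is an assumption about the caller's setup.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
        "github.com/Azure/go-autorest/autorest/azure/auth"
    )

    func main() {
        // Credentials are read from the AZURE_* environment variables.
        authorizer, err := auth.NewAuthorizerFromEnvironment()
        if err != nil {
            log.Fatal(err)
        }

        client := storage.NewPrivateEndpointConnectionsClient("<subscription-id>")
        client.Authorizer = authorizer

        // Fetch a single private endpoint connection; all names are placeholders.
        conn, err := client.Get(context.Background(), "my-rg", "mystorageacct", "my-pe-conn")
        if err != nil {
            log.Fatal(err)
        }
        if conn.Name != nil {
            fmt.Println(*conn.Name)
        }
    }
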
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privatelinkresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privatelinkresources.go
new file mode 100644
index 0000000..369db69
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privatelinkresources.go
@@ -0,0 +1,134 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PrivateLinkResourcesClient is a client for the Azure Storage Management API.
+type PrivateLinkResourcesClient struct {
+ BaseClient
+}
+
+// NewPrivateLinkResourcesClient creates an instance of the PrivateLinkResourcesClient client.
+func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient {
+ return NewPrivateLinkResourcesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPrivateLinkResourcesClientWithBaseURI creates an instance of the PrivateLinkResourcesClient client.
+func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient {
+ return PrivateLinkResourcesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ListByStorageAccount gets the private link resources that need to be created for a storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client PrivateLinkResourcesClient) ListByStorageAccount(ctx context.Context, resourceGroupName string, accountName string) (result PrivateLinkResourceListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrivateLinkResourcesClient.ListByStorageAccount")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.PrivateLinkResourcesClient", "ListByStorageAccount", err.Error())
+ }
+
+ req, err := client.ListByStorageAccountPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByStorageAccountSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByStorageAccountResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByStorageAccountPreparer prepares the ListByStorageAccount request.
+func (client PrivateLinkResourcesClient) ListByStorageAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByStorageAccountSender sends the ListByStorageAccount request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateLinkResourcesClient) ListByStorageAccountSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByStorageAccountResponder handles the response to the ListByStorageAccount request. The method always
+// closes the http.Response Body.
+func (client PrivateLinkResourcesClient) ListByStorageAccountResponder(resp *http.Response) (result PrivateLinkResourceListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
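
Continuing the sketch above (same authorizer), ListByStorageAccount can be exercised like this; treating Value as a *[]PrivateLinkResource follows the usual generated-model shape and is an assumption here.

    plrClient := storage.NewPrivateLinkResourcesClient("<subscription-id>")
    plrClient.Authorizer = authorizer // e.g. from auth.NewAuthorizerFromEnvironment()

    res, err := plrClient.ListByStorageAccount(context.Background(), "my-rg", "mystorageacct")
    if err != nil {
        log.Fatal(err)
    }
    if res.Value != nil {
        for _, r := range *res.Value {
            if r.Name != nil {
                fmt.Println(*r.Name) // group IDs such as "blob" or "file"
            }
        }
    }
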
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/skus.go
new file mode 100644
index 0000000..db346f8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/skus.go
@@ -0,0 +1,120 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// SkusClient is a client for the Azure Storage Management API.
+type SkusClient struct {
+ BaseClient
+}
+
+// NewSkusClient creates an instance of the SkusClient client.
+func NewSkusClient(subscriptionID string) SkusClient {
+ return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSkusClientWithBaseURI creates an instance of the SkusClient client.
+func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
+ return SkusClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists the available SKUs supported by Microsoft.Storage for given subscription.
+func (client SkusClient) List(ctx context.Context) (result SkuListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SkusClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.SkusClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client SkusClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
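
In the same vein, a sketch for listing SKUs (authorizer as before; the element's Name field being an enum-like string follows the generated models and is an assumption here):

    skusClient := storage.NewSkusClient("<subscription-id>")
    skusClient.Authorizer = authorizer

    skus, err := skusClient.List(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if skus.Value != nil {
        for _, sku := range *skus.Value {
            fmt.Println(sku.Name) // e.g. Standard_LRS, Premium_LRS
        }
    }
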
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/usages.go
new file mode 100644
index 0000000..60d0d4b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/usages.go
@@ -0,0 +1,123 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// UsagesClient is a client for the Azure Storage Management API.
+type UsagesClient struct {
+ BaseClient
+}
+
+// NewUsagesClient creates an instance of the UsagesClient client.
+func NewUsagesClient(subscriptionID string) UsagesClient {
+ return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client.
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+ return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ListByLocation gets the current usage count and the limit for the resources of the location under the subscription.
+// Parameters:
+// location - the location of the Azure Storage resource.
+func (client UsagesClient) ListByLocation(ctx context.Context, location string) (result UsageListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.ListByLocation")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.UsagesClient", "ListByLocation", err.Error())
+ }
+
+ req, err := client.ListByLocationPreparer(ctx, location)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByLocationSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByLocationResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByLocationPreparer prepares the ListByLocation request.
+func (client UsagesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByLocationSender sends the ListByLocation request. The method will close the
+// http.Response Body if it receives an error.
+func (client UsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByLocationResponder handles the response to the ListByLocation request. The method always
+// closes the http.Response Body.
+func (client UsagesClient) ListByLocationResponder(resp *http.Response) (result UsageListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
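
And for usages, a sketch assuming the generated Usage model's Name.Value, CurrentValue, and Limit fields; the location is a placeholder:

    usagesClient := storage.NewUsagesClient("<subscription-id>")
    usagesClient.Authorizer = authorizer

    usages, err := usagesClient.ListByLocation(context.Background(), "westus2")
    if err != nil {
        log.Fatal(err)
    }
    if usages.Value != nil {
        for _, u := range *usages.Value {
            if u.Name != nil && u.Name.Value != nil && u.CurrentValue != nil && u.Limit != nil {
                fmt.Printf("%s: %d of %d used\n", *u.Name.Value, *u.CurrentValue, *u.Limit)
            }
        }
    }
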
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/version.go
new file mode 100644
index 0000000..7f2a6a7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/version.go
@@ -0,0 +1,30 @@
+package storage
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " storage/2019-06-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
new file mode 100644
index 0000000..e92b783
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
@@ -0,0 +1,21 @@
+package version
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// Number contains the semantic version of this SDK.
+const Number = "v36.1.0"
diff --git a/vendor/github.com/Azure/azure-storage-file-go/LICENSE b/vendor/github.com/Azure/azure-storage-file-go/LICENSE
new file mode 100644
index 0000000..d1ca00f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/highlevel.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/highlevel.go
new file mode 100644
index 0000000..090abbf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/highlevel.go
@@ -0,0 +1,315 @@
+package azfile
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "bytes"
+ "os"
+ "sync"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+ // defaultParallelCount specifies the default parallel count used by the parallel upload/download methods
+ defaultParallelCount = 5
+
+ // fileSegmentSize specifies the segment size that a file is split into during parallel upload/download
+ fileSegmentSize = 500 * 1024 * 1024
+)
+
+// UploadToAzureFileOptions identifies options used by the UploadBufferToAzureFile and UploadFileToAzureFile functions.
+type UploadToAzureFileOptions struct {
+ // RangeSize specifies the range size to use in each parallel upload; the default (and maximum size) is FileMaxUploadRangeBytes.
+ RangeSize int64
+
+ // Progress is a function that is invoked periodically as bytes are sent in an UploadRange call to the FileURL.
+ Progress pipeline.ProgressReceiver
+
+ // Parallelism indicates the maximum number of ranges to upload in parallel. If 0 (the default) is provided, a parallelism of 5 is used.
+ Parallelism uint16
+
+ // FileHTTPHeaders contains read/writeable file properties.
+ FileHTTPHeaders FileHTTPHeaders
+
+ // Metadata contains metadata key/value pairs.
+ Metadata Metadata
+}
+
+// UploadBufferToAzureFile uploads a buffer to an Azure file.
+// Note: o.RangeSize must be >= 0 and <= FileMaxUploadRangeBytes; if not specified, the method uses FileMaxUploadRangeBytes by default.
+// The total size to be uploaded should be <= FileMaxSizeInBytes.
+func UploadBufferToAzureFile(ctx context.Context, b []byte,
+ fileURL FileURL, o UploadToAzureFileOptions) error {
+
+ // 1. Validate parameters, and set defaults.
+ if o.RangeSize < 0 || o.RangeSize > FileMaxUploadRangeBytes {
+ return fmt.Errorf("invalid argument, o.RangeSize must be >= 0 and <= %d, in bytes", FileMaxUploadRangeBytes)
+ }
+ if o.RangeSize == 0 {
+ o.RangeSize = FileMaxUploadRangeBytes
+ }
+
+ size := int64(len(b))
+
+ parallelism := o.Parallelism
+ if parallelism == 0 {
+ parallelism = defaultParallelCount // default parallelism
+ }
+
+ // 2. Try to create the Azure file.
+ _, err := fileURL.Create(ctx, size, o.FileHTTPHeaders, o.Metadata)
+ if err != nil {
+ return err
+ }
+ // If size is 0, there is nothing to upload; return directly.
+ if size == 0 {
+ return nil
+ }
+
+ // 3. Prepare and do parallel upload.
+ fileProgress := int64(0)
+ progressLock := &sync.Mutex{}
+
+ return doBatchTransfer(ctx, batchTransferOptions{
+ transferSize: size,
+ chunkSize: o.RangeSize,
+ parallelism: parallelism,
+ operation: func(offset int64, curRangeSize int64) error {
+ // Prepare to read the proper section of the buffer.
+ var body io.ReadSeeker = bytes.NewReader(b[offset : offset+curRangeSize])
+ if o.Progress != nil {
+ rangeProgress := int64(0)
+ body = pipeline.NewRequestBodyProgress(body,
+ func(bytesTransferred int64) {
+ diff := bytesTransferred - rangeProgress
+ rangeProgress = bytesTransferred
+ progressLock.Lock()
+ defer progressLock.Unlock()
+ fileProgress += diff
+ o.Progress(fileProgress)
+ })
+ }
+
+ _, err := fileURL.UploadRange(ctx, int64(offset), body, nil)
+ return err
+ },
+ operationName: "UploadBufferToAzureFile",
+ })
+}
+
+// UploadFileToAzureFile uploads a local file to an Azure file.
+func UploadFileToAzureFile(ctx context.Context, file *os.File,
+ fileURL FileURL, o UploadToAzureFileOptions) error {
+
+ stat, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ m := mmf{} // Default to an empty slice; used for 0-size file
+ if stat.Size() != 0 {
+ m, err = newMMF(file, false, 0, int(stat.Size()))
+ if err != nil {
+ return err
+ }
+ defer m.unmap()
+ }
+ return UploadBufferToAzureFile(ctx, m, fileURL, o)
+}
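
A hedged usage sketch for the upload helpers above, assuming a main with context, log, net/url, and os imported alongside the azfile package; account, key, and paths are placeholders:

    cred, err := azfile.NewSharedKeyCredential("<account>", "<account-key>")
    if err != nil {
        log.Fatal(err)
    }
    p := azfile.NewPipeline(cred, azfile.PipelineOptions{})

    u, _ := url.Parse("https://<account>.file.core.windows.net/myshare/dir/remote.txt")
    fileURL := azfile.NewFileURL(*u, p)

    f, err := os.Open("local.txt")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // RangeSize 0 falls back to FileMaxUploadRangeBytes; Parallelism 0 falls back to 5.
    err = azfile.UploadFileToAzureFile(context.Background(), f, fileURL,
        azfile.UploadToAzureFileOptions{Parallelism: 4})
    if err != nil {
        log.Fatal(err)
    }
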
+
+// DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions.
+type DownloadFromAzureFileOptions struct {
+ // RangeSize specifies the range size to use in each parallel download; the default is FileMaxUploadRangeBytes.
+ RangeSize int64
+
+ // Progress is a function that is invoked periodically as bytes are received.
+ Progress pipeline.ProgressReceiver
+
+ // Parallelism indicates the maximum number of ranges to download in parallel. If 0 (the default) is provided, a parallelism of 5 is used.
+ Parallelism uint16
+
+ // MaxRetryRequestsPerRange specifies the maximum number of retry requests used while reading data for each range.
+ MaxRetryRequestsPerRange int
+}
+
+// downloadAzureFileToBuffer downloads an Azure file to a buffer in parallel.
+// Note: o.RangeSize must be >= 0.
+func downloadAzureFileToBuffer(ctx context.Context, fileURL FileURL, azfileProperties *FileGetPropertiesResponse,
+ b []byte, o DownloadFromAzureFileOptions) (*FileGetPropertiesResponse, error) {
+
+ // 1. Validate parameters, and set defaults.
+ if o.RangeSize < 0 {
+ return nil, errors.New("invalid argument, o.RangeSize must be >= 0")
+ }
+ if o.RangeSize == 0 {
+ o.RangeSize = FileMaxUploadRangeBytes
+ }
+
+ if azfileProperties == nil {
+ p, err := fileURL.GetProperties(ctx)
+ if err != nil {
+ return nil, err
+ }
+ azfileProperties = p
+ }
+ azfileSize := azfileProperties.ContentLength()
+
+ // If the Azure file's size is 0, return directly; there is nothing to download.
+ if azfileSize == 0 {
+ return azfileProperties, nil
+ }
+
+ if int64(len(b)) < azfileSize {
+ sanityCheckFailed(fmt.Sprintf("The buffer's size should be equal to or larger than Azure file's size: %d.", azfileSize))
+ }
+
+ parallelism := o.Parallelism
+ if parallelism == 0 {
+ parallelism = defaultParallelCount // default parallelism
+ }
+
+ // 2. Prepare and do parallel download.
+ fileProgress := int64(0)
+ progressLock := &sync.Mutex{}
+
+ err := doBatchTransfer(ctx, batchTransferOptions{
+ transferSize: azfileSize,
+ chunkSize: o.RangeSize,
+ parallelism: parallelism,
+ operation: func(offset int64, curRangeSize int64) error {
+ dr, err := fileURL.Download(ctx, offset, curRangeSize, false)
+ if err != nil {
+ return err // don't touch dr when Download fails
+ }
+ body := dr.Body(RetryReaderOptions{MaxRetryRequests: o.MaxRetryRequestsPerRange})
+
+ if o.Progress != nil {
+ rangeProgress := int64(0)
+ body = pipeline.NewResponseBodyProgress(
+ body,
+ func(bytesTransferred int64) {
+ diff := bytesTransferred - rangeProgress
+ rangeProgress = bytesTransferred
+ progressLock.Lock()
+ defer progressLock.Unlock()
+ fileProgress += diff
+ o.Progress(fileProgress)
+ })
+ }
+
+ _, err = io.ReadFull(body, b[offset:offset+curRangeSize])
+ body.Close()
+
+ return err
+ },
+ operationName: "downloadAzureFileToBuffer",
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return azfileProperties, nil
+}
+
+// DownloadAzureFileToBuffer downloads an Azure file to a buffer in parallel.
+func DownloadAzureFileToBuffer(ctx context.Context, fileURL FileURL,
+ b []byte, o DownloadFromAzureFileOptions) (*FileGetPropertiesResponse, error) {
+ return downloadAzureFileToBuffer(ctx, fileURL, nil, b, o)
+}
+
+// DownloadAzureFileToFile downloads an Azure file to a local file.
+// The file is created if it doesn't exist, and is truncated if its size doesn't match the Azure file's size.
+// Note: file can't be nil.
+func DownloadAzureFileToFile(ctx context.Context, fileURL FileURL, file *os.File, o DownloadFromAzureFileOptions) (*FileGetPropertiesResponse, error) {
+ // 1. Validate parameters.
+ if file == nil {
+ return nil, errors.New("invalid argument, file can't be nil")
+ }
+
+ // 2. Try to get Azure file's size.
+ azfileProperties, err := fileURL.GetProperties(ctx)
+ if err != nil {
+ return nil, err
+ }
+ azfileSize := azfileProperties.ContentLength()
+
+ // 3. Compare and try to resize local file's size if it doesn't match Azure file's size.
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if stat.Size() != azfileSize {
+ if err = file.Truncate(azfileSize); err != nil {
+ return nil, err
+ }
+ }
+
+ // 4. Set up the mmap (only when the Azure file's size is > 0) and call downloadAzureFileToBuffer.
+ m := mmf{} // Default to an empty slice; used for 0-size file
+ if azfileSize > 0 {
+ m, err = newMMF(file, true, 0, int(azfileSize))
+ if err != nil {
+ return nil, err
+ }
+ defer m.unmap()
+ }
+
+ return downloadAzureFileToBuffer(ctx, fileURL, azfileProperties, m, o)
+}
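
The mirror-image download, reusing fileURL from the upload sketch; as the code above shows, the helper creates or truncates the local file itself:

    out, err := os.Create("downloaded.txt")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    props, err := azfile.DownloadAzureFileToFile(context.Background(), fileURL, out,
        azfile.DownloadFromAzureFileOptions{Parallelism: 4, MaxRetryRequestsPerRange: 2})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("downloaded", props.ContentLength(), "bytes")
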
+
+// batchTransferOptions identifies options used by doBatchTransfer.
+type batchTransferOptions struct {
+ transferSize int64
+ chunkSize int64
+ parallelism uint16
+ operation func(offset int64, chunkSize int64) error
+ operationName string
+}
+
+// doBatchTransfer executes the per-chunk operations through a bounded pool of worker goroutines.
+func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
+ // Prepare and do parallel operations.
+ numChunks := ((o.transferSize - 1) / o.chunkSize) + 1
+ operationChannel := make(chan func() error, o.parallelism) // Buffered channel that feeds operations to 'parallelism' worker goroutines
+ operationResponseChannel := make(chan error, numChunks) // Holds each response
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Create the goroutines that process each operation (in parallel).
+ for g := uint16(0); g < o.parallelism; g++ {
+ //grIndex := g
+ go func() {
+ for f := range operationChannel {
+ //fmt.Printf("[%s] gr-%d start action\n", o.operationName, grIndex)
+ err := f()
+ operationResponseChannel <- err
+ //fmt.Printf("[%s] gr-%d end action\n", o.operationName, grIndex)
+ }
+ }()
+ }
+
+ curChunkSize := o.chunkSize
+ // Add each chunk's operation to the channel.
+ for chunkIndex := int64(0); chunkIndex < numChunks; chunkIndex++ {
+ if chunkIndex == numChunks-1 { // Last chunk
+ curChunkSize = o.transferSize - (int64(chunkIndex) * o.chunkSize) // Remove size of all transferred chunks from total
+ }
+ offset := int64(chunkIndex) * o.chunkSize
+
+ closureChunkSize := curChunkSize
+ operationChannel <- func() error {
+ return o.operation(offset, closureChunkSize)
+ }
+ }
+ close(operationChannel)
+
+ // Wait for the operations to complete.
+ for chunkIndex := int64(0); chunkIndex < numChunks; chunkIndex++ {
+ responseError := <-operationResponseChannel
+ if responseError != nil {
+ cancel() // As soon as any operation fails, cancel all remaining operation calls
+ return responseError // No need to process anymore responses
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/parsing_urls.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/parsing_urls.go
new file mode 100644
index 0000000..b409181
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/parsing_urls.go
@@ -0,0 +1,159 @@
+package azfile
+
+import (
+ "net"
+ "net/url"
+ "strings"
+)
+
+const (
+ shareSnapshot = "sharesnapshot"
+)
+
+// A FileURLParts object represents the components that make up an Azure Storage Share/Directory/File URL. You parse an
+// existing URL into its parts by calling NewFileURLParts(). You construct a URL from parts by calling URL().
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type FileURLParts struct {
+ Scheme string // Ex: "https://"
+ Host string // Ex: "account.share.core.windows.net", "10.132.141.33", "10.132.141.33:80"
+ ShareName string // Share name, Ex: "myshare"
+ DirectoryOrFilePath string // Path of directory or file, Ex: "mydirectory/myfile"
+ ShareSnapshot string // IsZero is true if not a snapshot
+ SAS SASQueryParameters
+ UnparsedParams string
+ IPEndpointStyleInfo IPEndpointStyleInfo // Useful Parts for IP endpoint style URL.
+}
+
+// IPEndpointStyleInfo is used for IP endpoint style URL.
+// It's commonly used when working with the Azure storage emulator or in testing environments.
+// Ex: "https://10.132.141.33/accountname/sharename"
+type IPEndpointStyleInfo struct {
+ AccountName string // "" if not using IP endpoint style
+}
+
+// isIPEndpointStyle checks whether the URL's host is an IP address; in that case the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/share(||container||etc)/...
+// Per url.URL's Host field, host may be either "host" or "host:port".
+func isIPEndpointStyle(host string) bool {
+ if host == "" {
+ return false
+ }
+
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+ // For IPv6, SplitHostPort can fail because it cannot find a port.
+ // In that case, eliminate the '[' and ']' in the URL.
+ // For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
+ if host[0] == '[' && host[len(host)-1] == ']' {
+ host = host[1 : len(host)-1]
+ }
+ return net.ParseIP(host) != nil
+}
+
+// NewFileURLParts parses a URL initializing FileURLParts' fields including any SAS-related & sharesnapshot query parameters. Any other
+// query parameters remain in the UnparsedParams field. This method overwrites all fields in the FileURLParts object.
+func NewFileURLParts(u url.URL) FileURLParts {
+ up := FileURLParts{
+ Scheme: u.Scheme,
+ Host: u.Host,
+ IPEndpointStyleInfo: IPEndpointStyleInfo{},
+ }
+
+ if u.Path != "" {
+ path := u.Path
+
+ if path[0] == '/' {
+ path = path[1:]
+ }
+
+ if isIPEndpointStyle(up.Host) {
+ if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no share, path of directory or file
+ up.IPEndpointStyleInfo.AccountName = path
+ } else {
+ up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
+
+ path = path[accountEndIndex+1:]
+ // Find the next slash (if it exists)
+ if shareEndIndex := strings.Index(path, "/"); shareEndIndex == -1 { // Slash not found; path has share name & no path of directory or file
+ up.ShareName = path
+ } else { // Slash found; path has share name & path of directory or file
+ up.ShareName = path[:shareEndIndex]
+ up.DirectoryOrFilePath = path[shareEndIndex+1:]
+ }
+ }
+ } else {
+ // Find the next slash (if it exists)
+ if shareEndIndex := strings.Index(path, "/"); shareEndIndex == -1 { // Slash not found; path has share name & no path of directory or file
+ up.ShareName = path
+ } else { // Slash found; path has share name & path of directory or file
+ up.ShareName = path[:shareEndIndex]
+ up.DirectoryOrFilePath = path[shareEndIndex+1:]
+ }
+ }
+ }
+
+ // Convert the query parameters to a case-sensitive map.
+ paramsMap := u.Query()
+
+ if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(shareSnapshot); ok {
+ up.ShareSnapshot = snapshotStr[0]
+ // If we recognized the query parameter, remove it from the map
+ delete(paramsMap, shareSnapshot)
+ }
+ up.SAS = newSASQueryParameters(paramsMap, true)
+ up.UnparsedParams = paramsMap.Encode()
+ return up
+}
+
+type caseInsensitiveValues url.Values // map[string][]string
+func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
+ key = strings.ToLower(key)
+ for k, v := range values {
+ if strings.ToLower(k) == key {
+ return v, true
+ }
+ }
+ return []string{}, false
+}
+
+// URL returns a URL object whose fields are initialized from the FileURLParts fields. The URL's RawQuery
+// field contains the SAS, snapshot, and unparsed query parameters.
+func (up FileURLParts) URL() url.URL {
+ path := ""
+ // Concatenate account name for IP endpoint style URL
+ if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
+ path += "/" + up.IPEndpointStyleInfo.AccountName
+ }
+ // Concatenate share & path of directory or file (if they exist)
+ if up.ShareName != "" {
+ path += "/" + up.ShareName
+ if up.DirectoryOrFilePath != "" {
+ path += "/" + up.DirectoryOrFilePath
+ }
+ }
+
+ rawQuery := up.UnparsedParams
+
+ // Concatenate share snapshot query parameter (if it exists)
+ if up.ShareSnapshot != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += shareSnapshot + "=" + up.ShareSnapshot
+ }
+ sas := up.SAS.Encode()
+ if sas != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += sas
+ }
+ u := url.URL{
+ Scheme: up.Scheme,
+ Host: up.Host,
+ Path: path,
+ RawQuery: rawQuery,
+ }
+ return u
+}
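
A quick round trip through the parser above; the URL and snapshot timestamp are made up for illustration:

    raw, _ := url.Parse("https://myaccount.file.core.windows.net/myshare/dir/file.txt?sharesnapshot=2019-11-01T00:00:00.0000000Z")
    parts := azfile.NewFileURLParts(*raw)
    fmt.Println(parts.ShareName)           // "myshare"
    fmt.Println(parts.DirectoryOrFilePath) // "dir/file.txt"
    fmt.Println(parts.ShareSnapshot)       // "2019-11-01T00:00:00.0000000Z"

    parts.ShareSnapshot = "" // drop the snapshot to address the base file
    base := parts.URL()
    fmt.Println(base.String()) // https://myaccount.file.core.windows.net/myshare/dir/file.txt
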
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/sas_service.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/sas_service.go
new file mode 100644
index 0000000..68db698
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/sas_service.go
@@ -0,0 +1,207 @@
+package azfile
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// FileSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage share or file.
+type FileSASSignatureValues struct {
+ Version string `param:"sv"` // If not specified, this defaults to SASVersion
+ Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
+ StartTime time.Time `param:"st"` // Not specified if IsZero
+ ExpiryTime time.Time `param:"se"` // Not specified if IsZero
+ Permissions string `param:"sp"` // Create by initializing a ShareSASPermissions or FileSASPermissions and then call String()
+ IPRange IPRange `param:"sip"`
+ Identifier string `param:"si"`
+ ShareName string
+ FilePath string // Ex: "directory/FileName" or "FileName". Use "" to create a Share SAS.
+ CacheControl string // rscc
+ ContentDisposition string // rscd
+ ContentEncoding string // rsce
+ ContentLanguage string // rscl
+ ContentType string // rsct
+}
+
+// NewSASQueryParameters uses an account's shared key credential to sign these signature values and produce
+// the proper SAS query parameters.
+func (v FileSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+ if sharedKeyCredential == nil {
+ return SASQueryParameters{}, errors.New("sharedKeyCredential can't be nil")
+ }
+
+ resource := "s"
+ if v.FilePath == "" {
+ // Make sure the permission characters are in the correct order
+ perms := &ShareSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ } else {
+ resource = "f"
+ // Make sure the permission characters are in the correct order
+ perms := &FileSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ }
+ if v.Version == "" {
+ v.Version = SASVersion
+ }
+ startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
+
+ // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+ stringToSign := strings.Join([]string{
+ v.Permissions,
+ startTime,
+ expiryTime,
+ getCanonicalName(sharedKeyCredential.AccountName(), v.ShareName, v.FilePath),
+ v.Identifier,
+ v.IPRange.String(),
+ string(v.Protocol),
+ v.Version,
+ v.CacheControl, // rscc
+ v.ContentDisposition, // rscd
+ v.ContentEncoding, // rsce
+ v.ContentLanguage, // rscl
+ v.ContentType}, // rsct
+ "\n")
+ signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+
+ p := SASQueryParameters{
+ // Common SAS parameters
+ version: v.Version,
+ protocol: v.Protocol,
+ startTime: v.StartTime,
+ expiryTime: v.ExpiryTime,
+ permissions: v.Permissions,
+ ipRange: v.IPRange,
+
+ // Share/File-specific SAS parameters
+ resource: resource,
+ identifier: v.Identifier,
+ cacheControl: v.CacheControl,
+ contentDisposition: v.ContentDisposition,
+ contentEncoding: v.ContentEncoding,
+ contentLanguage: v.ContentLanguage,
+ contentType: v.ContentType,
+
+ // Calculated SAS signature
+ signature: signature,
+ }
+ return p, nil
+}
+
+// getCanonicalName computes the canonical name for a share or file resource for SAS signing.
+func getCanonicalName(account string, shareName string, filePath string) string {
+ // Share: "/file/account/sharename"
+ // File: "/file/account/sharename/filename"
+ // File: "/file/account/sharename/directoryname/filename"
+ elements := []string{"/file/", account, "/", shareName}
+ if filePath != "" {
+ dfp := strings.Replace(filePath, "\\", "/", -1)
+ if dfp[0] == '/' {
+ dfp = dfp[1:]
+ }
+ elements = append(elements, "/", dfp)
+ }
+ return strings.Join(elements, "")
+}
+
+// The ShareSASPermissions type simplifies creating the permissions string for an Azure Storage share SAS.
+// Initialize an instance of this type and then call its String method to set FileSASSignatureValues's Permissions field.
+type ShareSASPermissions struct {
+ Read, Create, Write, Delete, List bool
+}
+
+// String produces the SAS permissions string for an Azure Storage share.
+// Call this method to set FileSASSignatureValues's Permissions field.
+func (p ShareSASPermissions) String() string {
+ var b bytes.Buffer
+ if p.Read {
+ b.WriteRune('r')
+ }
+ if p.Create {
+ b.WriteRune('c')
+ }
+ if p.Write {
+ b.WriteRune('w')
+ }
+ if p.Delete {
+ b.WriteRune('d')
+ }
+ if p.List {
+ b.WriteRune('l')
+ }
+ return b.String()
+}
+
+// Parse initializes the ShareSASPermissions's fields from a string.
+func (p *ShareSASPermissions) Parse(s string) error {
+ *p = ShareSASPermissions{} // Clear the flags
+ for _, r := range s {
+ switch r {
+ case 'r':
+ p.Read = true
+ case 'c':
+ p.Create = true
+ case 'w':
+ p.Write = true
+ case 'd':
+ p.Delete = true
+ case 'l':
+ p.List = true
+ default:
+ return fmt.Errorf("Invalid permission: '%v'", r)
+ }
+ }
+ return nil
+}
+
+// The FileSASPermissions type simplifies creating the permissions string for an Azure Storage file SAS.
+// Initialize an instance of this type and then call its String method to set FileSASSignatureValues's Permissions field.
+type FileSASPermissions struct{ Read, Create, Write, Delete bool }
+
+// String produces the SAS permissions string for an Azure Storage file.
+// Call this method to set FileSASSignatureValues's Permissions field.
+func (p FileSASPermissions) String() string {
+ var b bytes.Buffer
+ if p.Read {
+ b.WriteRune('r')
+ }
+ if p.Create {
+ b.WriteRune('c')
+ }
+ if p.Write {
+ b.WriteRune('w')
+ }
+ if p.Delete {
+ b.WriteRune('d')
+ }
+ return b.String()
+}
+
+// Parse initializes the FileSASPermissions's fields from a string.
+func (p *FileSASPermissions) Parse(s string) error {
+ *p = FileSASPermissions{} // Clear the flags
+ for _, r := range s {
+ switch r {
+ case 'r':
+ p.Read = true
+ case 'c':
+ p.Create = true
+ case 'w':
+ p.Write = true
+ case 'd':
+ p.Delete = true
+ default:
+ return fmt.Errorf("Invalid permission: '%v'", r)
+ }
+ }
+ return nil
+}
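
Putting the types above together, a sketch that signs a read/write file SAS; SASProtocolHTTPS and the 48-hour expiry are arbitrary choices here, and time must be imported:

    cred, err := azfile.NewSharedKeyCredential("<account>", "<account-key>")
    if err != nil {
        log.Fatal(err)
    }

    sasQP, err := azfile.FileSASSignatureValues{
        Protocol:    azfile.SASProtocolHTTPS,
        ExpiryTime:  time.Now().UTC().Add(48 * time.Hour),
        ShareName:   "myshare",
        FilePath:    "dir/file.txt", // "" would sign a share-level SAS instead
        Permissions: azfile.FileSASPermissions{Read: true, Write: true}.String(),
    }.NewSASQueryParameters(cred)
    if err != nil {
        log.Fatal(err)
    }

    sasURL := fmt.Sprintf("https://<account>.file.core.windows.net/myshare/dir/file.txt?%s", sasQP.Encode())
    fmt.Println(sasURL)
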
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/service_codes_file.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/service_codes_file.go
new file mode 100644
index 0000000..7591380
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/service_codes_file.go
@@ -0,0 +1,58 @@
+package azfile
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/file-service-error-codes
+
+// ServiceCode values indicate a service failure.
+const (
+ // The file or directory could not be deleted because it is in use by an SMB client (409).
+ ServiceCodeCannotDeleteFileOrDirectory ServiceCodeType = "CannotDeleteFileOrDirectory"
+
+ // The specified resource state could not be flushed from an SMB client in the specified time (500).
+ ServiceCodeClientCacheFlushDelay ServiceCodeType = "ClientCacheFlushDelay"
+
+ // The specified resource is marked for deletion by an SMB client (409).
+ ServiceCodeDeletePending ServiceCodeType = "DeletePending"
+
+ // The specified directory is not empty (409).
+ ServiceCodeDirectoryNotEmpty ServiceCodeType = "DirectoryNotEmpty"
+
+ // A portion of the specified file is locked by an SMB client (409).
+ ServiceCodeFileLockConflict ServiceCodeType = "FileLockConflict"
+
+ // File or directory path is too long (400).
+ // Or File or directory path has too many subdirectories (400).
+ ServiceCodeInvalidFileOrDirectoryPathName ServiceCodeType = "InvalidFileOrDirectoryPathName"
+
+ // The specified parent path does not exist (404).
+ ServiceCodeParentNotFound ServiceCodeType = "ParentNotFound"
+
+ // The specified resource is read-only and cannot be modified at this time (409).
+ ServiceCodeReadOnlyAttribute ServiceCodeType = "ReadOnlyAttribute"
+
+ // The specified share already exists (409).
+ ServiceCodeShareAlreadyExists ServiceCodeType = "ShareAlreadyExists"
+
+ // The specified share is being deleted. Try operation later (409).
+ ServiceCodeShareBeingDeleted ServiceCodeType = "ShareBeingDeleted"
+
+ // The specified share is disabled by the administrator (403).
+ ServiceCodeShareDisabled ServiceCodeType = "ShareDisabled"
+
+ // The specified share does not exist (404).
+ ServiceCodeShareNotFound ServiceCodeType = "ShareNotFound"
+
+ // The specified resource may be in use by an SMB client (409).
+ ServiceCodeSharingViolation ServiceCodeType = "SharingViolation"
+
+ // Another Share Snapshot operation is in progress (409).
+ ServiceCodeShareSnapshotInProgress ServiceCodeType = "ShareSnapshotInProgress"
+
+ // The total number of snapshots for the share is over the limit (409).
+ ServiceCodeShareSnapshotCountExceeded ServiceCodeType = "ShareSnapshotCountExceeded"
+
+ // The operation is not supported on a share snapshot (400).
+ ServiceCodeShareSnapshotOperationNotSupported ServiceCodeType = "ShareSnapshotOperationNotSupported"
+
+ // The share has snapshots and the operation requires no snapshots (409).
+ ServiceCodeShareHasSnapshots ServiceCodeType = "ShareHasSnapshots"
+)
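
These codes are typically matched via azfile's StorageError interface; a hedged sketch, reusing the pipeline p from the earlier sketches and assuming ShareURL.Create's (metadata, quota) signature from the same package:

    shareU, _ := url.Parse("https://<account>.file.core.windows.net/myshare")
    shareURL := azfile.NewShareURL(*shareU, p)

    if _, err := shareURL.Create(context.Background(), azfile.Metadata{}, 0); err != nil {
        if stgErr, ok := err.(azfile.StorageError); ok &&
            stgErr.ServiceCode() == azfile.ServiceCodeShareAlreadyExists {
            // 409 ShareAlreadyExists: acceptable for idempotent setup.
        } else {
            log.Fatal(err)
        }
    }
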
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/url_directory.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_directory.go
new file mode 100644
index 0000000..b08d410
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_directory.go
@@ -0,0 +1,147 @@
+package azfile
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A DirectoryURL represents a URL to the Azure Storage directory allowing you to manipulate its directories and files.
+type DirectoryURL struct {
+ directoryClient directoryClient
+}
+
+// NewDirectoryURL creates a DirectoryURL object using the specified URL and request policy pipeline.
+// Note: p can't be nil.
+func NewDirectoryURL(url url.URL, p pipeline.Pipeline) DirectoryURL {
+ directoryClient := newDirectoryClient(url, p)
+ return DirectoryURL{directoryClient: directoryClient}
+}
+
+// URL returns the URL endpoint used by the DirectoryURL object.
+func (d DirectoryURL) URL() url.URL {
+ return d.directoryClient.URL()
+}
+
+// String returns the URL as a string.
+func (d DirectoryURL) String() string {
+ u := d.URL()
+ return u.String()
+}
+
+// WithPipeline creates a new DirectoryURL object identical to the source but with the specified request policy pipeline.
+func (d DirectoryURL) WithPipeline(p pipeline.Pipeline) DirectoryURL {
+ return NewDirectoryURL(d.URL(), p)
+}
+
+// NewFileURL creates a new FileURL object by concatenating fileName to the end of
+// DirectoryURL's URL. The new FileURL uses the same request policy pipeline as the DirectoryURL.
+// To change the pipeline, create the FileURL and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewFileURL instead of calling this object's
+// NewFileURL method.
+func (d DirectoryURL) NewFileURL(fileName string) FileURL {
+ fileURL := appendToURLPath(d.URL(), fileName)
+ return NewFileURL(fileURL, d.directoryClient.Pipeline())
+}
+
+// NewDirectoryURL creates a new DirectoryURL object by concatenating directoryName to the end of
+// DirectoryURL's URL. The new DirectoryURL uses the same request policy pipeline as the DirectoryURL.
+// To change the pipeline, create the DirectoryURL and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewDirectoryURL instead of calling this object's
+// NewDirectoryURL method.
+func (d DirectoryURL) NewDirectoryURL(directoryName string) DirectoryURL {
+ directoryURL := appendToURLPath(d.URL(), directoryName)
+ return NewDirectoryURL(directoryURL, d.directoryClient.Pipeline())
+}
+
+// Create creates a new directory within a storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-directory.
+// Pass default values for SMB properties (ex: "None" for file attributes).
+func (d DirectoryURL) Create(ctx context.Context, metadata Metadata) (*DirectoryCreateResponse, error) {
+ defaultPermissions := "inherit"
+ return d.directoryClient.Create(ctx, "None", "now", "now", nil, metadata,
+ &defaultPermissions, nil)
+}
+
+// Delete removes the specified empty directory. Note that the directory must be empty before it can be deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-directory.
+func (d DirectoryURL) Delete(ctx context.Context) (*DirectoryDeleteResponse, error) {
+ return d.directoryClient.Delete(ctx, nil)
+}
+
+// GetProperties returns the directory's metadata and system properties.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-directory-properties.
+func (d DirectoryURL) GetProperties(ctx context.Context) (*DirectoryGetPropertiesResponse, error) {
+ return d.directoryClient.GetProperties(ctx, nil, nil)
+}
+
+// SetMetadata sets the directory's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-directory-metadata.
+func (d DirectoryURL) SetMetadata(ctx context.Context, metadata Metadata) (*DirectorySetMetadataResponse, error) {
+ return d.directoryClient.SetMetadata(ctx, nil, metadata)
+}
+
+// ListFilesAndDirectoriesOptions defines options available when calling ListFilesAndDirectoriesSegment.
+type ListFilesAndDirectoriesOptions struct {
+ Prefix string // No Prefix header is produced if ""
+ MaxResults int32 // 0 means unspecified
+}
+
+func (o *ListFilesAndDirectoriesOptions) pointers() (prefix *string, maxResults *int32) {
+ if o.Prefix != "" {
+ prefix = &o.Prefix
+ }
+ if o.MaxResults != 0 {
+ maxResults = &o.MaxResults
+ }
+ return
+}
+
+// toConvenienceModel convert raw response to convenience model.
+// func (r *listFilesAndDirectoriesSegmentResponse) toConvenienceModel() *ListFilesAndDirectoriesSegmentResponse {
+// cr := ListFilesAndDirectoriesSegmentResponse{
+// rawResponse: r.rawResponse,
+// ServiceEndpoint: r.ServiceEndpoint,
+// ShareName: r.ShareName,
+// ShareSnapshot: r.ShareSnapshot,
+// DirectoryPath: r.DirectoryPath,
+// Prefix: r.Prefix,
+// Marker: r.Marker,
+// MaxResults: r.MaxResults,
+// NextMarker: r.NextMarker,
+// }
+
+// for _, e := range r.Entries {
+// if f, isFile := e.AsFileEntry(); isFile {
+// cr.Files = append(cr.Files, *f)
+// } else if d, isDir := e.AsDirectoryEntry(); isDir {
+// cr.Directories = append(cr.Directories, *d)
+// } else {
+// // Logic should not be here, otherwise client is not aligning to latest REST API document
+// panic(fmt.Errorf("invalid entry type found, entry info: %v", e))
+// }
+
+// }
+
+// return &cr
+// }
+
+// ListFilesAndDirectoriesSegmentAutoRest is the implementation using Auto Rest generated protocol code.
+// func (d DirectoryURL) ListFilesAndDirectoriesSegmentAutoRest(ctx context.Context, marker Marker, o ListFilesAndDirectoriesOptions) (*ListFilesAndDirectoriesSegmentResponse, error) {
+// prefix, maxResults := o.pointers()
+
+// rawResponse, error := d.directoryClient.ListFilesAndDirectoriesSegmentAutoRest(ctx, prefix, nil, marker.val, maxResults, nil)
+
+// return rawResponse.toConvenienceModel(), error
+// }
+
+// ListFilesAndDirectoriesSegment returns a single segment of files and directories starting from the specified Marker.
+// Use an empty Marker to start enumeration from the beginning. File and directory names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListFilesAndDirectoriesSegment again (passing the previously-returned
+// Marker) to get the next segment. This method lists the contents only for a single level of the directory hierarchy.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files.
+func (d DirectoryURL) ListFilesAndDirectoriesSegment(ctx context.Context, marker Marker, o ListFilesAndDirectoriesOptions) (*ListFilesAndDirectoriesSegmentResponse, error) {
+ prefix, maxResults := o.pointers()
+ return d.directoryClient.ListFilesAndDirectoriesSegment(ctx, prefix, nil, marker.Val, maxResults, nil)
+}
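
A minimal sketch of the marker loop this method's comment describes, wiring together the DirectoryURL pieces above. The account, share, and directory names are placeholders, and Marker.NotDone plus the FileItems/DirectoryItems response fields are assumed from the SDK's generated models rather than defined in this file:

package main

import (
    "context"
    "fmt"
    "log"
    "net/url"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func main() {
    credential, err := azfile.NewSharedKeyCredential("myaccount", "<base64-account-key>")
    if err != nil {
        log.Fatal(err)
    }
    p := azfile.NewPipeline(credential, azfile.PipelineOptions{})

    u, _ := url.Parse("https://myaccount.file.core.windows.net/myshare/mydir")
    dirURL := azfile.NewDirectoryURL(*u, p)

    ctx := context.Background()
    for marker := (azfile.Marker{}); marker.NotDone(); { // NotDone: assumed generated-model helper
        resp, err := dirURL.ListFilesAndDirectoriesSegment(ctx, marker,
            azfile.ListFilesAndDirectoriesOptions{MaxResults: 100})
        if err != nil {
            log.Fatal(err)
        }
        for _, file := range resp.FileItems { // generated-model field (assumed)
            fmt.Println("file:", file.Name)
        }
        for _, dir := range resp.DirectoryItems { // generated-model field (assumed)
            fmt.Println("dir: ", dir.Name)
        }
        marker = resp.NextMarker // an empty NextMarker ends the loop
    }
}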
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/url_file.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_file.go
new file mode 100644
index 0000000..78a32d2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_file.go
@@ -0,0 +1,211 @@
+package azfile
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+ fileType = "file"
+
+ // FileMaxUploadRangeBytes indicates the maximum number of bytes that can be sent in a call to UploadRange.
+ FileMaxUploadRangeBytes = 4 * 1024 * 1024 // 4MB
+
+ // FileMaxSizeInBytes indicates the maximum file size, in bytes.
+ FileMaxSizeInBytes int64 = 1 * 1024 * 1024 * 1024 * 1024 // 1TB
+)
+
+// A FileURL represents a URL to an Azure Storage file.
+type FileURL struct {
+ fileClient fileClient
+}
+
+// NewFileURL creates a FileURL object using the specified URL and request policy pipeline.
+// Note: p can't be nil.
+func NewFileURL(url url.URL, p pipeline.Pipeline) FileURL {
+ fileClient := newFileClient(url, p)
+ return FileURL{fileClient: fileClient}
+}
+
+// URL returns the URL endpoint used by the FileURL object.
+func (f FileURL) URL() url.URL {
+ return f.fileClient.URL()
+}
+
+// String returns the URL as a string.
+func (f FileURL) String() string {
+ u := f.URL()
+ return u.String()
+}
+
+// WithPipeline creates a new FileURL object identical to the source but with the specified request policy pipeline.
+func (f FileURL) WithPipeline(p pipeline.Pipeline) FileURL {
+ return NewFileURL(f.fileClient.URL(), p)
+}
+
+// WithSnapshot creates a new FileURL object identical to the source but with the specified share snapshot timestamp.
+// Pass an empty string to remove the share snapshot, returning a URL to the base file.
+func (f FileURL) WithSnapshot(shareSnapshot string) FileURL {
+ p := NewFileURLParts(f.URL())
+ p.ShareSnapshot = shareSnapshot
+ return NewFileURL(p.URL(), f.fileClient.Pipeline())
+}
+
+// Create creates a new file or replaces a file. Note that this method only initializes the file.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/create-file.
+// Pass default values for SMB properties (e.g. "None" for file attributes).
+func (f FileURL) Create(ctx context.Context, size int64, h FileHTTPHeaders, metadata Metadata) (*FileCreateResponse, error) {
+ defaultPermissions := "inherit"
+ return f.fileClient.Create(ctx, size, "None", "now", "now", nil,
+ &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, &h.CacheControl,
+ h.ContentMD5, &h.ContentDisposition, metadata, &defaultPermissions, nil)
+}
+
+// StartCopy copies the data at the source URL to a file.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-file.
+func (f FileURL) StartCopy(ctx context.Context, source url.URL, metadata Metadata) (*FileStartCopyResponse, error) {
+ return f.fileClient.StartCopy(ctx, source.String(), nil, metadata)
+}
+
+// AbortCopy stops a pending copy that was previously started and leaves a destination file with 0 length and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-file.
+func (f FileURL) AbortCopy(ctx context.Context, copyID string) (*FileAbortCopyResponse, error) {
+ return f.fileClient.AbortCopy(ctx, copyID, nil)
+}
+
+// Download downloads count bytes of data from the start offset.
+// The response includes all of the file’s properties. However, passing true for rangeGetContentMD5 returns the range’s MD5 in the ContentMD5
+// response header/property if the range is <= 4MB; the HTTP request fails with 400 (Bad Request) if the requested range is greater than 4MB.
+// Note: offset must be >= 0 and count must be >= 0.
+// If count is CountToEnd (0), then data is read from the specified offset to the end.
+// rangeGetContentMD5 only works with partial data downloading.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-file.
+func (f FileURL) Download(ctx context.Context, offset int64, count int64, rangeGetContentMD5 bool) (*RetryableDownloadResponse, error) {
+ var xRangeGetContentMD5 *bool
+ if rangeGetContentMD5 {
+ if offset == 0 && count == CountToEnd {
+ return nil, errors.New("invalid argument, rangeGetContentMD5 only works with partial data downloading")
+ }
+ xRangeGetContentMD5 = &rangeGetContentMD5
+ }
+ dr, err := f.fileClient.Download(ctx, nil, httpRange{offset: offset, count: count}.pointers(), xRangeGetContentMD5)
+ if err != nil {
+ return nil, err
+ }
+
+ return &RetryableDownloadResponse{
+ f: f,
+ dr: dr,
+ ctx: ctx,
+ info: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, // TODO: Note conditional header is not currently supported in Azure File.
+ }, err
+}
+
+// Body constructs a stream to read data from with a resilient reader option.
+// A zero-value option means to get a raw stream.
+func (dr *RetryableDownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
+ if o.MaxRetryRequests == 0 {
+ return dr.Response().Body
+ }
+
+ return NewRetryReader(
+ dr.ctx,
+ dr.Response(),
+ dr.info,
+ o,
+ func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) {
+ resp, err := dr.f.Download(ctx, info.Offset, info.Count, false)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Response(), err
+ })
+}
+
+// Delete immediately removes the file from the storage account.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2.
+func (f FileURL) Delete(ctx context.Context) (*FileDeleteResponse, error) {
+ return f.fileClient.Delete(ctx, nil)
+}
+
+// GetProperties returns the file's metadata and properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-file-properties.
+func (f FileURL) GetProperties(ctx context.Context) (*FileGetPropertiesResponse, error) {
+ return f.fileClient.GetProperties(ctx, nil, nil)
+}
+
+// SetHTTPHeaders sets the file's system properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-file-properties.
+func (f FileURL) SetHTTPHeaders(ctx context.Context, h FileHTTPHeaders) (*FileSetHTTPHeadersResponse, error) {
+ defaultPermissions := "preserve"
+ return f.fileClient.SetHTTPHeaders(ctx, "preserve", "preserve", "preserve", nil,
+ nil, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, &h.CacheControl, h.ContentMD5,
+ &h.ContentDisposition, &defaultPermissions, nil)
+}
+
+// SetMetadata sets a file's metadata.
+// https://docs.microsoft.com/rest/api/storageservices/set-file-metadata.
+func (f FileURL) SetMetadata(ctx context.Context, metadata Metadata) (*FileSetMetadataResponse, error) {
+ return f.fileClient.SetMetadata(ctx, nil, metadata)
+}
+
+// Resize resizes the file to the specified size.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-file-properties.
+func (f FileURL) Resize(ctx context.Context, length int64) (*FileSetHTTPHeadersResponse, error) {
+ defaultPermissions := "preserve"
+ return f.fileClient.SetHTTPHeaders(ctx, "preserve", "preserve", "preserve", nil,
+ &length, nil, nil, nil, nil,
+ nil, nil, &defaultPermissions, nil)
+}
+
+// UploadRange writes bytes to a file.
+// offset indicates the offset at which to begin writing, in bytes.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-range.
+func (f FileURL) UploadRange(ctx context.Context, offset int64, body io.ReadSeeker, transactionalMD5 []byte) (*FileUploadRangeResponse, error) {
+ if body == nil {
+ return nil, errors.New("invalid argument, body must not be nil")
+ }
+
+ count := validateSeekableStreamAt0AndGetCount(body)
+ if count == 0 {
+ return nil, errors.New("invalid argument, body must contain readable data whose size is > 0")
+ }
+
+ // TransactionalContentMD5 isn't supported currently.
+ return f.fileClient.UploadRange(ctx, *toRange(offset, count), FileRangeWriteUpdate, count, body, nil, transactionalMD5)
+}
+
+// UploadRangeFromURL writes a range of bytes to this file, with the contents read from a source URL.
+// destOffset indicates the offset at which to begin writing; sourceOffset indicates the offset in the source to read from.
+func (f FileURL) UploadRangeFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64,
+ count int64) (*FileUploadRangeFromURLResponse, error) {
+
+ return f.fileClient.UploadRangeFromURL(ctx, *toRange(destOffset, count), sourceURL.String(), 0, nil,
+ toRange(sourceOffset, count), nil, nil, nil)
+}
+
+// ClearRange clears the specified range and releases the space used in storage for that range.
+// offset means the start offset of the range to clear.
+// count means the number of bytes to clear; it cannot be CountToEnd (0) and must be explicitly specified.
+// If the specified range is not 512-byte aligned, the operation writes zeros to the unaligned start or end
+// of the range and frees only the 512-byte-aligned portion in between.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-range.
+func (f FileURL) ClearRange(ctx context.Context, offset int64, count int64) (*FileUploadRangeResponse, error) {
+ if count <= 0 {
+ return nil, errors.New("invalid argument, count cannot be CountToEnd, and must be > 0")
+ }
+
+ return f.fileClient.UploadRange(ctx, *toRange(offset, count), FileRangeWriteClear, 0, nil, nil, nil)
+}
+
+// GetRangeList returns the list of valid ranges for a file.
+// Use a count of CountToEnd (0) to cover the remainder of the file starting at offset.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/list-ranges.
+func (f FileURL) GetRangeList(ctx context.Context, offset int64, count int64) (*Ranges, error) {
+ return f.fileClient.GetRangeList(ctx, nil, nil, httpRange{offset: offset, count: count}.pointers())
+}
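
A sketch of the create/upload/download round trip. The file URL is assumed to carry a SAS already (so the anonymous credential suffices); the 4MB-per-call cap is FileMaxUploadRangeBytes from this file:

package main

import (
    "bytes"
    "context"
    "io"
    "io/ioutil"
    "log"
    "net/url"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func main() {
    p := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})
    u, _ := url.Parse("https://myaccount.file.core.windows.net/myshare/hello.txt?<sas-token>")
    fileURL := azfile.NewFileURL(*u, p)

    ctx := context.Background()
    data := bytes.Repeat([]byte("x"), 1024)

    // Create fixes the file's size up front; the contents are written as ranges.
    if _, err := fileURL.Create(ctx, int64(len(data)), azfile.FileHTTPHeaders{ContentType: "text/plain"}, azfile.Metadata{}); err != nil {
        log.Fatal(err)
    }
    // Each UploadRange call may carry at most FileMaxUploadRangeBytes (4MB).
    if _, err := fileURL.UploadRange(ctx, 0, bytes.NewReader(data), nil); err != nil {
        log.Fatal(err)
    }

    // Read the whole file back through the resilient body reader.
    resp, err := fileURL.Download(ctx, 0, azfile.CountToEnd, false)
    if err != nil {
        log.Fatal(err)
    }
    body := resp.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3})
    defer body.Close()
    if _, err := io.Copy(ioutil.Discard, body); err != nil {
        log.Fatal(err)
    }
}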
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/url_service.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_service.go
new file mode 100644
index 0000000..b633f7d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_service.go
@@ -0,0 +1,197 @@
+package azfile
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+ // storageAnalyticsVersion indicates the version of Storage Analytics to configure. Use "1.0" for this value.
+ // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties.
+ storageAnalyticsVersion = "1.0"
+)
+
+// A ServiceURL represents a URL to the Azure Storage File service allowing you to manipulate file shares.
+type ServiceURL struct {
+ client serviceClient
+}
+
+// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
+// Note: p can't be nil.
+func NewServiceURL(url url.URL, p pipeline.Pipeline) ServiceURL {
+ client := newServiceClient(url, p)
+ return ServiceURL{client: client}
+}
+
+// URL returns the URL endpoint used by the ServiceURL object.
+func (s ServiceURL) URL() url.URL {
+ return s.client.URL()
+}
+
+// String returns the URL as a string.
+func (s ServiceURL) String() string {
+ u := s.URL()
+ return u.String()
+}
+
+// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
+func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
+ return NewServiceURL(s.URL(), p)
+}
+
+// NewShareURL creates a new ShareURL object by concatenating shareName to the end of
+// ServiceURL's URL. The new ShareURL uses the same request policy pipeline as the ServiceURL.
+// To change the pipeline, create the ShareURL and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewShareURL instead of calling this object's
+// NewShareURL method.
+func (s ServiceURL) NewShareURL(shareName string) ShareURL {
+ shareURL := appendToURLPath(s.URL(), shareName)
+ return NewShareURL(shareURL, s.client.Pipeline())
+}
+
+// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
+func appendToURLPath(u url.URL, name string) url.URL {
+ // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
+ // When you call url.Parse() this is what you'll get:
+ // Scheme: "https"
+ // Opaque: ""
+ // User: nil
+ // Host: "ms.com"
+ // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
+ // RawPath: ""
+ // ForceQuery: false
+ // RawQuery: "k1=v1&k2=v2"
+ // Fragment: "f"
+ if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
+ u.Path += "/" // Append "/" to end before appending name
+ }
+ u.Path += name
+ return u
+}
+
+// ListSharesSegment returns a single segment of shares starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Share names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListSharesSegment again (passing the previously-returned
+// Marker) to get the next segment. For more information, see
+// https://docs.microsoft.com/en-us/rest/api/storageservices/list-shares.
+func (s ServiceURL) ListSharesSegment(ctx context.Context, marker Marker, o ListSharesOptions) (*ListSharesResponse, error) {
+ prefix, include, maxResults := o.pointers()
+ return s.client.ListSharesSegment(ctx, prefix, marker.Val, maxResults, include, nil)
+}
+
+// ListSharesOptions defines options available when calling ListSharesSegment.
+type ListSharesOptions struct {
+ Detail ListSharesDetail // No IncludeType header is produced if ""
+ Prefix string // No Prefix header is produced if ""
+ MaxResults int32 // 0 means unspecified
+}
+
+func (o *ListSharesOptions) pointers() (prefix *string, include []ListSharesIncludeType, maxResults *int32) {
+ if o.Prefix != "" {
+ prefix = &o.Prefix
+ }
+ if o.MaxResults != 0 {
+ maxResults = &o.MaxResults
+ }
+ include = o.Detail.toArray()
+ return
+}
+
+// ListSharesDetail indicates what additional information the service should return with each share.
+type ListSharesDetail struct {
+ Metadata, Snapshots bool
+}
+
+// toArray produces the Include query parameter's value.
+func (d *ListSharesDetail) toArray() []ListSharesIncludeType {
+ items := make([]ListSharesIncludeType, 0, 2)
+ if d.Metadata {
+ items = append(items, ListSharesIncludeMetadata)
+ }
+ if d.Snapshots {
+ items = append(items, ListSharesIncludeSnapshots)
+ }
+
+ return items
+}
+
+// toFsp converts StorageServiceProperties to the convenience representation FileServiceProperties.
+// This method exists to accommodate the protocol layer's swagger unification.
+func (ssp *StorageServiceProperties) toFsp() *FileServiceProperties {
+ if ssp == nil {
+ return nil
+ }
+
+ return &FileServiceProperties{
+ rawResponse: ssp.rawResponse,
+ HourMetrics: ssp.HourMetrics.toMp(),
+ MinuteMetrics: ssp.MinuteMetrics.toMp(),
+ Cors: ssp.Cors,
+ }
+}
+
+// toMp converts Metrics to the convenience representation MetricProperties.
+// This method exists to accommodate the protocol layer's swagger unification.
+func (m *Metrics) toMp() MetricProperties {
+ mp := MetricProperties{}
+ if m.Enabled {
+ mp.MetricEnabled = true
+ mp.IncludeAPIs = *m.IncludeAPIs
+ if m.RetentionPolicy != nil && m.RetentionPolicy.Enabled {
+ mp.RetentionPolicyEnabled = true
+ mp.RetentionDays = *m.RetentionPolicy.Days
+ }
+ }
+
+ return mp
+}
+
+// toSsp converts FileServiceProperties to the convenience representation StorageServiceProperties.
+// This method exists to accommodate the protocol layer's swagger unification.
+func (fsp *FileServiceProperties) toSsp() *StorageServiceProperties {
+ if fsp == nil {
+ return nil
+ }
+
+ return &StorageServiceProperties{
+ rawResponse: fsp.rawResponse,
+ HourMetrics: fsp.HourMetrics.toM(),
+ MinuteMetrics: fsp.MinuteMetrics.toM(),
+ Cors: fsp.Cors,
+ }
+}
+
+// toM converts MetricProperties to Metrics.
+// This method exists to accommodate the protocol layer's swagger unification.
+func (mp MetricProperties) toM() *Metrics {
+ m := Metrics{
+ Version: storageAnalyticsVersion,
+ RetentionPolicy: &RetentionPolicy{}} // Note: Version and RetentionPolicy are actually mandatory.
+
+ if mp.MetricEnabled {
+ m.Enabled = true
+ m.IncludeAPIs = &mp.IncludeAPIs
+ if mp.RetentionPolicyEnabled {
+ m.RetentionPolicy.Enabled = true
+ m.RetentionPolicy.Days = &mp.RetentionDays
+ }
+ }
+
+ return &m
+}
+
+// GetProperties returns the properties of the File service.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-file-service-properties.
+func (s ServiceURL) GetProperties(ctx context.Context) (*FileServiceProperties, error) {
+ ssp, err := s.client.GetProperties(ctx, nil)
+
+ return ssp.toFsp(), err
+}
+
+// SetProperties sets the properties of the File service.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties.
+func (s ServiceURL) SetProperties(ctx context.Context, properties FileServiceProperties) (*ServiceSetPropertiesResponse, error) {
+ return s.client.SetProperties(ctx, *properties.toSsp(), nil)
+}
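
A sketch of the share-enumeration loop ListSharesSegment's comment describes; Marker.NotDone and the ShareItems/NextMarker response fields are assumed from the SDK's generated models:

package samples

import (
    "context"
    "fmt"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func listAllShares(ctx context.Context, serviceURL azfile.ServiceURL) error {
    for marker := (azfile.Marker{}); marker.NotDone(); { // NotDone: assumed helper
        resp, err := serviceURL.ListSharesSegment(ctx, marker, azfile.ListSharesOptions{
            Detail: azfile.ListSharesDetail{Metadata: true, Snapshots: true},
        })
        if err != nil {
            return err
        }
        for _, share := range resp.ShareItems { // generated-model field (assumed)
            fmt.Println(share.Name)
        }
        marker = resp.NextMarker
    }
    return nil
}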
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/url_share.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_share.go
new file mode 100644
index 0000000..013af70
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/url_share.go
@@ -0,0 +1,166 @@
+package azfile
+
+import (
+ "bytes"
+ "context"
+ "net/url"
+ "strings"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A ShareURL represents a URL to the Azure Storage share allowing you to manipulate its directories and files.
+type ShareURL struct {
+ shareClient shareClient
+}
+
+// NewShareURL creates a ShareURL object using the specified URL and request policy pipeline.
+// Note: p can't be nil.
+func NewShareURL(url url.URL, p pipeline.Pipeline) ShareURL {
+ shareClient := newShareClient(url, p)
+ return ShareURL{shareClient: shareClient}
+}
+
+// URL returns the URL endpoint used by the ShareURL object.
+func (s ShareURL) URL() url.URL {
+ return s.shareClient.URL()
+}
+
+// String returns the URL as a string.
+func (s ShareURL) String() string {
+ u := s.URL()
+ return u.String()
+}
+
+// WithPipeline creates a new ShareURL object identical to the source but with the specified request policy pipeline.
+func (s ShareURL) WithPipeline(p pipeline.Pipeline) ShareURL {
+ return NewShareURL(s.URL(), p)
+}
+
+// WithSnapshot creates a new ShareURL object identical to the source but with the specified snapshot timestamp.
+// Pass an empty string to remove the snapshot, returning a URL to the base share.
+func (s ShareURL) WithSnapshot(snapshot string) ShareURL {
+ p := NewFileURLParts(s.URL())
+ p.ShareSnapshot = snapshot
+ return NewShareURL(p.URL(), s.shareClient.Pipeline())
+}
+
+// NewDirectoryURL creates a new DirectoryURL object by concatenating directoryName to the end of
+// ShareURL's URL. The new DirectoryURL uses the same request policy pipeline as the ShareURL.
+// To change the pipeline, create the DirectoryURL and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewDirectoryURL instead of calling this object's
+// NewDirectoryURL method.
+func (s ShareURL) NewDirectoryURL(directoryName string) DirectoryURL {
+ directoryURL := appendToURLPath(s.URL(), directoryName)
+ return NewDirectoryURL(directoryURL, s.shareClient.Pipeline())
+}
+
+// NewRootDirectoryURL creates a new DirectoryURL object using ShareURL's URL.
+// The new DirectoryURL uses the same request policy pipeline as the
+// ShareURL. To change the pipeline, create the DirectoryURL and then call its WithPipeline method
+// passing in the desired pipeline object. Or, call NewDirectoryURL instead of calling the NewDirectoryURL method.
+func (s ShareURL) NewRootDirectoryURL() DirectoryURL {
+ return NewDirectoryURL(s.URL(), s.shareClient.Pipeline())
+}
+
+// Create creates a new share within a storage account. If a share with the same name already exists, the operation fails.
+// quotaInGB specifies the maximum size of the share in gigabytes; 0 means you accept the service's default quota.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-share.
+func (s ShareURL) Create(ctx context.Context, metadata Metadata, quotaInGB int32) (*ShareCreateResponse, error) {
+ var quota *int32
+ if quotaInGB != 0 {
+ quota = "aInGB
+ }
+ return s.shareClient.Create(ctx, nil, metadata, quota)
+}
+
+// CreateSnapshot creates a read-only snapshot of a share.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-share.
+func (s ShareURL) CreateSnapshot(ctx context.Context, metadata Metadata) (*ShareCreateSnapshotResponse, error) {
+ return s.shareClient.CreateSnapshot(ctx, nil, metadata)
+}
+
+// Delete marks the specified share or share snapshot for deletion.
+// The share or share snapshot and any files contained within it are later deleted during garbage collection.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-share.
+func (s ShareURL) Delete(ctx context.Context, deleteSnapshotsOption DeleteSnapshotsOptionType) (*ShareDeleteResponse, error) {
+ return s.shareClient.Delete(ctx, nil, nil, deleteSnapshotsOption)
+}
+
+// GetProperties returns all user-defined metadata and system properties for the specified share or share snapshot.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-share-properties.
+func (s ShareURL) GetProperties(ctx context.Context) (*ShareGetPropertiesResponse, error) {
+ return s.shareClient.GetProperties(ctx, nil, nil)
+}
+
+// SetQuota sets service-defined properties for the specified share.
+// quotaInGB specifies the maximum size of the share in gigabytes; 0 means no quota is set and the service's default value is used.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-share-properties.
+func (s ShareURL) SetQuota(ctx context.Context, quotaInGB int32) (*ShareSetQuotaResponse, error) {
+ var quota *int32
+ if quotaInGB != 0 {
+ quota = "aInGB
+ }
+ return s.shareClient.SetQuota(ctx, nil, quota)
+}
+
+// SetMetadata sets the share's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-share-metadata.
+func (s ShareURL) SetMetadata(ctx context.Context, metadata Metadata) (*ShareSetMetadataResponse, error) {
+ return s.shareClient.SetMetadata(ctx, nil, metadata)
+}
+
+// GetPermissions returns information about stored access policies specified on the share.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-share-acl.
+func (s ShareURL) GetPermissions(ctx context.Context) (*SignedIdentifiers, error) {
+ return s.shareClient.GetAccessPolicy(ctx, nil)
+}
+
+// The AccessPolicyPermission type simplifies creating the permissions string for a share's access policy.
+// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
+type AccessPolicyPermission struct {
+ Read, Create, Write, Delete, List bool
+}
+
+// String produces the access policy permission string for an Azure Storage share.
+// Call this method to set AccessPolicy's Permission field.
+func (p AccessPolicyPermission) String() string {
+ var b bytes.Buffer
+ if p.Read {
+ b.WriteRune('r')
+ }
+ if p.Create {
+ b.WriteRune('c')
+ }
+ if p.Write {
+ b.WriteRune('w')
+ }
+ if p.Delete {
+ b.WriteRune('d')
+ }
+ if p.List {
+ b.WriteRune('l')
+ }
+ return b.String()
+}
+
+// Parse initializes the AccessPolicyPermission's fields from a string.
+func (p *AccessPolicyPermission) Parse(s string) {
+ p.Read = strings.ContainsRune(s, 'r')
+ p.Create = strings.ContainsRune(s, 'c')
+ p.Write = strings.ContainsRune(s, 'w')
+ p.Delete = strings.ContainsRune(s, 'd')
+ p.List = strings.ContainsRune(s, 'l')
+}
+
+// SetPermissions sets a stored access policy for use with shared access signatures.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-share-acl.
+func (s ShareURL) SetPermissions(ctx context.Context, permissions []SignedIdentifier) (*ShareSetAccessPolicyResponse, error) {
+ return s.shareClient.SetAccessPolicy(ctx, permissions, nil)
+}
+
+// GetStatistics retrieves statistics related to the share.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-share-stats.
+func (s ShareURL) GetStatistics(ctx context.Context) (*ShareStats, error) {
+ return s.shareClient.GetStatistics(ctx, nil)
+}
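
A sketch tying the share-level calls together: create a share with a quota, snapshot it, and round-trip the permission string a stored access policy would carry. Everything used here is defined in this file, except Metadata, which is assumed to be the SDK's usual string map:

package samples

import (
    "context"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func configureShare(ctx context.Context, shareURL azfile.ShareURL) (string, error) {
    // 10 GB quota; pass 0 to accept the service default instead.
    if _, err := shareURL.Create(ctx, azfile.Metadata{"team": "storage"}, 10); err != nil {
        return "", err
    }
    if _, err := shareURL.CreateSnapshot(ctx, azfile.Metadata{}); err != nil {
        return "", err
    }

    // "rl": read + list, in the fixed r/c/w/d/l order String produces.
    perm := azfile.AccessPolicyPermission{Read: true, List: true}.String()

    // Parse round-trips the string back into the boolean fields.
    var parsed azfile.AccessPolicyPermission
    parsed.Parse(perm)
    return perm, nil
}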
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/version.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/version.go
new file mode 100644
index 0000000..d5215c9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/version.go
@@ -0,0 +1,3 @@
+package azfile
+
+const serviceLibVersion = "0.6.0"
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_anonymous.go
new file mode 100644
index 0000000..8b74b87
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_anonymous.go
@@ -0,0 +1,55 @@
+package azfile
+
+import (
+ "context"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// Credential represents any credential type; it is used to create a credential policy Factory.
+type Credential interface {
+ pipeline.Factory
+ credentialMarker()
+}
+
+type credentialFunc pipeline.FactoryFunc
+
+func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+ return f(next, po)
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (credentialFunc) credentialMarker() {}
+
+//////////////////////////////
+
+// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
+// or for use with Shared Access Signatures (SAS).
+func NewAnonymousCredential() Credential {
+ return anonymousCredentialFactory
+}
+
+var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
+
+// anonymousCredentialPolicyFactory is the credential's policy factory.
+type anonymousCredentialPolicyFactory struct {
+}
+
+// New creates a credential policy object.
+func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+ return &anonymousCredentialPolicy{next: next}
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*anonymousCredentialPolicyFactory) credentialMarker() {}
+
+// anonymousCredentialPolicy is the credential's policy object.
+type anonymousCredentialPolicy struct {
+ next pipeline.Policy
+}
+
+// Do implements the credential's policy interface.
+func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+ // For anonymous credentials, this is effectively a no-op
+ return p.next.Do(ctx, request)
+}
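
A sketch of the anonymous path: with a SAS already embedded in the URL there is nothing to sign, so this pass-through credential is all NewPipeline needs (the SAS token below is a placeholder):

package main

import (
    "log"
    "net/url"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func main() {
    p := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})

    u, err := url.Parse("https://myaccount.file.core.windows.net/myshare?<sas-token>")
    if err != nil {
        log.Fatal(err)
    }
    shareURL := azfile.NewShareURL(*u, p)
    _ = shareURL // use shareURL for share-level operations
}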
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_shared_key.go
new file mode 100644
index 0000000..9ec7b15
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_shared_key.go
@@ -0,0 +1,187 @@
+package azfile
+
+import (
+ "bytes"
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+ bytes, err := base64.StdEncoding.DecodeString(accountKey)
+ if err != nil {
+ return &SharedKeyCredential{}, err
+ }
+ return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
+}
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+// It is immutable, making it shareable and goroutine-safe.
+type SharedKeyCredential struct {
+ // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
+ accountName string
+ accountKey []byte
+}
+
+// AccountName returns the Storage account's name.
+func (f SharedKeyCredential) AccountName() string {
+ return f.accountName
+}
+
+// New creates a credential policy object.
+func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+ return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+ // Add an x-ms-date header if it doesn't already exist
+ if d := request.Header.Get(headerXmsDate); d == "" {
+ request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
+ }
+ stringToSign := f.buildStringToSign(request)
+ signature := f.ComputeHMACSHA256(stringToSign)
+ authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
+ request.Header[headerAuthorization] = []string{authHeader}
+
+ response, err := next.Do(ctx, request)
+ if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
+ // Service failed to authenticate request, log it
+ po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
+ }
+ return response, err
+ })
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*SharedKeyCredential) credentialMarker() {}
+
+// Constants ensuring that header names are correctly spelled and consistently cased.
+const (
+ headerAuthorization = "Authorization"
+ headerCacheControl = "Cache-Control"
+ headerContentEncoding = "Content-Encoding"
+ headerContentDisposition = "Content-Disposition"
+ headerContentLanguage = "Content-Language"
+ headerContentLength = "Content-Length"
+ headerContentMD5 = "Content-MD5"
+ headerContentType = "Content-Type"
+ headerDate = "Date"
+ headerIfMatch = "If-Match"
+ headerIfModifiedSince = "If-Modified-Since"
+ headerIfNoneMatch = "If-None-Match"
+ headerIfUnmodifiedSince = "If-Unmodified-Since"
+ headerRange = "Range"
+ headerUserAgent = "User-Agent"
+ headerXmsDate = "x-ms-date"
+ headerXmsVersion = "x-ms-version"
+)
+
+// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
+func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
+ h := hmac.New(sha256.New, f.accountKey)
+ h.Write([]byte(message))
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string {
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
+ headers := request.Header
+ contentLength := headers.Get(headerContentLength)
+ if contentLength == "0" {
+ contentLength = ""
+ }
+
+ stringToSign := strings.Join([]string{
+ request.Method,
+ headers.Get(headerContentEncoding),
+ headers.Get(headerContentLanguage),
+ contentLength,
+ headers.Get(headerContentMD5),
+ headers.Get(headerContentType),
+ "", // Empty date because x-ms-date is expected (as per web page above)
+ headers.Get(headerIfModifiedSince),
+ headers.Get(headerIfMatch),
+ headers.Get(headerIfNoneMatch),
+ headers.Get(headerIfUnmodifiedSince),
+ headers.Get(headerRange),
+ buildCanonicalizedHeader(headers),
+ f.buildCanonicalizedResource(request.URL),
+ }, "\n")
+ return stringToSign
+}
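+
+// Illustration: for a bare GET of "/myshare?restype=share" where only
+// x-ms-date and x-ms-version are set, the fields above join into:
+//
+//   GET\n\n\n\n\n\n\n\n\n\n\n\nx-ms-date:<date>\nx-ms-version:<version>\n/myaccount/myshare\nrestype:share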
+
+func buildCanonicalizedHeader(headers http.Header) string {
+ cm := map[string][]string{}
+ for k, v := range headers {
+ headerName := strings.TrimSpace(strings.ToLower(k))
+ if strings.HasPrefix(headerName, "x-ms-") {
+ cm[headerName] = v // NOTE: the value must not have any whitespace around it.
+ }
+ }
+ if len(cm) == 0 {
+ return ""
+ }
+
+ keys := make([]string, 0, len(cm))
+ for key := range cm {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ ch := bytes.NewBufferString("")
+ for i, key := range keys {
+ if i > 0 {
+ ch.WriteRune('\n')
+ }
+ ch.WriteString(key)
+ ch.WriteRune(':')
+ ch.WriteString(strings.Join(cm[key], ","))
+ }
+ return string(ch.Bytes())
+}
+
+func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
+ cr := bytes.NewBufferString("/")
+ cr.WriteString(f.accountName)
+
+ if len(u.Path) > 0 {
+ // Any portion of the CanonicalizedResource string that is derived from
+ // the resource's URI should be encoded exactly as it is in the URI.
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
+ cr.WriteString(u.EscapedPath())
+ } else {
+ // a slash is required to indicate the root path
+ cr.WriteString("/")
+ }
+
+ // params is a map[string][]string; param name is key; params values is []string
+ params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
+ if err != nil {
+ sanityCheckFailed(err.Error())
+ }
+
+ if len(params) > 0 { // There is at least 1 query parameter
+ paramNames := []string{} // We use this to sort the parameter key names
+ for paramName := range params {
+ paramNames = append(paramNames, paramName) // paramNames must be lowercase
+ }
+ sort.Strings(paramNames)
+
+ for _, paramName := range paramNames {
+ paramValues := params[paramName]
+ sort.Strings(paramValues)
+
+ // Join the sorted key values separated by ','
+ // Then prepend "keyName:"; then add this string to the buffer
+ cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
+ }
+ }
+ return string(cr.Bytes())
+}
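
Beyond signing requests inside the pipeline, the credential's exported HMAC helper is the same primitive SAS generation builds on; a sketch with a fake base64 key:

package main

import (
    "encoding/base64"
    "fmt"
    "log"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func main() {
    // Account keys are base64-encoded, as shown in the portal; this one is fake.
    fakeKey := base64.StdEncoding.EncodeToString([]byte("not-a-real-key"))
    cred, err := azfile.NewSharedKeyCredential("myaccount", fakeKey)
    if err != nil {
        log.Fatal(err)
    }
    // The pipeline policy signs buildStringToSign's output with exactly this call.
    fmt.Println(cred.ComputeHMACSHA256("string-to-sign"))
}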
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_unix.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_unix.go
new file mode 100644
index 0000000..54f2f9f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_unix.go
@@ -0,0 +1,28 @@
+// +build linux darwin freebsd
+
+package azfile
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+type mmf []byte
+
+func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
+ prot, flags := unix.PROT_READ, unix.MAP_SHARED // Assume read-only
+ if writable {
+ prot, flags = unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED
+ }
+ addr, err := unix.Mmap(int(file.Fd()), offset, length, prot, flags)
+ return mmf(addr), err
+}
+
+func (m *mmf) unmap() {
+ err := unix.Munmap(*m)
+ *m = nil
+ if err != nil {
+ sanityCheckFailed(err.Error())
+ }
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_windows.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_windows.go
new file mode 100644
index 0000000..0050479
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_windows.go
@@ -0,0 +1,40 @@
+package azfile
+
+import (
+ "os"
+ "reflect"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+type mmf []byte
+
+func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
+ prot, access := uint32(windows.PAGE_READONLY), uint32(windows.FILE_MAP_READ) // Assume read-only
+ if writable {
+ prot, access = uint32(windows.PAGE_READWRITE), uint32(windows.FILE_MAP_WRITE)
+ }
+ maxSize := int64(offset + int64(length))
+ hMMF, errno := windows.CreateFileMapping(windows.Handle(file.Fd()), nil, prot, uint32(maxSize>>32), uint32(maxSize&0xffffffff), nil)
+ if hMMF == 0 {
+ return nil, os.NewSyscallError("CreateFileMapping", errno)
+ }
+ defer windows.CloseHandle(hMMF)
+ addr, errno := windows.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
+ if addr == 0 {
+ return nil, os.NewSyscallError("MapViewOfFile", errno)
+ }
+ m := mmf{}
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
+ h.Data = addr
+ h.Len = length
+ h.Cap = h.Len
+ return m, nil
+}
+
+func (m *mmf) unmap() {
+ addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
+ *m = mmf{}
+ err := windows.UnmapViewOfFile(addr)
+ if err != nil {
+ sanityCheckFailed(err.Error())
+ }
+}
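
Both platform variants expose the same two-call surface (newMMF/unmap) to the rest of the package. A hypothetical package-internal helper (not part of the upstream file) showing the intended pattern:

// Inside package azfile (imports "os"): map a pre-sized destination file, let
// the caller fill the mapping, then release it. The mmf value is a []byte
// aliasing the file's contents, so writes through it land in the file without
// an extra copy.
func fillFileViaMMF(file *os.File, size int64, fill func([]byte)) error {
    m, err := newMMF(file, true /* writable */, 0, int(size))
    if err != nil {
        return err
    }
    defer m.unmap()
    fill(m)
    return nil
}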
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_pipeline.go
new file mode 100644
index 0000000..6eaa038
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_pipeline.go
@@ -0,0 +1,44 @@
+package azfile
+
+import (
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
+type PipelineOptions struct {
+ // Log configures the pipeline's logging infrastructure indicating what information is logged and where.
+ Log pipeline.LogOptions
+
+ // Retry configures the built-in retry policy behavior.
+ Retry RetryOptions
+
+ // RequestLog configures the built-in request logging policy.
+ RequestLog RequestLogOptions
+
+ // Telemetry configures the built-in telemetry policy behavior.
+ Telemetry TelemetryOptions
+}
+
+// NewPipeline creates a Pipeline using the specified credentials and options.
+// Note: c can't be nil.
+func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
+ // Closest to API goes first; closest to the wire goes last
+ f := []pipeline.Factory{
+ NewTelemetryPolicyFactory(o.Telemetry),
+ NewUniqueRequestIDPolicyFactory(),
+ NewRetryPolicyFactory(o.Retry),
+ }
+
+ if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
+ // For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
+ // NOTE: The credential's policy factory must appear close to the wire so it can sign any
+ // changes made by other factories (like UniqueRequestIDPolicyFactory)
+ f = append(f, c)
+ }
+ f = append(f,
+ NewRequestLogPolicyFactory(o.RequestLog),
+ pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
+
+ return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})
+}
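
A sketch of how a caller tunes the pipeline assembled above; every option group is defined in this vendored package, and unset fields keep the factory-computed defaults:

package main

import (
    "time"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func main() {
    po := azfile.PipelineOptions{
        Retry: azfile.RetryOptions{
            Policy:        azfile.RetryPolicyExponential,
            MaxTries:      5,
            TryTimeout:    2 * time.Minute,
            RetryDelay:    2 * time.Second,
            MaxRetryDelay: 60 * time.Second,
        },
        RequestLog: azfile.RequestLogOptions{
            LogWarningIfTryOverThreshold: 5 * time.Second,
        },
    }
    p := azfile.NewPipeline(azfile.NewAnonymousCredential(), po)
    _ = p // hand p to NewServiceURL / NewShareURL / NewFileURL
}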
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_request_log.go
new file mode 100644
index 0000000..3a984ff
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_request_log.go
@@ -0,0 +1,182 @@
+package azfile
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RequestLogOptions configures the request logging policy's behavior.
+type RequestLogOptions struct {
+ // LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
+ // duration (-1=no logging; 0=default threshold).
+ LogWarningIfTryOverThreshold time.Duration
+}
+
+func (o RequestLogOptions) defaults() RequestLogOptions {
+ if o.LogWarningIfTryOverThreshold == 0 {
+ // It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
+ // But this monitors the time to get the HTTP response; NOT the time to download the response body.
+ o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
+ }
+ return o
+}
+
+// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
+func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
+ o = o.defaults() // Force defaults to be calculated
+ return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+ // These variables are per-policy; shared by multiple calls to Do
+ var try int32
+ operationStart := time.Now() // If this is the 1st try, record the operation start time
+ return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+ try++ // The first try is #1 (not #0)
+
+ // Log the outgoing request as informational
+ if po.ShouldLog(pipeline.LogInfo) {
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
+ pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
+ po.Log(pipeline.LogInfo, b.String())
+ }
+
+ // Set the time for this particular retry operation and then Do the operation.
+ tryStart := time.Now()
+ response, err = next.Do(ctx, request) // Make the request
+ tryEnd := time.Now()
+ tryDuration := tryEnd.Sub(tryStart)
+ opDuration := tryEnd.Sub(operationStart)
+
+ logLevel, forceLog := pipeline.LogInfo, false // Default logging information
+
+ // If the response took too long, we'll upgrade to warning.
+ if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
+ // Log a warning if the try duration exceeded the specified threshold
+ logLevel, forceLog = pipeline.LogWarning, true
+ }
+
+ if err == nil { // We got a response from the service
+ sc := response.Response().StatusCode
+ if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
+ logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those excluded above) or any 5xx
+ } else {
+ // For other status codes, we leave the level as is.
+ }
+ } else { // This error did not get an HTTP response from the service; upgrade the severity to Error
+ logLevel, forceLog = pipeline.LogError, true
+ }
+
+ if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
+ // We're going to log this; build the string to log
+ b := &bytes.Buffer{}
+ slow := ""
+ if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
+ slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
+ }
+ fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
+ if err != nil { // This HTTP request did not get a response from the service
+ fmt.Fprint(b, "REQUEST ERROR\n")
+ } else {
+ if logLevel == pipeline.LogError {
+ fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
+ } else {
+ fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
+ }
+ }
+
+ pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
+ if logLevel <= pipeline.LogError {
+ b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
+ }
+ msg := b.String()
+
+ if forceLog {
+ pipeline.ForceLog(logLevel, msg)
+ }
+ if shouldLog {
+ po.Log(logLevel, msg)
+ }
+ }
+ return response, err
+ }
+ })
+}
+
+// RedactSigQueryParam redacts the 'sig' query parameter in a URL's raw query to protect the secret.
+func RedactSigQueryParam(rawQuery string) (bool, string) {
+ rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
+ sigFound := strings.Contains(rawQuery, "?sig=")
+ if !sigFound {
+ sigFound = strings.Contains(rawQuery, "&sig=")
+ if !sigFound {
+ return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
+ }
+ }
+ // [?|&]sig= found, redact its value
+ values, _ := url.ParseQuery(rawQuery)
+ for name := range values {
+ if strings.EqualFold(name, "sig") {
+ values[name] = []string{"REDACTED"}
+ }
+ }
+ return sigFound, values.Encode()
+}
+
+func prepareRequestForLogging(request pipeline.Request) *http.Request {
+ req := request
+ if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
+ // Make copy so we don't destroy the query parameters we actually need to send in the request
+ req = request.Copy()
+ req.Request.URL.RawQuery = rawQuery
+ }
+
+ return prepareRequestForServiceLogging(req)
+}
+
+func stack() []byte {
+ buf := make([]byte, 1024)
+ for {
+ n := runtime.Stack(buf, false)
+ if n < len(buf) {
+ return buf[:n]
+ }
+ buf = make([]byte, 2*len(buf))
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+// Redacting the x-ms-copy-source header is relevant for the blob and file services only. For other services,
+// this method can directly return request.Request.
+///////////////////////////////////////////////////////////////////////////////////////
+func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
+ req := request
+ if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
+ req = request.Copy()
+ url, err := url.Parse(req.Header.Get(key))
+ if err == nil {
+ if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
+ url.RawQuery = rawQuery
+ req.Header.Set(xMsCopySourceHeader, url.String())
+ }
+ }
+ }
+ return req.Request
+}
+
+const xMsCopySourceHeader = "x-ms-copy-source"
+
+func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
+ for keyInHeader := range header {
+ if strings.EqualFold(keyInHeader, key) {
+ return true, keyInHeader
+ }
+ }
+ return false, ""
+}
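
RedactSigQueryParam is exported, so callers logging URLs themselves can reuse it; a small sketch (the query string is made up):

package main

import (
    "fmt"

    "github.com/Azure/azure-storage-file-go/azfile"
)

func main() {
    raw := "st=2019-11-01&sig=topsecretsignature&sp=r"
    found, redacted := azfile.RedactSigQueryParam(raw)
    fmt.Println(found)    // true: "&sig=" is present
    fmt.Println(redacted) // the re-encoded query with sig=REDACTED
}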
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_retry.go
new file mode 100644
index 0000000..96857d4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_retry.go
@@ -0,0 +1,403 @@
+package azfile
+
+import (
+ "context"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+type RetryPolicy int32
+
+const (
+ // RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
+ RetryPolicyExponential RetryPolicy = 0
+
+ // RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
+ RetryPolicyFixed RetryPolicy = 1
+)
+
+// RetryOptions configures the retry policy's behavior.
+type RetryOptions struct {
+ // Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+ // A value of zero means that you accept our default policy.
+ Policy RetryPolicy
+
+ // MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
+ // A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
+ MaxTries int32
+
+ // TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
+ // A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
+ // of data, the default TryTimeout will probably not be sufficient. You should override this value
+ // based on the bandwidth available to the host machine and proximity to the Storage service. A good
+ // starting point may be something like (60 seconds per MB of anticipated-payload-size).
+ TryTimeout time.Duration
+
+ // RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
+ // When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
+ // with each retry up to a maximum specified by MaxRetryDelay.
+ // If you specify 0, then you must also specify 0 for MaxRetryDelay.
+ // If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
+ // equal to or greater than RetryDelay.
+ RetryDelay time.Duration
+
+ // MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
+ // If you specify 0, then you must also specify 0 for RetryDelay.
+ MaxRetryDelay time.Duration
+}
+
+func (o RetryOptions) retryReadsFromSecondaryHost() string {
+ return ""
+}
+
+func (o RetryOptions) defaults() RetryOptions {
+ // We assume the following:
+ // 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
+ // 2. o.MaxTries >= 0
+ // 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
+ // 4. o.RetryDelay <= o.MaxRetryDelay
+ // 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
+
+ IfDefault := func(current *time.Duration, desired time.Duration) {
+ if *current == time.Duration(0) {
+ *current = desired
+ }
+ }
+
+ // Set defaults if unspecified
+ if o.MaxTries == 0 {
+ o.MaxTries = 4
+ }
+ switch o.Policy {
+ case RetryPolicyExponential:
+ IfDefault(&o.TryTimeout, 1*time.Minute)
+ IfDefault(&o.RetryDelay, 4*time.Second)
+ IfDefault(&o.MaxRetryDelay, 120*time.Second)
+
+ case RetryPolicyFixed:
+ IfDefault(&o.TryTimeout, 1*time.Minute)
+ IfDefault(&o.RetryDelay, 30*time.Second)
+ IfDefault(&o.MaxRetryDelay, 120*time.Second)
+ }
+ return o
+}
+
+func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
+ pow := func(number int64, exponent int32) int64 { // pow is nested helper function
+ var result int64 = 1
+ for n := int32(0); n < exponent; n++ {
+ result *= number
+ }
+ return result
+ }
+
+ delay := time.Duration(0)
+ switch o.Policy {
+ case RetryPolicyExponential:
+ delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
+
+ case RetryPolicyFixed:
+ if try > 1 { // Any try after the 1st uses the fixed delay
+ delay = o.RetryDelay
+ }
+ }
+
+ // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
+ // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
+ delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
+ if delay > o.MaxRetryDelay {
+ delay = o.MaxRetryDelay
+ }
+ return delay
+}
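+
+// Worked example with the exponential defaults (RetryDelay=4s, MaxRetryDelay=120s):
+// the pre-jitter delays are try 1: (2^0-1)*4s = 0s, try 2: (2^1-1)*4s = 4s,
+// try 3: 12s, and try 4: 28s; each is then scaled by a random factor in
+// [0.8, 1.3) and clamped to MaxRetryDelay.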
+
+// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
+func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
+ o = o.defaults() // Force defaults to be calculated
+ return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+ return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+ // Before each try, we'll select either the primary or secondary URL.
+ primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
+
+ // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
+ considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
+
+ // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3)
+ // When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
+ // If using a secondary:
+ // Odd tries go against the primary; even tries go against the secondary
+ // For the primary, wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.3))
+ // If the secondary gets a 404, don't fail; retry, but future retries are only against the primary
+ // When retrying against the secondary, ignore the retry count and wait (1 second * random(0.8, 1.3))
+ for try := int32(1); try <= o.MaxTries; try++ {
+ logf("\n=====> Try=%d\n", try)
+
+ // Determine which endpoint to try. It's primary if there is no secondary or if it is an odd-numbered attempt.
+ tryingPrimary := !considerSecondary || (try%2 == 1)
+ // Select the correct host and delay
+ if tryingPrimary {
+ primaryTry++
+ delay := o.calcDelay(primaryTry)
+ logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
+ time.Sleep(delay) // The 1st try returns 0 delay
+ } else {
+ // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
+ delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
+ logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
+ time.Sleep(delay) // Delay with some jitter before trying secondary
+ }
+
+ // Clone the original request to ensure that each try starts with the original (unmutated) request.
+ requestCopy := request.Copy()
+
+ // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
+ // the stream may not be at offset 0 when we first get it and we want the same behavior for the
+ // 1st try as for additional tries.
+ if err = requestCopy.RewindBody(); err != nil {
+ sanityCheckFailed(err.Error())
+ }
+ if !tryingPrimary {
+ requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
+ requestCopy.Host = o.retryReadsFromSecondaryHost()
+ }
+
+ // Set the server-side timeout query parameter "timeout=[seconds]"
+ timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
+ if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
+ t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
+ logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
+ if t < timeout {
+ timeout = t
+ }
+ if timeout < 0 {
+ timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
+ }
+ logf("TryTimeout adjusted to=%d sec\n", timeout)
+ }
+ q := requestCopy.Request.URL.Query()
+ q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
+ requestCopy.Request.URL.RawQuery = q.Encode()
+ logf("Url=%s\n", requestCopy.Request.URL.String())
+
+ // Set the time for this particular retry operation and then Do the operation.
+ tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
+ //requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
+ response, err = next.Do(tryCtx, requestCopy) // Make the request
+ /*err = improveDeadlineExceeded(err)
+ if err == nil {
+ response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
+ }*/
+ logf("Err=%v, response=%v\n", err, response)
+
+ action := "" // This MUST get changed within the switch code below
+ switch {
+ case ctx.Err() != nil:
+ action = "NoRetry: Op timeout"
+ case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
+ // If attempt was against the secondary & it returned a StatusNotFound (404), then
+ // the resource was not found. This may be due to replication delay. So, in this
+ // case, we'll never try the secondary again for this operation.
+ considerSecondary = false
+ action = "Retry: Secondary URL returned 404"
+ case err != nil:
+ // NOTE: the protocol layer's Responder returns a non-nil error if the REST API returns an invalid status code for the invoked operation.
+ // Use ServiceCode to verify whether the error originated on the storage service side;
+ // ServiceCode is set only when a storage-service-related error occurred.
+ if stErr, ok := err.(StorageError); ok {
+ if stErr.Temporary() {
+ action = "Retry: StorageError with error service code and Temporary()"
+ } else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: this is a temporary work-around; remove it after the protocol layer fixes the issue where a net.Error is wrapped as a StorageError
+ action = "Retry: StorageError with success status code"
+ } else {
+ action = "NoRetry: StorageError not Temporary() and without retriable status code"
+ }
+ } else if netErr, ok := err.(net.Error); ok {
+ // Use a non-retriable list for net.Error rather than a retriable one,
+ // because some errors that do need to be retried, like 'connection reset by peer'
+ // or 'transport connection broken', come without a Temporary() implementation.
+ // So the SDK retries in most cases, unless the error is known for sure to be non-retriable.
+ if !isNotRetriable(netErr) {
+ action = "Retry: net.Error and not in the non-retriable list"
+ } else {
+ action = "NoRetry: net.Error and in the non-retriable list"
+ }
+ } else {
+ action = "NoRetry: unrecognized error"
+ }
+ default:
+ action = "NoRetry: successful HTTP request" // no error
+ }
+
+ logf("Action=%s\n", action)
+ // fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
+ if action[0] != 'R' { // Retry only if action starts with 'R'
+ if err != nil {
+ tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
+ } else {
+ // We wrap the response's Body so that closing it also cancels the last per-try timeout context.
+ // So, when the user closes the Body, our per-try context gets cancelled too.
+ // Another option is to have the last policy do this wrapping for a per-retry context (not for the user's context)
+ if response == nil || response.Response() == nil {
+ // We panic if response or response.Response() is nil because, from the client's perspective,
+ // the response should not be nil when the request was sent and the operation executed successfully.
+ // Another option would be to execute the cancel function when response or response.Response() is nil,
+ // since in that case the current per-try context has nothing left to do.
+ sanityCheckFailed("invalid state, response should not be nil when the operation is executed successfully")
+ }
+
+ response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
+ }
+ break // Don't retry
+ }
+ if response != nil && response.Response() != nil && response.Response().Body != nil {
+ // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
+ body := response.Response().Body
+ io.Copy(ioutil.Discard, body)
+ body.Close()
+ }
+ // If retrying, cancel the current per-try timeout context
+ tryCancel()
+ }
+ return response, err // Not retryable or too many retries; return the last response/error
+ }
+ })
+}
+
+// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
+type contextCancelReadCloser struct {
+ cf context.CancelFunc
+ body io.ReadCloser
+}
+
+func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
+ return rc.body.Read(p)
+}
+
+func (rc *contextCancelReadCloser) Close() error {
+ err := rc.body.Close()
+ if rc.cf != nil {
+ rc.cf()
+ }
+ return err
+}
+
+// isNotRetriable checks if the provided net.Error isn't retriable.
+func isNotRetriable(errToParse net.Error) bool {
+ // No error, so this is NOT retriable.
+ if errToParse == nil {
+ return true
+ }
+
+ // The error is either temporary or a timeout so it IS retriable (not not retriable).
+ if errToParse.Temporary() || errToParse.Timeout() {
+ return false
+ }
+
+ genericErr := error(errToParse)
+
+ // From here on, all errors are neither Temporary() nor Timeout().
+ switch err := errToParse.(type) {
+ case *net.OpError:
+ // The net.Error is a net.OpError whose inner error is nil, so this is not retriable.
+ if err.Err == nil {
+ return true
+ }
+ genericErr = err.Err
+ }
+
+ switch genericErr.(type) {
+ case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
+ // If the error is one of the ones listed, then it is NOT retriable.
+ return true
+ }
+
+ // If it's invalid header field name/value error thrown by http module, then it is NOT retriable.
+ // This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
+ if strings.Contains(genericErr.Error(), "invalid header field") {
+ return true
+ }
+
+ // Assume the error is retriable.
+ return false
+}
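+
+// For example (illustrative values, following the rules above):
+//
+//    isNotRetriable(&net.DNSError{}) == true                   // listed error type, not retried
+//    isNotRetriable(&net.DNSError{IsTemporary: true}) == false // Temporary() errors are retriable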
+
+var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
+
+func isSuccessStatusCode(resp *http.Response) bool {
+ if resp == nil {
+ return false
+ }
+ for _, i := range successStatusCodes {
+ if i == resp.StatusCode {
+ return true
+ }
+ }
+ return false
+}
+
+// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
+var logf = func(format string, a ...interface{}) {}
+
+// Use this version to see the retry method's code path (import "fmt")
+//var logf = fmt.Printf
+
+/*
+type deadlineExceededReadCloser struct {
+ r io.ReadCloser
+}
+
+func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
+ n, err := 0, io.EOF
+ if r.r != nil {
+ n, err = r.r.Read(p)
+ }
+ return n, improveDeadlineExceeded(err)
+}
+func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
+ // For an HTTP request, the ReadCloser MUST also implement seek
+ // For an HTTP response, Seek MUST not be called (or this will panic)
+ o, err := r.r.(io.Seeker).Seek(offset, whence)
+ return o, improveDeadlineExceeded(err)
+}
+func (r *deadlineExceededReadCloser) Close() error {
+ if c, ok := r.r.(io.Closer); ok {
+ c.Close()
+ }
+ return nil
+}
+
+// timeoutError is the internal struct that implements our richer timeout error.
+type deadlineExceeded struct {
+ responseError
+}
+
+var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
+
+// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
+func improveDeadlineExceeded(cause error) error {
+ // If cause is not DeadlineExceeded, return the same error passed in.
+ if cause != context.DeadlineExceeded {
+ return cause
+ }
+ // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
+ return &deadlineExceeded{
+ responseError: responseError{
+ ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
+ },
+ }
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e *deadlineExceeded) Error() string {
+ return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
+}
+*/
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_telemetry.go
new file mode 100644
index 0000000..a99bfb3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_telemetry.go
@@ -0,0 +1,51 @@
+package azfile
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "runtime"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// TelemetryOptions configures the telemetry policy's behavior.
+type TelemetryOptions struct {
+ // Value is a string prepended to each request's User-Agent and sent to the service.
+ // The service records the user-agent in logs for diagnostics and tracking of client requests.
+ Value string
+}
+
+// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
+// which add telemetry information to outgoing HTTP requests.
+func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
+ b := &bytes.Buffer{}
+ b.WriteString(o.Value)
+ if b.Len() > 0 {
+ b.WriteRune(' ')
+ }
+ fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
+ telemetryValue := b.String()
+
+ return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+ return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+ request.Header.Set("User-Agent", telemetryValue)
+ return next.Do(ctx, request)
+ }
+ })
+}
+
+// NOTE: the ONLY function that should write to this variable is this func
+var platformInfo = func() string {
+ // User-Agent format: Azure-Storage/version (runtime; os type and version)
+ // Example: Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
+ operatingSystem := runtime.GOOS // Default OS string
+ switch operatingSystem {
+ case "windows":
+ operatingSystem = os.Getenv("OS") // Get more specific OS information
+ case "linux": // accept default OS info
+ case "freebsd": // accept default OS info
+ }
+ return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
+}()
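+
+// A minimal usage sketch (the Value string is illustrative): the factory is normally
+// wired up for you by NewPipeline via PipelineOptions, but it can also be created directly:
+//
+//    factory := NewTelemetryPolicyFactory(TelemetryOptions{Value: "myapp/1.0"})
+//    // Every request sent through a pipeline containing this policy then carries a
+//    // User-Agent like: "myapp/1.0 Azure-Storage/<lib version> (<go version>; <OS>)"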
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_unique_request_id.go
new file mode 100644
index 0000000..f501ad7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_unique_request_id.go
@@ -0,0 +1,24 @@
+package azfile
+
+import (
+ "context"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
+// that sets the request's x-ms-client-request-id header if it doesn't already exist.
+func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
+ return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+ // This is Policy's Do method:
+ return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+ id := request.Header.Get(xMsClientRequestID)
+ if id == "" { // Add a unique request ID if the caller didn't specify one already
+ request.Header.Set(xMsClientRequestID, newUUID().String())
+ }
+ return next.Do(ctx, request)
+ }
+ })
+}
+
+const xMsClientRequestID = "x-ms-client-request-id"
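+
+// The observable effect, sketched (u is an assumed url.URL):
+//
+//    req, _ := pipeline.NewRequest("GET", u, nil)
+//    // after the policy's Do runs, req.Header.Get(xMsClientRequestID) is a
+//    // freshly generated UUID, unless the caller already set the header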
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_retry_reader.go
new file mode 100644
index 0000000..f40f901
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_retry_reader.go
@@ -0,0 +1,185 @@
+package azfile
+
+import (
+ "context"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
+type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
+
+// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
+// that should be used to make an HTTP GET request.
+type HTTPGetterInfo struct {
+ // Offset specifies the start offset that should be used when
+ // creating the HTTP GET request's Range header
+ Offset int64
+
+ // Count specifies the count of bytes that should be used to calculate
+ // the end offset when creating the HTTP GET request's Range header
+ Count int64
+
+ // ETag specifies the resource's etag that should be used when creating
+ // the HTTP GET request's If-Match header
+ ETag ETag
+}
+
+// FailedReadNotifier is a function type that represents the notification function called when a read fails
+type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
+
+// RetryReaderOptions contains properties which can help to decide when to do retry.
+type RetryReaderOptions struct {
+ // MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
+ // while reading from a RetryReader. A value of zero means that no additional HTTP
+ // GET requests will be made.
+ MaxRetryRequests int
+ doInjectError bool
+ doInjectErrorRound int
+
+ // NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
+ NotifyFailedRead FailedReadNotifier
+
+ // TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+ // retryReader has the following special behaviour: closing the response body before it is all read is treated as a
+ // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
+ // read is too slow, the caller may want to force a retry in the hope that the retry will be quicker). If
+ // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
+ // treated as a fatal (non-retryable) error.
+ // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
+ // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
+ // which will be retried.
+ TreatEarlyCloseAsError bool
+}
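+
+// For instance, the forced-retry behaviour described above can be sketched as follows
+// (rr and the watchdog signal are illustrative):
+//
+//    rr := NewRetryReader(ctx, resp, info, RetryReaderOptions{MaxRetryRequests: 3}, getter)
+//    go func() {
+//        <-readTooSlow // hypothetical signal from the caller's own watchdog
+//        rr.Close()    // fails the in-flight Read; that Read then retries with a fresh GET
+//    }()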
+
+// retryReader implements io.ReadCloser.
+// retryReader reads from a response; if a retriable network error is returned while reading,
+// it retries according to the retry reader options by executing the user-defined getter
+// (with the provided info) to obtain a new response, and continues the overall read
+// from that new response.
+type retryReader struct {
+ ctx context.Context
+ info HTTPGetterInfo
+ countWasBounded bool
+ o RetryReaderOptions
+ getter HTTPGetter
+
+ // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
+ responseMu *sync.Mutex
+ response *http.Response
+}
+
+// NewRetryReader creates a retry reader.
+func NewRetryReader(ctx context.Context, initialResponse *http.Response,
+ info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
+ if getter == nil {
+ panic("getter must not be nil")
+ }
+ if info.Count < 0 {
+ panic("info.Count must be >= 0")
+ }
+ if o.MaxRetryRequests < 0 {
+ panic("o.MaxRetryRequests must be >= 0")
+ }
+ return &retryReader{
+ ctx: ctx,
+ getter: getter,
+ info: info,
+ countWasBounded: info.Count != CountToEnd,
+ response: initialResponse,
+ responseMu: &sync.Mutex{},
+ o: o}
+}
+
+func (s *retryReader) setResponse(r *http.Response) {
+ s.responseMu.Lock()
+ defer s.responseMu.Unlock()
+ s.response = r
+}
+
+func (s *retryReader) Read(p []byte) (n int, err error) {
+ for try := 0; ; try++ {
+ //fmt.Println(try) // Uncomment for debugging.
+ if s.countWasBounded && s.info.Count == CountToEnd {
+ // User specified an original count and the remaining bytes are 0, return 0, EOF
+ return 0, io.EOF
+ }
+
+ s.responseMu.Lock()
+ resp := s.response
+ s.responseMu.Unlock()
+ if resp == nil { // We don't have a response stream to read from, try to get one.
+ newResponse, err := s.getter(s.ctx, s.info)
+ if err != nil {
+ return 0, err
+ }
+ // Successful GET; this is the network stream we'll read from.
+ s.setResponse(newResponse)
+ resp = newResponse
+ }
+ n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
+
+ // Injection mechanism for testing.
+ if s.o.doInjectError && try == s.o.doInjectErrorRound {
+ err = &net.DNSError{IsTemporary: true}
+ }
+
+ // We successfully read data or reached EOF.
+ if err == nil || err == io.EOF {
+ s.info.Offset += int64(n) // Increment the start offset in case we need to make a new HTTP request in the future
+ if s.info.Count != CountToEnd {
+ s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
+ }
+ return n, err // Return the result to the caller
+ }
+ s.Close() // Error, close stream
+ s.setResponse(nil) // Our stream is no longer good
+
+ // Check the retry count and error code, and decide whether to retry.
+ retriesExhausted := try >= s.o.MaxRetryRequests
+ _, isNetError := err.(net.Error)
+ willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+
+ // Notify, for logging purposes, of any failures
+ if s.o.NotifyFailedRead != nil {
+ failureCount := try + 1 // because try is zero-based
+ s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
+ }
+
+ if willRetry {
+ continue
+ // Loop around and try to get and read from new stream.
+ }
+ return n, err // Not retryable, or retries exhausted, so just return
+ }
+}
+
+// By default, we allow an early Close, from another concurrent goroutine, to be used to force a retry.
+// Is it safe to close early from another goroutine? An early close ultimately ends up calling
+// net.Conn.Close, which is documented as "Any blocked Read or Write operations will be unblocked and return errors" -
+// exactly the behaviour we want.
+// NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read),
+// then one of two different types of error may happen: either the one we check for here,
+// or a net.Error (due to closure of the connection). Which one happens depends on timing. We only need this routine
+// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *retryReader) wasRetryableEarlyClose(err error) bool {
+ if s.o.TreatEarlyCloseAsError {
+ return false // user wants all early closes to be errors, and so not retryable
+ }
+ // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
+ return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
+}
+
+const ReadOnClosedBodyMessage = "read on closed response body"
+
+func (s *retryReader) Close() error {
+ s.responseMu.Lock()
+ defer s.responseMu.Unlock()
+ if s.response != nil && s.response.Body != nil {
+ return s.response.Body.Close()
+ }
+ return nil
+}
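+
+// A minimal end-to-end sketch of NewRetryReader (the getter body, doRangedGet, dst, size
+// and etag are illustrative; real code obtains the initial *http.Response from a download call):
+//
+//    getter := func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) {
+//        // re-issue the ranged GET for bytes [i.Offset, i.Offset+i.Count) using i.ETag
+//        return doRangedGet(ctx, i) // hypothetical helper
+//    }
+//    rr := NewRetryReader(ctx, initialResp, HTTPGetterInfo{Offset: 0, Count: size, ETag: etag},
+//        RetryReaderOptions{MaxRetryRequests: 3}, getter)
+//    defer rr.Close()
+//    _, err := io.Copy(dst, rr) // transient network errors trigger up to 3 fresh GETs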
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_sas_account.go
new file mode 100644
index 0000000..94895a6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_sas_account.go
@@ -0,0 +1,218 @@
+package azfile
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSASSignatureValues struct {
+ Version string `param:"sv"` // If not specified, this defaults to SASVersion
+ Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
+ StartTime time.Time `param:"st"` // Not specified if IsZero
+ ExpiryTime time.Time `param:"se"` // Not specified if IsZero
+ Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String()
+ IPRange IPRange `param:"sip"`
+ Services string `param:"ss"` // Create by initializing AccountSASServices and then call String()
+ ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
+}
+
+// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
+// the proper SAS query parameters.
+func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+ if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+ return SASQueryParameters{}, errors.New("Account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+ }
+ if v.Version == "" {
+ v.Version = SASVersion
+ }
+ perms := &AccountSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+
+ startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
+
+ stringToSign := strings.Join([]string{
+ sharedKeyCredential.AccountName(),
+ v.Permissions,
+ v.Services,
+ v.ResourceTypes,
+ startTime,
+ expiryTime,
+ v.IPRange.String(),
+ string(v.Protocol),
+ v.Version,
+ ""}, // That right, the account SAS requires a terminating extra newline
+ "\n")
+
+ signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+ p := SASQueryParameters{
+ // Common SAS parameters
+ version: v.Version,
+ protocol: v.Protocol,
+ startTime: v.StartTime,
+ expiryTime: v.ExpiryTime,
+ permissions: v.Permissions,
+ ipRange: v.IPRange,
+
+ // Account-specific SAS parameters
+ services: v.Services,
+ resourceTypes: v.ResourceTypes,
+
+ // Calculated SAS signature
+ signature: signature,
+ }
+ return p, nil
+}
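+
+// A sketch of generating an account SAS (cred is an assumed *SharedKeyCredential;
+// see NewSharedKeyCredential):
+//
+//    sasQueryParams, err := AccountSASSignatureValues{
+//        Protocol:      SASProtocolHTTPS,
+//        ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
+//        Permissions:   AccountSASPermissions{Read: true, List: true}.String(),
+//        Services:      AccountSASServices{File: true}.String(),
+//        ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
+//    }.NewSASQueryParameters(cred)
+//    if err == nil {
+//        queryString := sasQueryParams.Encode() // append this to a resource URL
+//        _ = queryString
+//    }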
+
+// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
+type AccountSASPermissions struct {
+ Read, Write, Delete, List, Add, Create, Update, Process bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Permissions field.
+func (p AccountSASPermissions) String() string {
+ var buffer bytes.Buffer
+ if p.Read {
+ buffer.WriteRune('r')
+ }
+ if p.Write {
+ buffer.WriteRune('w')
+ }
+ if p.Delete {
+ buffer.WriteRune('d')
+ }
+ if p.List {
+ buffer.WriteRune('l')
+ }
+ if p.Add {
+ buffer.WriteRune('a')
+ }
+ if p.Create {
+ buffer.WriteRune('c')
+ }
+ if p.Update {
+ buffer.WriteRune('u')
+ }
+ if p.Process {
+ buffer.WriteRune('p')
+ }
+ return buffer.String()
+}
+
+// Parse initializes the AccountSASPermissions's fields from a string.
+func (p *AccountSASPermissions) Parse(s string) error {
+ *p = AccountSASPermissions{} // Clear out the flags
+ for _, r := range s {
+ switch r {
+ case 'r':
+ p.Read = true
+ case 'w':
+ p.Write = true
+ case 'd':
+ p.Delete = true
+ case 'l':
+ p.List = true
+ case 'a':
+ p.Add = true
+ case 'c':
+ p.Create = true
+ case 'u':
+ p.Update = true
+ case 'p':
+ p.Process = true
+ default:
+ return fmt.Errorf("Invalid permission character: '%v'", r)
+ }
+ }
+ return nil
+}
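+
+// String and Parse round-trip, for example:
+//
+//    AccountSASPermissions{Read: true, List: true}.String() // "rl"
+//    p := AccountSASPermissions{}
+//    _ = p.Parse("rl") // p.Read == true, p.List == true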
+
+// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
+type AccountSASServices struct {
+ Blob, Queue, File bool
+}
+
+// String produces the SAS services string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Services field.
+func (s AccountSASServices) String() string {
+ var buffer bytes.Buffer
+ if s.Blob {
+ buffer.WriteRune('b')
+ }
+ if s.Queue {
+ buffer.WriteRune('q')
+ }
+ if s.File {
+ buffer.WriteRune('f')
+ }
+ return buffer.String()
+}
+
+// Parse initializes the AccountSASServices' fields from a string.
+func (a *AccountSASServices) Parse(s string) error {
+ *a = AccountSASServices{} // Clear out the flags
+ for _, r := range s {
+ switch r {
+ case 'b':
+ a.Blob = true
+ case 'q':
+ a.Queue = true
+ case 'f':
+ a.File = true
+ default:
+ return fmt.Errorf("Invalid service character: '%v'", r)
+ }
+ }
+ return nil
+}
+
+// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
+type AccountSASResourceTypes struct {
+ Service, Container, Object bool
+}
+
+// String produces the SAS resource types string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's ResourceTypes field.
+func (rt AccountSASResourceTypes) String() string {
+ var buffer bytes.Buffer
+ if rt.Service {
+ buffer.WriteRune('s')
+ }
+ if rt.Container {
+ buffer.WriteRune('c')
+ }
+ if rt.Object {
+ buffer.WriteRune('o')
+ }
+ return buffer.String()
+}
+
+// Parse initializes the AccountSASResourceType's fields from a string.
+func (rt *AccountSASResourceTypes) Parse(s string) error {
+ *rt = AccountSASResourceTypes{} // Clear out the flags
+ for _, r := range s {
+ switch r {
+ case 's':
+ rt.Service = true
+ case 'c':
+ rt.Container = true
+ case 'o':
+ rt.Object = true
+ default:
+ return fmt.Errorf("Invalid resource type: '%v'", r)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_sas_query_params.go
new file mode 100644
index 0000000..fe1fdc4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_sas_query_params.go
@@ -0,0 +1,296 @@
+package azfile
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// SASVersion indicates the SAS version.
+const SASVersion = ServiceVersion
+
+type SASProtocol string
+
+const (
+ // SASProtocolHTTPS can be specified for a SAS protocol
+ SASProtocolHTTPS SASProtocol = "https"
+
+ // SASProtocolHTTPSandHTTP can be specified for a SAS protocol
+ SASProtocolHTTPSandHTTP SASProtocol = "https,http"
+)
+
+// FormatTimesForSASSigning converts start and expiry time.Time values into strings suitable for a
+// SAS's StartTime or ExpiryTime fields. Returns "" for any value that IsZero().
+func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
+ ss := ""
+ if !startTime.IsZero() {
+ ss = formatSASTimeWithDefaultFormat(&startTime)
+ }
+ se := ""
+ if !expiryTime.IsZero() {
+ se = formatSASTimeWithDefaultFormat(&expiryTime)
+ }
+ return ss, se
+}
+
+// SASTimeFormats represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+var SASTimeFormats = []string{"2006-01-02T15:04:05Z", "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+
+// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatSASTimeWithDefaultFormat(t *time.Time) string {
+ return formatSASTime(t, SASTimeFormats[0]) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatSASTime formats time with the given format, using ISO 8601 "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatSASTime(t *time.Time, format string) string {
+ if format != "" {
+ return t.Format(format)
+ }
+ return t.Format(SASTimeFormats[0]) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseSASTimeString tries to parse a SAS time string against the supported formats.
+func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
+ for _, sasTimeFormat := range SASTimeFormats {
+ t, err = time.Parse(sasTimeFormat, val)
+ if err == nil {
+ timeFormat = sasTimeFormat
+ break
+ }
+ }
+
+ if err != nil {
+ err = errors.New("fail to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+ }
+
+ return
+}
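+
+// For example, all of these parse successfully (illustrative values):
+//
+//    parseSASTimeString("2019-11-01T15:04:05Z") // full date-time format
+//    parseSASTimeString("2019-11-01T15:04Z")    // minute precision
+//    parseSASTimeString("2019-11-01")           // date only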
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
+// to a query parameter map by calling AddToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+//
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type SASQueryParameters struct {
+ // All members are immutable or values so copies of this struct are goroutine-safe.
+ version string `param:"sv"`
+ services string `param:"ss"`
+ resourceTypes string `param:"srt"`
+ protocol SASProtocol `param:"spr"`
+ startTime time.Time `param:"st"`
+ expiryTime time.Time `param:"se"`
+ ipRange IPRange `param:"sip"`
+ identifier string `param:"si"`
+ resource string `param:"sr"`
+ permissions string `param:"sp"`
+ signature string `param:"sig"`
+ cacheControl string `param:"rscc"`
+ contentDisposition string `param:"rscd"`
+ contentEncoding string `param:"rsce"`
+ contentLanguage string `param:"rscl"`
+ contentType string `param:"rsct"`
+
+ // private member used for startTime and expiryTime formatting.
+ stTimeFormat string
+ seTimeFormat string
+}
+
+func (p *SASQueryParameters) Version() string {
+ return p.version
+}
+
+func (p *SASQueryParameters) Services() string {
+ return p.services
+}
+func (p *SASQueryParameters) ResourceTypes() string {
+ return p.resourceTypes
+}
+func (p *SASQueryParameters) Protocol() SASProtocol {
+ return p.protocol
+}
+func (p *SASQueryParameters) StartTime() time.Time {
+ return p.startTime
+}
+func (p *SASQueryParameters) ExpiryTime() time.Time {
+ return p.expiryTime
+}
+
+func (p *SASQueryParameters) IPRange() IPRange {
+ return p.ipRange
+}
+
+func (p *SASQueryParameters) Identifier() string {
+ return p.identifier
+}
+
+func (p *SASQueryParameters) Resource() string {
+ return p.resource
+}
+func (p *SASQueryParameters) Permissions() string {
+ return p.permissions
+}
+
+func (p *SASQueryParameters) Signature() string {
+ return p.signature
+}
+
+func (p *SASQueryParameters) CacheControl() string {
+ return p.cacheControl
+}
+
+func (p *SASQueryParameters) ContentDisposition() string {
+ return p.contentDisposition
+}
+
+func (p *SASQueryParameters) ContentEncoding() string {
+ return p.contentEncoding
+}
+
+func (p *SASQueryParameters) ContentLanguage() string {
+ return p.contentLanguage
+}
+
+func (p *SASQueryParameters) ContentType() string {
+ return p.contentType
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+ Start net.IP // Not specified if length = 0
+ End net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+ if len(ipr.Start) == 0 {
+ return ""
+ }
+ start := ipr.Start.String()
+ if len(ipr.End) == 0 {
+ return start
+ }
+ return start + "-" + ipr.End.String()
+}
+
+// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
+ p := SASQueryParameters{}
+ for k, v := range values {
+ val := v[0]
+ isSASKey := true
+ switch strings.ToLower(k) {
+ case "sv":
+ p.version = val
+ case "ss":
+ p.services = val
+ case "srt":
+ p.resourceTypes = val
+ case "spr":
+ p.protocol = SASProtocol(val)
+ case "st":
+ p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
+ case "se":
+ p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
+ case "sip":
+ dashIndex := strings.Index(val, "-")
+ if dashIndex == -1 {
+ p.ipRange.Start = net.ParseIP(val)
+ } else {
+ p.ipRange.Start = net.ParseIP(val[:dashIndex])
+ p.ipRange.End = net.ParseIP(val[dashIndex+1:])
+ }
+ case "si":
+ p.identifier = val
+ case "sr":
+ p.resource = val
+ case "sp":
+ p.permissions = val
+ case "sig":
+ p.signature = val
+ case "rscc":
+ p.cacheControl = val
+ case "rscd":
+ p.contentDisposition = val
+ case "rsce":
+ p.contentEncoding = val
+ case "rscl":
+ p.contentLanguage = val
+ case "rsct":
+ p.contentType = val
+ default:
+ isSASKey = false // We didn't recognize the query parameter
+ }
+ if isSASKey && deleteSASParametersFromValues {
+ delete(values, k)
+ }
+ }
+ return p
+}
+
+// AddToValues adds the SAS components to the specified query parameters map.
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
+ if p.version != "" {
+ v.Add("sv", p.version)
+ }
+ if p.services != "" {
+ v.Add("ss", p.services)
+ }
+ if p.resourceTypes != "" {
+ v.Add("srt", p.resourceTypes)
+ }
+ if p.protocol != "" {
+ v.Add("spr", string(p.protocol))
+ }
+ if !p.startTime.IsZero() {
+ v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat))
+ }
+ if !p.expiryTime.IsZero() {
+ v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat))
+ }
+ if len(p.ipRange.Start) > 0 {
+ v.Add("sip", p.ipRange.String())
+ }
+ if p.identifier != "" {
+ v.Add("si", p.identifier)
+ }
+ if p.resource != "" {
+ v.Add("sr", p.resource)
+ }
+ if p.permissions != "" {
+ v.Add("sp", p.permissions)
+ }
+ if p.signature != "" {
+ v.Add("sig", p.signature)
+ }
+ if p.cacheControl != "" {
+ v.Add("rscc", p.cacheControl)
+ }
+ if p.contentDisposition != "" {
+ v.Add("rscd", p.contentDisposition)
+ }
+ if p.contentEncoding != "" {
+ v.Add("rsce", p.contentEncoding)
+ }
+ if p.contentLanguage != "" {
+ v.Add("rscl", p.contentLanguage)
+ }
+ if p.contentType != "" {
+ v.Add("rsct", p.contentType)
+ }
+ return v
+}
+
+// Encode encodes the SAS query parameters into URL encoded form sorted by key.
+func (p *SASQueryParameters) Encode() string {
+ v := url.Values{}
+ p.addToValues(v)
+ return v.Encode()
+}
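+
+// Parsing and re-encoding round-trip, sketched (the URL is illustrative):
+//
+//    u, _ := url.Parse("https://myaccount.file.core.windows.net/myshare?sv=2019-02-02&sig=abc")
+//    sas := newSASQueryParameters(u.Query(), false)
+//    _ = sas.Version() // "2019-02-02"
+//    _ = sas.Encode()  // the SAS components, URL-encoded and sorted by key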
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_service_codes_common.go
new file mode 100644
index 0000000..9b69797
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_service_codes_common.go
@@ -0,0 +1,131 @@
+package azfile
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
+
+const (
+ // ServiceCodeNone is the default value. It indicates that the error wasn't related to the service or that the service didn't return a code.
+ ServiceCodeNone ServiceCodeType = ""
+
+ // ServiceCodeAccountAlreadyExists means the specified account already exists.
+ ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists"
+
+ // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
+ ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated"
+
+ // ServiceCodeAccountIsDisabled means the specified account is disabled (403).
+ ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled"
+
+ // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
+ ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed"
+
+ // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
+ ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported"
+
+ // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
+ ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet"
+
+ // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
+ ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey"
+
+ // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403).
+ ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions"
+
+ // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
+ ServiceCodeInternalError ServiceCodeType = "InternalError"
+
+ // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400).
+ ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo"
+
+ // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
+ ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue"
+
+ // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
+ ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb"
+
+ // ServiceCodeInvalidInput means one of the request inputs is not valid (400).
+ ServiceCodeInvalidInput ServiceCodeType = "InvalidInput"
+
+ // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
+ ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5"
+
+ // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
+ ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata"
+
+ // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
+ ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue"
+
+ // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
+ ServiceCodeInvalidRange ServiceCodeType = "InvalidRange"
+
+ // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
+ ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName"
+
+ // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
+ ServiceCodeInvalidURI ServiceCodeType = "InvalidUri"
+
+ // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
+ ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument"
+
+ // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
+ ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue"
+
+ // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
+ ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch"
+
+ // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
+ ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge"
+
+ // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
+ ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader"
+
+ // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
+ ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter"
+
+ // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
+ ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader"
+
+ // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
+ ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode"
+
+ // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
+ ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported"
+
+ // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
+ ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut"
+
+ // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
+ ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput"
+
+ // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
+ ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue"
+
+ // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
+ ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge"
+
+ // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
+ ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch"
+
+ // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
+ ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse"
+
+ // ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
+ ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists"
+
+ // ServiceCodeResourceNotFound means the specified resource does not exist (404).
+ ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
+
+ // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503).
+ ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
+
+ // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
+ ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader"
+
+ // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
+ ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode"
+
+ // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
+ ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter"
+
+ // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
+ ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb"
+)
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_storage_error.go
new file mode 100644
index 0000000..e6c87e8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_storage_error.go
@@ -0,0 +1,117 @@
+package azfile
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "sort"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+func init() {
+ // wire up our custom error handling constructor
+ responseErrorFactory = newStorageError
+}
+
+// ServiceCodeType is a string identifying a storage service error.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+type ServiceCodeType string
+
+// StorageError identifies a responder-generated network or response parsing error.
+type StorageError interface {
+ // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
+ ResponseError
+
+ // ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
+ ServiceCode() ServiceCodeType
+}
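+
+// Typical error-recovery usage, sketched:
+//
+//    if stgErr, ok := err.(StorageError); ok {
+//        if stgErr.ServiceCode() == ServiceCodeResourceNotFound {
+//            // resource is missing; create it, or surface a 404 to the caller
+//        }
+//    }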
+
+// storageError is the internal struct that implements the public StorageError interface.
+type storageError struct {
+ responseError
+ serviceCode ServiceCodeType
+ details map[string]string
+}
+
+// newStorageError creates an error object that implements the error interface.
+func newStorageError(cause error, response *http.Response, description string) error {
+ return &storageError{
+ responseError: responseError{
+ ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
+ response: response,
+ description: description,
+ },
+ serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
+ }
+}
+
+// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
+func (e *storageError) ServiceCode() ServiceCodeType {
+ return e.serviceCode
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e *storageError) Error() string {
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode)
+ fmt.Fprintf(b, "Description=%s, Details: ", e.description)
+ if len(e.details) == 0 {
+ b.WriteString("(none)\n")
+ } else {
+ b.WriteRune('\n')
+ keys := make([]string, 0, len(e.details))
+ // Alphabetize the details
+ for k := range e.details {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ fmt.Fprintf(b, " %s: %+v\n", k, e.details[k])
+ }
+ }
+ req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
+ pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
+ return e.ErrorNode.Error(b.String())
+}
+
+// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
+func (e *storageError) Temporary() bool {
+ if e.response != nil {
+ if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
+ return true
+ }
+ }
+ return e.ErrorNode.Temporary()
+}
+
+// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
+func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+ tokName := ""
+ var t xml.Token
+ for t, err = d.Token(); err == nil; t, err = d.Token() {
+ switch tt := t.(type) {
+ case xml.StartElement:
+ tokName = tt.Name.Local
+ break
+ case xml.CharData:
+ switch tokName {
+ case "Message":
+ e.description = string(tt)
+ default:
+ if e.details == nil {
+ e.details = map[string]string{}
+ }
+ e.details[tokName] = string(tt)
+ }
+ }
+ }
+ return nil
+}
+
+// Make it clear that a panic occurred due to sanity checks failing
+// This means the user should correct the programming errors
+func sanityCheckFailed(msg string) {
+ panic("Sanity check failed: " + msg)
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_util_validate.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_util_validate.go
new file mode 100644
index 0000000..37f1be3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_util_validate.go
@@ -0,0 +1,68 @@
+package azfile
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+)
+
+const (
+ // CountToEnd indicates a flag value for the count parameter. It means the count of bytes
+ // from the start offset to the end of the file.
+ CountToEnd = 0
+)
+
+// httpRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count-1 inclusively.
+// An httpRange which has a zero-value offset, and a count with value CountToEnd indicates the entire resource.
+// An httpRange which has a non zero-value offset but a count with value CountToEnd indicates from the offset to the resource's end.
+type httpRange struct {
+ offset int64
+ count int64
+}
+
+func (r httpRange) pointers() *string {
+ if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
+ return nil // No specified range
+ }
+
+ return toRange(r.offset, r.count)
+}
+
+// toRange builds a range string that adheres to the REST API.
+// A count with value CountToEnd means count of bytes from offset to the end of file.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-the-range-header-for-file-service-operations.
+func toRange(offset int64, count int64) *string {
+ // No additional validation by design. API can validate parameter by case, and use this method.
+ endRange := ""
+ if count != CountToEnd {
+ endRange = strconv.FormatInt(offset+count-1, 10)
+ }
+ r := fmt.Sprintf("bytes=%d-%s", offset, endRange)
+ return &r
+}
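+
+// For example:
+//
+//    *toRange(0, 1024) == "bytes=0-1023"
+//    httpRange{offset: 512, count: CountToEnd}.pointers() // -> "bytes=512-"
+//    httpRange{}.pointers() == nil // zero offset + CountToEnd means the entire resource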
+
+func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 {
+ if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+ return 0
+ }
+ validateSeekableStreamAt0(body)
+ count, err := body.Seek(0, io.SeekEnd)
+ if err != nil {
+ sanityCheckFailed(err.Error())
+ }
+ body.Seek(0, io.SeekStart)
+ return count
+}
+
+func validateSeekableStreamAt0(body io.ReadSeeker) {
+ if body == nil { // nil bodies are "logically" seekable to 0
+ return
+ }
+ if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+ if err != nil {
+ sanityCheckFailed(err.Error())
+ }
+ sanityCheckFailed("stream must be set to position 0")
+ }
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_uuid.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_uuid.go
new file mode 100644
index 0000000..b77be6d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zc_uuid.go
@@ -0,0 +1,77 @@
+package azfile
+
+import (
+ "crypto/rand"
+ "fmt"
+ "strconv"
+)
+
+// The UUID reserved variants.
+const (
+ reservedNCS byte = 0x80
+ reservedRFC4122 byte = 0x40
+ reservedMicrosoft byte = 0x20
+ reservedFuture byte = 0x00
+)
+
+// A UUID representation compliant with specification in RFC 4122 document.
+type uuid [16]byte
+
+// newUUID returns a new uuid using the RFC 4122 algorithm.
+func newUUID() (u uuid) {
+ u = uuid{}
+ // Set all bits to randomly (or pseudo-randomly) chosen values.
+ rand.Read(u[:])
+ u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
+
+ var version byte = 4
+ u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
+ return
+}
+
+// String returns an unparsed version of the generated UUID sequence.
+func (u uuid) String() string {
+ return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
+// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
+func parseUUID(uuidStr string) uuid {
+ char := func(hexString string) byte {
+ i, _ := strconv.ParseUint(hexString, 16, 8)
+ return byte(i)
+ }
+ if uuidStr[0] == '{' {
+ uuidStr = uuidStr[1:] // Skip over the '{'
+ }
+ // 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
+ // 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
+ // 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
+ uuidVal := uuid{
+ char(uuidStr[0:2]),
+ char(uuidStr[2:4]),
+ char(uuidStr[4:6]),
+ char(uuidStr[6:8]),
+
+ char(uuidStr[9:11]),
+ char(uuidStr[11:13]),
+
+ char(uuidStr[14:16]),
+ char(uuidStr[16:18]),
+
+ char(uuidStr[19:21]),
+ char(uuidStr[21:23]),
+
+ char(uuidStr[24:26]),
+ char(uuidStr[26:28]),
+ char(uuidStr[28:30]),
+ char(uuidStr[30:32]),
+ char(uuidStr[32:34]),
+ char(uuidStr[34:36]),
+ }
+ return uuidVal
+}
+
+func (u uuid) bytes() []byte {
+ return u[:]
+}
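+
+// newUUID/String/parseUUID round-trip, for example:
+//
+//    u := newUUID()
+//    s := u.String()   // e.g. "xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx" (version 4)
+//    parseUUID(s) == u // true; parseUUID also accepts the "{...}"-wrapped form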
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zt_doc.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zt_doc.go
new file mode 100644
index 0000000..054c5ea
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zt_doc.go
@@ -0,0 +1,83 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package azfile allows you to manipulate Azure Storage shares, directories and files objects.
+
+URL Types
+
+The most common types you'll work with are the XxxURL types. The methods of these types make requests
+against the Azure Storage Service.
+
+ - ServiceURL's methods perform operations on a storage account.
+ - ShareURL's methods perform operations on an account's share.
+ - DirectoryURL's methods perform operations on a share's directory.
+ - FileURL's methods perform operations on a directory's file.
+
+Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
+request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response are processed.
+The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
+
+Pipelines are thread-safe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
+an initial pipeline. When you call ServiceURL's NewShareURL method, the new ShareURL object has its own
+URL but it shares the same pipeline as the parent ServiceURL object.
+
+To work with a directory, call ShareURL or DirectoryURL's NewDirectoryURL method to get a new DirectoryURL object.
+The new DirectoryURL object has its own URL but it shares the same pipeline as the parent XxxURL object.
+
+To work with a file, call DirectoryURL's NewFileURL method. When you call DirectoryURL's NewFileURL,
+the new FileURL object has its own URL but it shares the same pipeline as the parent DirectoryURL object.
+
+If you'd like to use a different pipeline with a ServiceURL, ShareURL, DirectoryURL or FileURL object, then call the XxxURL
+object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
+with the same URL as the original but with the specified pipeline.
+
+Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
+XxxURL objects share a lot of system resources making them very efficient.
+
+All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
+transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
+example of how to deal with errors.
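+
+For example, composing URL objects might look like this (the account, share, directory
+and file names are placeholders):
+
+    p := NewPipeline(NewAnonymousCredential(), PipelineOptions{})
+    u, _ := url.Parse("https://myaccount.file.core.windows.net")
+    serviceURL := NewServiceURL(*u, p)
+    shareURL := serviceURL.NewShareURL("myshare")
+    directoryURL := shareURL.NewDirectoryURL("mydir")
+    fileURL := directoryURL.NewFileURL("myfile.txt")
+    _ = fileURL // each object shares its parent's pipeline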
+
+URL and Shared Access Signature Manipulation
+
+The library includes a FileURLParts type for deconstructing and reconstructing URLs. And you can use the following three types
+for generating and parsing Shared Access Signatures (SAS):
+ - Use the AccountSASSignatureValues type to create a SAS for a storage account.
+ - Use the FileSASSignatureValues type to create a SAS for a share or file.
+ - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
+
+To generate a SAS, you must use the SharedKeyCredential type.
+
+Credentials
+
+When creating a request pipeline, you must specify one of this package's credential types.
+ - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
+ - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
+ to generate Shared Access Signatures.
+
+HTTP Request Policy Factories
+
+This package defines several request policy factories for use with the pipeline package.
+Most applications will not use these factories directly; instead, the NewPipeline
+function creates these factories, initializes them (via the PipelineOptions type)
+and returns a pipeline object for use by the XxxURL objects.
+
+However, for advanced scenarios, developers can access these policy factories directly
+and even create their own and then construct their own pipeline in order to affect HTTP
+requests and responses performed by the XxxURL objects. For example, developers can
+introduce their own logging, random failures, request recording & playback for fast
+testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
+possibilities are endless!
+
+Below are the request pipeline policy factory functions that are provided with this
+package:
+ - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
+ - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
+ - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
+ - NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
+
+Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
+*/
+package azfile
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_client.go
new file mode 100644
index 0000000..c2a7014
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_client.go
@@ -0,0 +1,38 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "net/url"
+)
+
+const (
+ // ServiceVersion specifies the version of the operations used in this package.
+ ServiceVersion = "2019-02-02"
+)
+
+// managementClient is the base client for Azfile.
+type managementClient struct {
+ url url.URL
+ p pipeline.Pipeline
+}
+
+// newManagementClient creates an instance of the managementClient client.
+func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient {
+ return managementClient{
+ url: url,
+ p: p,
+ }
+}
+
+// URL returns a copy of the URL for this client.
+func (mc managementClient) URL() url.URL {
+ return mc.url
+}
+
+// Pipeline returns the pipeline for this client.
+func (mc managementClient) Pipeline() pipeline.Pipeline {
+ return mc.p
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_directory.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_directory.go
new file mode 100644
index 0000000..618485b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_directory.go
@@ -0,0 +1,567 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/xml"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// directoryClient is the client for the Directory methods of the Azfile service.
+type directoryClient struct {
+ managementClient
+}
+
+// newDirectoryClient creates an instance of the directoryClient client.
+func newDirectoryClient(url url.URL, p pipeline.Pipeline) directoryClient {
+ return directoryClient{newManagementClient(url, p)}
+}
+
+// Create creates a new directory under the specified share or parent directory.
+//
+// fileAttributes is if specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and
+// ‘Directory’ for directory. ‘None’ can also be specified as default. fileCreationTime is creation time for the
+// file/directory. Default value: Now. fileLastWriteTime is last write time for the file/directory. Default value: Now.
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a name-value pair to associate with a file storage object.
+// filePermission is if specified the permission (security descriptor) shall be set for the directory/file. This header
+// can be used if Permission size is <= 8KB, else x-ms-file-permission-key header shall be used. Default value:
+// Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the
+// x-ms-file-permission or x-ms-file-permission-key should be specified. filePermissionKey is key of the permission to
+// be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be
+// specified.
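+//
+// A hedged sketch against this internal client, passing the documented defaults
+// for the SMB property strings plus a sample metadata pair (values illustrative;
+// exported callers normally go through the DirectoryURL wrapper instead):
+//
+//	resp, err := client.Create(ctx, "None", "Now", "Now", nil,
+//		map[string]string{"project": "demo"}, nil, nil)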
+func (client directoryClient) Create(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, metadata map[string]string, filePermission *string, filePermissionKey *string) (*DirectoryCreateResponse, error) {
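+ // Generated guard: when timeout is non-nil it must be >= 0 (the inclusiveMinimum rule below).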
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.createPreparer(fileAttributes, fileCreationTime, fileLastWriteTime, timeout, metadata, filePermission, filePermissionKey)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DirectoryCreateResponse), err
+}
+
+// createPreparer prepares the Create request.
+func (client directoryClient) createPreparer(fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, metadata map[string]string, filePermission *string, filePermissionKey *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "directory")
+ req.URL.RawQuery = params.Encode()
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if filePermission != nil {
+ req.Header.Set("x-ms-file-permission", *filePermission)
+ }
+ if filePermissionKey != nil {
+ req.Header.Set("x-ms-file-permission-key", *filePermissionKey)
+ }
+ req.Header.Set("x-ms-file-attributes", fileAttributes)
+ req.Header.Set("x-ms-file-creation-time", fileCreationTime)
+ req.Header.Set("x-ms-file-last-write-time", fileLastWriteTime)
+ return req, nil
+}
+
+// createResponder handles the response to the Create request.
+func (client directoryClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
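+ // Drain and close the response body so the underlying HTTP connection can be reused.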
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &DirectoryCreateResponse{rawResponse: resp.Response()}, err
+}
+
+// Delete removes the specified empty directory. Note that the directory must be empty before it can be deleted.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client directoryClient) Delete(ctx context.Context, timeout *int32) (*DirectoryDeleteResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.deletePreparer(timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DirectoryDeleteResponse), err
+}
+
+// deletePreparer prepares the Delete request.
+func (client directoryClient) deletePreparer(timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("DELETE", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "directory")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// deleteResponder handles the response to the Delete request.
+func (client directoryClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &DirectoryDeleteResponse{rawResponse: resp.Response()}, err
+}
+
+// ForceCloseHandles closes all handles open for the given directory.
+//
+// handleID is specifies handle ID opened on the file or directory to be closed. Asterisk (‘*’) is a wildcard that
+// specifies all handles. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. marker is a string value that identifies the portion of the list to be
+// returned with the next list operation. The operation returns a marker value within the response body if the list
+// returned was not complete. The marker value may then be used in a subsequent call to request the next set of list
+// items. The marker value is opaque to the client. sharesnapshot is the snapshot parameter is an opaque DateTime value
+// that, when present, specifies the share snapshot to query. recursive is specifies operation should apply to the
+// directory specified in the URI, its files, its subdirectories and their files.
+func (client directoryClient) ForceCloseHandles(ctx context.Context, handleID string, timeout *int32, marker *string, sharesnapshot *string, recursive *bool) (*DirectoryForceCloseHandlesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.forceCloseHandlesPreparer(handleID, timeout, marker, sharesnapshot, recursive)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.forceCloseHandlesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DirectoryForceCloseHandlesResponse), err
+}
+
+// forceCloseHandlesPreparer prepares the ForceCloseHandles request.
+func (client directoryClient) forceCloseHandlesPreparer(handleID string, timeout *int32, marker *string, sharesnapshot *string, recursive *bool) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ params.Set("comp", "forceclosehandles")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-handle-id", handleID)
+ if recursive != nil {
+ req.Header.Set("x-ms-recursive", strconv.FormatBool(*recursive))
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// forceCloseHandlesResponder handles the response to the ForceCloseHandles request.
+func (client directoryClient) forceCloseHandlesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &DirectoryForceCloseHandlesResponse{rawResponse: resp.Response()}, err
+}
+
+// GetProperties returns all system properties for the specified directory, and can also be used to check the existence
+// of a directory. The data returned does not include the files in the directory or any subdirectories.
+//
+// sharesnapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot
+// to query. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client directoryClient) GetProperties(ctx context.Context, sharesnapshot *string, timeout *int32) (*DirectoryGetPropertiesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getPropertiesPreparer(sharesnapshot, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DirectoryGetPropertiesResponse), err
+}
+
+// getPropertiesPreparer prepares the GetProperties request.
+func (client directoryClient) getPropertiesPreparer(sharesnapshot *string, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "directory")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getPropertiesResponder handles the response to the GetProperties request.
+func (client directoryClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &DirectoryGetPropertiesResponse{rawResponse: resp.Response()}, err
+}
+
+// ListFilesAndDirectoriesSegment returns a list of files or directories under the specified share or directory. It
+// lists the contents only for a single level of the directory hierarchy.
+//
+// prefix is filters the results to return only entries whose name begins with the specified prefix. sharesnapshot is
+// the snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. marker
+// is a string value that identifies the portion of the list to be returned with the next list operation. The operation
+// returns a marker value within the response body if the list returned was not complete. The marker value may then be
+// used in a subsequent call to request the next set of list items. The marker value is opaque to the client.
+// maxresults is specifies the maximum number of entries to return. If the request does not specify maxresults, or
+// specifies a value greater than 5,000, the server will return up to 5,000 items. timeout is the timeout parameter is
+// expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
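+//
+// A hedged pagination sketch against the exported DirectoryURL wrapper (names as
+// in this package's public API; error handling elided): feed each response's
+// NextMarker back in until the marker reports done:
+//
+//	for marker := (Marker{}); marker.NotDone(); {
+//		listing, err := directoryURL.ListFilesAndDirectoriesSegment(ctx, marker, ListFilesAndDirectoriesOptions{})
+//		if err != nil {
+//			break
+//		}
+//		marker = listing.NextMarker
+//		// inspect the returned directory and file entries here
+//	}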
+func (client directoryClient) ListFilesAndDirectoriesSegment(ctx context.Context, prefix *string, sharesnapshot *string, marker *string, maxresults *int32, timeout *int32) (*ListFilesAndDirectoriesSegmentResponse, error) {
+ if err := validate([]validation{
+ {targetValue: maxresults,
+ constraints: []constraint{{target: "maxresults", name: null, rule: false,
+ chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.listFilesAndDirectoriesSegmentPreparer(prefix, sharesnapshot, marker, maxresults, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listFilesAndDirectoriesSegmentResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ListFilesAndDirectoriesSegmentResponse), err
+}
+
+// listFilesAndDirectoriesSegmentPreparer prepares the ListFilesAndDirectoriesSegment request.
+func (client directoryClient) listFilesAndDirectoriesSegmentPreparer(prefix *string, sharesnapshot *string, marker *string, maxresults *int32, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if prefix != nil && len(*prefix) > 0 {
+ params.Set("prefix", *prefix)
+ }
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if maxresults != nil {
+ params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "directory")
+ params.Set("comp", "list")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// listFilesAndDirectoriesSegmentResponder handles the response to the ListFilesAndDirectoriesSegment request.
+func (client directoryClient) listFilesAndDirectoriesSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &ListFilesAndDirectoriesSegmentResponse{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// ListHandles lists handles for the directory.
+//
+// marker is a string value that identifies the portion of the list to be returned with the next list operation. The
+// operation returns a marker value within the response body if the list returned was not complete. The marker value
+// may then be used in a subsequent call to request the next set of list items. The marker value is opaque to the
+// client. maxresults is specifies the maximum number of entries to return. If the request does not specify maxresults,
+// or specifies a value greater than 5,000, the server will return up to 5,000 items. timeout is the timeout parameter
+// is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. sharesnapshot is the snapshot parameter is an opaque DateTime value that,
+// when present, specifies the share snapshot to query. recursive is specifies operation should apply to the directory
+// specified in the URI, its files, its subdirectories and their files.
+func (client directoryClient) ListHandles(ctx context.Context, marker *string, maxresults *int32, timeout *int32, sharesnapshot *string, recursive *bool) (*ListHandlesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: maxresults,
+ constraints: []constraint{{target: "maxresults", name: null, rule: false,
+ chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.listHandlesPreparer(marker, maxresults, timeout, sharesnapshot, recursive)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listHandlesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ListHandlesResponse), err
+}
+
+// listHandlesPreparer prepares the ListHandles request.
+func (client directoryClient) listHandlesPreparer(marker *string, maxresults *int32, timeout *int32, sharesnapshot *string, recursive *bool) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if maxresults != nil {
+ params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ params.Set("comp", "listhandles")
+ req.URL.RawQuery = params.Encode()
+ if recursive != nil {
+ req.Header.Set("x-ms-recursive", strconv.FormatBool(*recursive))
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// listHandlesResponder handles the response to the ListHandles request.
+func (client directoryClient) listHandlesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &ListHandlesResponse{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// SetMetadata updates user defined metadata for the specified directory.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a name-value pair to associate with a file storage object.
+func (client directoryClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string) (*DirectorySetMetadataResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setMetadataPreparer(timeout, metadata)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DirectorySetMetadataResponse), err
+}
+
+// setMetadataPreparer prepares the SetMetadata request.
+func (client directoryClient) setMetadataPreparer(timeout *int32, metadata map[string]string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "directory")
+ params.Set("comp", "metadata")
+ req.URL.RawQuery = params.Encode()
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// setMetadataResponder handles the response to the SetMetadata request.
+func (client directoryClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &DirectorySetMetadataResponse{rawResponse: resp.Response()}, err
+}
+
+// SetProperties sets properties on the directory.
+//
+// fileAttributes is if specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and
+// ‘Directory’ for directory. ‘None’ can also be specified as default. fileCreationTime is creation time for the
+// file/directory. Default value: Now. fileLastWriteTime is last write time for the file/directory. Default value: Now.
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. filePermission is if specified the permission (security descriptor) shall
+// be set for the directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file-permission-key
+// header shall be used. Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl.
+// Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. filePermissionKey is key
+// of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or
+// x-ms-file-permission-key should be specified.
+func (client directoryClient) SetProperties(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, filePermission *string, filePermissionKey *string) (*DirectorySetPropertiesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setPropertiesPreparer(fileAttributes, fileCreationTime, fileLastWriteTime, timeout, filePermission, filePermissionKey)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DirectorySetPropertiesResponse), err
+}
+
+// setPropertiesPreparer prepares the SetProperties request.
+func (client directoryClient) setPropertiesPreparer(fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, filePermission *string, filePermissionKey *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "directory")
+ params.Set("comp", "properties")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if filePermission != nil {
+ req.Header.Set("x-ms-file-permission", *filePermission)
+ }
+ if filePermissionKey != nil {
+ req.Header.Set("x-ms-file-permission-key", *filePermissionKey)
+ }
+ req.Header.Set("x-ms-file-attributes", fileAttributes)
+ req.Header.Set("x-ms-file-creation-time", fileCreationTime)
+ req.Header.Set("x-ms-file-last-write-time", fileLastWriteTime)
+ return req, nil
+}
+
+// setPropertiesResponder handles the response to the SetProperties request.
+func (client directoryClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &DirectorySetPropertiesResponse{rawResponse: resp.Response()}, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_file.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_file.go
new file mode 100644
index 0000000..e3fa1b1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_file.go
@@ -0,0 +1,906 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/xml"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// fileClient is the client for the File methods of the Azfile service.
+type fileClient struct {
+ managementClient
+}
+
+// newFileClient creates an instance of the fileClient client.
+func newFileClient(url url.URL, p pipeline.Pipeline) fileClient {
+ return fileClient{newManagementClient(url, p)}
+}
+
+// AbortCopy aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
+//
+// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy File operation. timeout is
+// the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client fileClient) AbortCopy(ctx context.Context, copyID string, timeout *int32) (*FileAbortCopyResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.abortCopyPreparer(copyID, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileAbortCopyResponse), err
+}
+
+// abortCopyPreparer prepares the AbortCopy request.
+func (client fileClient) abortCopyPreparer(copyID string, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ params.Set("copyid", copyID)
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "copy")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-copy-action", "abort")
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// abortCopyResponder handles the response to the AbortCopy request.
+func (client fileClient) abortCopyResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusNoContent)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileAbortCopyResponse{rawResponse: resp.Response()}, err
+}
+
+// Create creates a new file or replaces a file. Note that it only initializes the file; it has no content.
+//
+// fileContentLength is specifies the maximum size for the file, up to 1 TB. fileAttributes is if specified, the
+// provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can
+// also be specified as default. fileCreationTime is creation time for the file/directory. Default value: Now.
+// fileLastWriteTime is last write time for the file/directory. Default value: Now. timeout is the timeout parameter is
+// expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. fileContentType is sets the MIME content type of the file. The default
+// type is 'application/octet-stream'. fileContentEncoding is specifies which content encodings have been applied to
+// the file. fileContentLanguage is specifies the natural languages used by this resource. fileCacheControl is sets the
+// file's cache control. The File service stores this value but does not use or modify it. fileContentMD5 is sets the
+// file's MD5 hash. fileContentDisposition is sets the file's Content-Disposition header. metadata is a name-value pair
+// to associate with a file storage object. filePermission is if specified the permission (security descriptor) shall
+// be set for the directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file-permission-key
+// header shall be used. Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl.
+// Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. filePermissionKey is key
+// of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or
+// x-ms-file-permission-key should be specified.
+func (client fileClient) Create(ctx context.Context, fileContentLength int64, fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, fileContentType *string, fileContentEncoding *string, fileContentLanguage *string, fileCacheControl *string, fileContentMD5 []byte, fileContentDisposition *string, metadata map[string]string, filePermission *string, filePermissionKey *string) (*FileCreateResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.createPreparer(fileContentLength, fileAttributes, fileCreationTime, fileLastWriteTime, timeout, fileContentType, fileContentEncoding, fileContentLanguage, fileCacheControl, fileContentMD5, fileContentDisposition, metadata, filePermission, filePermissionKey)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileCreateResponse), err
+}
+
+// createPreparer prepares the Create request.
+func (client fileClient) createPreparer(fileContentLength int64, fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, fileContentType *string, fileContentEncoding *string, fileContentLanguage *string, fileCacheControl *string, fileContentMD5 []byte, fileContentDisposition *string, metadata map[string]string, filePermission *string, filePermissionKey *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ req.Header.Set("x-ms-content-length", strconv.FormatInt(fileContentLength, 10))
+ req.Header.Set("x-ms-type", "file")
+ if fileContentType != nil {
+ req.Header.Set("x-ms-content-type", *fileContentType)
+ }
+ if fileContentEncoding != nil {
+ req.Header.Set("x-ms-content-encoding", *fileContentEncoding)
+ }
+ if fileContentLanguage != nil {
+ req.Header.Set("x-ms-content-language", *fileContentLanguage)
+ }
+ if fileCacheControl != nil {
+ req.Header.Set("x-ms-cache-control", *fileCacheControl)
+ }
+ if fileContentMD5 != nil {
+ req.Header.Set("x-ms-content-md5", base64.StdEncoding.EncodeToString(fileContentMD5))
+ }
+ if fileContentDisposition != nil {
+ req.Header.Set("x-ms-content-disposition", *fileContentDisposition)
+ }
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ if filePermission != nil {
+ req.Header.Set("x-ms-file-permission", *filePermission)
+ }
+ if filePermissionKey != nil {
+ req.Header.Set("x-ms-file-permission-key", *filePermissionKey)
+ }
+ req.Header.Set("x-ms-file-attributes", fileAttributes)
+ req.Header.Set("x-ms-file-creation-time", fileCreationTime)
+ req.Header.Set("x-ms-file-last-write-time", fileLastWriteTime)
+ return req, nil
+}
+
+// createResponder handles the response to the Create request.
+func (client fileClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileCreateResponse{rawResponse: resp.Response()}, err
+}
+
+// Delete removes the file from the storage account.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client fileClient) Delete(ctx context.Context, timeout *int32) (*FileDeleteResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.deletePreparer(timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileDeleteResponse), err
+}
+
+// deletePreparer prepares the Delete request.
+func (client fileClient) deletePreparer(timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("DELETE", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// deleteResponder handles the response to the Delete request.
+func (client fileClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileDeleteResponse{rawResponse: resp.Response()}, err
+}
+
+// Download reads or downloads a file from the system, including its metadata and properties.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. rangeParameter is return file data only from the specified byte range.
+// rangeGetContentMD5 is when this header is set to true and specified together with the Range header, the service
+// returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size.
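+//
+// A hedged sketch requesting only the first 4 MiB of the file together with its
+// transactional MD5 (range string illustrative):
+//
+//	rng := "bytes=0-4194303"
+//	getMD5 := true
+//	resp, err := client.Download(ctx, nil, &rng, &getMD5)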
+func (client fileClient) Download(ctx context.Context, timeout *int32, rangeParameter *string, rangeGetContentMD5 *bool) (*DownloadResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.downloadPreparer(timeout, rangeParameter, rangeGetContentMD5)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*DownloadResponse), err
+}
+
+// downloadPreparer prepares the Download request.
+func (client fileClient) downloadPreparer(timeout *int32, rangeParameter *string, rangeGetContentMD5 *bool) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if rangeParameter != nil {
+ req.Header.Set("x-ms-range", *rangeParameter)
+ }
+ if rangeGetContentMD5 != nil {
+ req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5))
+ }
+ return req, nil
+}
+
+// downloadResponder handles the response to the Download request.
+func (client fileClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusPartialContent)
+ if resp == nil {
+ return nil, err
+ }
+ return &DownloadResponse{rawResponse: resp.Response()}, err
+}
+
+// ForceCloseHandles closes all handles open for the given file.
+//
+// handleID is specifies handle ID opened on the file or directory to be closed. Asterisk (‘*’) is a wildcard that
+// specifies all handles. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. marker is a string value that identifies the portion of the list to be
+// returned with the next list operation. The operation returns a marker value within the response body if the list
+// returned was not complete. The marker value may then be used in a subsequent call to request the next set of list
+// items. The marker value is opaque to the client. sharesnapshot is the snapshot parameter is an opaque DateTime value
+// that, when present, specifies the share snapshot to query.
+func (client fileClient) ForceCloseHandles(ctx context.Context, handleID string, timeout *int32, marker *string, sharesnapshot *string) (*FileForceCloseHandlesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.forceCloseHandlesPreparer(handleID, timeout, marker, sharesnapshot)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.forceCloseHandlesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileForceCloseHandlesResponse), err
+}
+
+// forceCloseHandlesPreparer prepares the ForceCloseHandles request.
+func (client fileClient) forceCloseHandlesPreparer(handleID string, timeout *int32, marker *string, sharesnapshot *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ params.Set("comp", "forceclosehandles")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-handle-id", handleID)
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// forceCloseHandlesResponder handles the response to the ForceCloseHandles request.
+func (client fileClient) forceCloseHandlesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileForceCloseHandlesResponse{rawResponse: resp.Response()}, err
+}
+
+// GetProperties returns all user-defined metadata, standard HTTP properties, and system properties for the file. It
+// does not return the content of the file.
+//
+// sharesnapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot
+// to query. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client fileClient) GetProperties(ctx context.Context, sharesnapshot *string, timeout *int32) (*FileGetPropertiesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getPropertiesPreparer(sharesnapshot, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileGetPropertiesResponse), err
+}
+
+// getPropertiesPreparer prepares the GetProperties request.
+func (client fileClient) getPropertiesPreparer(sharesnapshot *string, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("HEAD", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getPropertiesResponder handles the response to the GetProperties request.
+func (client fileClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileGetPropertiesResponse{rawResponse: resp.Response()}, err
+}
+
+// GetRangeList returns the list of valid ranges for a file.
+//
+// sharesnapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot
+// to query. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. rangeParameter is specifies the range of bytes over which to list ranges,
+// inclusively.
+func (client fileClient) GetRangeList(ctx context.Context, sharesnapshot *string, timeout *int32, rangeParameter *string) (*Ranges, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getRangeListPreparer(sharesnapshot, timeout, rangeParameter)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getRangeListResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*Ranges), err
+}
+
+// getRangeListPreparer prepares the GetRangeList request.
+func (client fileClient) getRangeListPreparer(sharesnapshot *string, timeout *int32, rangeParameter *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "rangelist")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if rangeParameter != nil {
+ req.Header.Set("x-ms-range", *rangeParameter)
+ }
+ return req, nil
+}
+
+// getRangeListResponder handles the response to the GetRangeList request.
+func (client fileClient) getRangeListResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &Ranges{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// ListHandles lists handles for the file.
+//
+// marker is a string value that identifies the portion of the list to be returned with the next list operation. The
+// operation returns a marker value within the response body if the list returned was not complete. The marker value
+// may then be used in a subsequent call to request the next set of list items. The marker value is opaque to the
+// client. maxresults is specifies the maximum number of entries to return. If the request does not specify maxresults,
+// or specifies a value greater than 5,000, the server will return up to 5,000 items. timeout is the timeout parameter
+// is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. sharesnapshot is the snapshot parameter is an opaque DateTime value that,
+// when present, specifies the share snapshot to query.
+func (client fileClient) ListHandles(ctx context.Context, marker *string, maxresults *int32, timeout *int32, sharesnapshot *string) (*ListHandlesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: maxresults,
+ constraints: []constraint{{target: "maxresults", name: null, rule: false,
+ chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.listHandlesPreparer(marker, maxresults, timeout, sharesnapshot)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listHandlesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ListHandlesResponse), err
+}
+
+// listHandlesPreparer prepares the ListHandles request.
+func (client fileClient) listHandlesPreparer(marker *string, maxresults *int32, timeout *int32, sharesnapshot *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if maxresults != nil {
+ params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ params.Set("comp", "listhandles")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// listHandlesResponder handles the response to the ListHandles request.
+func (client fileClient) listHandlesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &ListHandlesResponse{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// SetHTTPHeaders sets HTTP headers on the file.
+//
+// fileAttributes is if specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and
+// ‘Directory’ for directory. ‘None’ can also be specified as default. fileCreationTime is creation time for the
+// file/directory. Default value: Now. fileLastWriteTime is last write time for the file/directory. Default value: Now.
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. fileContentLength is resizes a file to the specified size. If the
+// specified byte value is less than the current size of the file, then all ranges above the specified byte value are
+// cleared. fileContentType is sets the MIME content type of the file. The default type is 'application/octet-stream'.
+// fileContentEncoding is specifies which content encodings have been applied to the file. fileContentLanguage is
+// specifies the natural languages used by this resource. fileCacheControl is sets the file's cache control. The File
+// service stores this value but does not use or modify it. fileContentMD5 is sets the file's MD5 hash.
+// fileContentDisposition is sets the file's Content-Disposition header. filePermission is if specified the permission
+// (security descriptor) shall be set for the directory/file. This header can be used if Permission size is <= 8KB,
+// else x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it must
+// have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be
+// specified. filePermissionKey is key of the permission to be set for the directory/file. Note: Only one of the
+// x-ms-file-permission or x-ms-file-permission-key should be specified.
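+//
+// A hedged sketch that resizes the file to 512 bytes, using the documented
+// defaults for the SMB property strings and omitting every optional header:
+//
+//	size := int64(512)
+//	resp, err := client.SetHTTPHeaders(ctx, "None", "Now", "Now", nil, &size,
+//		nil, nil, nil, nil, nil, nil, nil, nil)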
+func (client fileClient) SetHTTPHeaders(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, fileContentLength *int64, fileContentType *string, fileContentEncoding *string, fileContentLanguage *string, fileCacheControl *string, fileContentMD5 []byte, fileContentDisposition *string, filePermission *string, filePermissionKey *string) (*FileSetHTTPHeadersResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setHTTPHeadersPreparer(fileAttributes, fileCreationTime, fileLastWriteTime, timeout, fileContentLength, fileContentType, fileContentEncoding, fileContentLanguage, fileCacheControl, fileContentMD5, fileContentDisposition, filePermission, filePermissionKey)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileSetHTTPHeadersResponse), err
+}
+
+// setHTTPHeadersPreparer prepares the SetHTTPHeaders request.
+func (client fileClient) setHTTPHeadersPreparer(fileAttributes string, fileCreationTime string, fileLastWriteTime string, timeout *int32, fileContentLength *int64, fileContentType *string, fileContentEncoding *string, fileContentLanguage *string, fileCacheControl *string, fileContentMD5 []byte, fileContentDisposition *string, filePermission *string, filePermissionKey *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "properties")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if fileContentLength != nil {
+ req.Header.Set("x-ms-content-length", strconv.FormatInt(*fileContentLength, 10))
+ }
+ if fileContentType != nil {
+ req.Header.Set("x-ms-content-type", *fileContentType)
+ }
+ if fileContentEncoding != nil {
+ req.Header.Set("x-ms-content-encoding", *fileContentEncoding)
+ }
+ if fileContentLanguage != nil {
+ req.Header.Set("x-ms-content-language", *fileContentLanguage)
+ }
+ if fileCacheControl != nil {
+ req.Header.Set("x-ms-cache-control", *fileCacheControl)
+ }
+ if fileContentMD5 != nil {
+ req.Header.Set("x-ms-content-md5", base64.StdEncoding.EncodeToString(fileContentMD5))
+ }
+ if fileContentDisposition != nil {
+ req.Header.Set("x-ms-content-disposition", *fileContentDisposition)
+ }
+ if filePermission != nil {
+ req.Header.Set("x-ms-file-permission", *filePermission)
+ }
+ if filePermissionKey != nil {
+ req.Header.Set("x-ms-file-permission-key", *filePermissionKey)
+ }
+ req.Header.Set("x-ms-file-attributes", fileAttributes)
+ req.Header.Set("x-ms-file-creation-time", fileCreationTime)
+ req.Header.Set("x-ms-file-last-write-time", fileLastWriteTime)
+ return req, nil
+}
+
+// setHTTPHeadersResponder handles the response to the SetHTTPHeaders request.
+func (client fileClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileSetHTTPHeadersResponse{rawResponse: resp.Response()}, err
+}
+
+// SetMetadata updates user-defined metadata for the specified file.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a name-value pair to associate with a file storage object.
+func (client fileClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string) (*FileSetMetadataResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setMetadataPreparer(timeout, metadata)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileSetMetadataResponse), err
+}
+
+// setMetadataPreparer prepares the SetMetadata request.
+func (client fileClient) setMetadataPreparer(timeout *int32, metadata map[string]string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "metadata")
+ req.URL.RawQuery = params.Encode()
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// setMetadataResponder handles the response to the SetMetadata request.
+func (client fileClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileSetMetadataResponse{rawResponse: resp.Response()}, err
+}
+
+// StartCopy copies a blob or file to a destination file within the storage account.
+//
+// copySource is specifies the URL of the source file or blob, up to 2 KB in length. To copy a file to another file
+// within the same storage account, you may use Shared Key to authenticate the source file. If you are copying a file
+// from another storage account, or if you are copying a blob from the same storage account or another storage account,
+// then you must authenticate the source file or blob using a shared access signature. If the source is a public blob,
+// no authentication is required to perform the copy operation. A file in a share snapshot can also be specified as a
+// copy source. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a name-value pair to associate with a file storage object.
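+//
+// A hedged sketch (source URL and SAS are illustrative; poll the destination with
+// GetProperties until the copy completes):
+//
+//	src := "https://srcaccount.file.core.windows.net/share/src.txt?<sas>"
+//	resp, err := client.StartCopy(ctx, src, nil, map[string]string{"origin": "copy"})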
+func (client fileClient) StartCopy(ctx context.Context, copySource string, timeout *int32, metadata map[string]string) (*FileStartCopyResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.startCopyPreparer(copySource, timeout, metadata)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.startCopyResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileStartCopyResponse), err
+}
+
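+// A hypothetical usage sketch (not part of the generated API): starting a
+// copy from an assumed SAS-authenticated source URL srcURL and checking the
+// initial copy status reported by the response headers:
+//
+//	resp, err := f.StartCopy(ctx, srcURL, nil, nil)
+//	if err == nil && resp.CopyStatus() == CopyStatusPending {
+//		// poll the file's properties until the copy completes
+//	}
+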
+// startCopyPreparer prepares the StartCopy request.
+func (client fileClient) startCopyPreparer(copySource string, timeout *int32, metadata map[string]string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ req.Header.Set("x-ms-copy-source", copySource)
+ return req, nil
+}
+
+// startCopyResponder handles the response to the StartCopy request.
+func (client fileClient) startCopyResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileStartCopyResponse{rawResponse: resp.Response()}, err
+}
+
+// UploadRange uploads a range of bytes to a file.
+//
+// rangeParameter specifies the range of bytes to be written. Both the start and end of the range must be specified.
+// For an update operation, the range can be up to 4 MB in size. For a clear operation, the range can be up to the
+// value of the file's full size. The File service accepts only a single byte range for the Range and 'x-ms-range'
+// headers, and the byte range must be specified in the following format: bytes=startByte-endByte. fileRangeWrite
+// specifies one of the following options: - Update: Writes the bytes specified by the request body into the specified
+// range. The Range and Content-Length headers must match to perform the update. - Clear: Clears the specified range
+// and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and
+// set the Range header to a value that indicates the range to clear, up to maximum file size. contentLength
+// specifies the number of bytes being transmitted in the request body. When the x-ms-write header is set to clear,
+// the value of this header must be set to zero. body is the initial data; body will be closed upon successful
+// return. Callers should ensure closure when receiving an error. timeout is the operation timeout, expressed in
+// seconds. For more information, see Setting Timeouts for File Service Operations. contentMD5 is an MD5 hash of the
+// content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is
+// specified, the File service compares the hash of the content that has arrived with the header value that was sent.
+// If the two hashes do not match, the operation will fail with error code 400 (Bad Request).
+func (client fileClient) UploadRange(ctx context.Context, rangeParameter string, fileRangeWrite FileRangeWriteType, contentLength int64, body io.ReadSeeker, timeout *int32, contentMD5 []byte) (*FileUploadRangeResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.uploadRangePreparer(rangeParameter, fileRangeWrite, contentLength, body, timeout, contentMD5)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadRangeResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileUploadRangeResponse), err
+}
+
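+// A hypothetical usage sketch (not part of the generated API): writing 512
+// bytes at offset 0 as an update-type range write; bytes.NewReader satisfies
+// the io.ReadSeeker requirement:
+//
+//	data := make([]byte, 512)
+//	_, err := f.UploadRange(ctx, "bytes=0-511", FileRangeWriteUpdate,
+//		int64(len(data)), bytes.NewReader(data), nil, nil)
+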
+// uploadRangePreparer prepares the UploadRange request.
+func (client fileClient) uploadRangePreparer(rangeParameter string, fileRangeWrite FileRangeWriteType, contentLength int64, body io.ReadSeeker, timeout *int32, contentMD5 []byte) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, body)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "range")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-range", rangeParameter)
+ req.Header.Set("x-ms-write", string(fileRangeWrite))
+ req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ if contentMD5 != nil {
+ req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(contentMD5))
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// uploadRangeResponder handles the response to the UploadRange request.
+func (client fileClient) uploadRangeResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileUploadRangeResponse{rawResponse: resp.Response()}, err
+}
+
+// UploadRangeFromURL uploads a range of bytes to a file where the contents are read from a URL.
+//
+// rangeParameter specifies the byte range in the file to which the data is written. copySource specifies the URL of
+// the source file or blob, up to 2 KB in length. To copy a file to another file within the same storage account, you
+// may use Shared Key to authenticate the source file. If you are copying a file from another storage account, or if
+// you are copying a blob from the same storage account or another storage account, then you must authenticate the
+// source file or blob using a shared access signature. If the source is a public blob, no authentication is required
+// to perform the copy operation. A file in a share snapshot can also be specified as a copy source. contentLength
+// specifies the number of bytes being transmitted in the request body. When the x-ms-write header is set to clear,
+// the value of this header must be set to zero. timeout is the operation timeout, expressed in seconds. For more
+// information, see Setting Timeouts for File Service Operations. sourceRange specifies the range of bytes to read
+// from the copy source. sourceContentCrc64 specifies the crc64 calculated for the range of bytes that must be read
+// from the copy source. sourceIfMatchCrc64 specifies a crc64 value; the operation proceeds only on a range with a
+// matching crc64 checksum. sourceIfNoneMatchCrc64 specifies a crc64 value; the operation proceeds only on a range
+// without a matching crc64 checksum.
+func (client fileClient) UploadRangeFromURL(ctx context.Context, rangeParameter string, copySource string, contentLength int64, timeout *int32, sourceRange *string, sourceContentCrc64 []byte, sourceIfMatchCrc64 []byte, sourceIfNoneMatchCrc64 []byte) (*FileUploadRangeFromURLResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.uploadRangeFromURLPreparer(rangeParameter, copySource, contentLength, timeout, sourceRange, sourceContentCrc64, sourceIfMatchCrc64, sourceIfNoneMatchCrc64)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadRangeFromURLResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FileUploadRangeFromURLResponse), err
+}
+
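+// A hypothetical usage sketch (not part of the generated API): copying the
+// first 512 bytes of an assumed SAS-authenticated source URL srcURL into the
+// same range of the destination, with no crc64 conditions; contentLength is
+// zero because no request body is transmitted:
+//
+//	srcRange := "bytes=0-511"
+//	_, err := f.UploadRangeFromURL(ctx, "bytes=0-511", srcURL, 0, nil,
+//		&srcRange, nil, nil, nil)
+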
+// uploadRangeFromURLPreparer prepares the UploadRangeFromURL request.
+func (client fileClient) uploadRangeFromURLPreparer(rangeParameter string, copySource string, contentLength int64, timeout *int32, sourceRange *string, sourceContentCrc64 []byte, sourceIfMatchCrc64 []byte, sourceIfNoneMatchCrc64 []byte) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "range")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-range", rangeParameter)
+ req.Header.Set("x-ms-copy-source", copySource)
+ if sourceRange != nil {
+ req.Header.Set("x-ms-source-range", *sourceRange)
+ }
+ req.Header.Set("x-ms-write", "update")
+ req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ if sourceContentCrc64 != nil {
+ req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentCrc64))
+ }
+ if sourceIfMatchCrc64 != nil {
+ req.Header.Set("x-ms-source-if-match-crc64", base64.StdEncoding.EncodeToString(sourceIfMatchCrc64))
+ }
+ if sourceIfNoneMatchCrc64 != nil {
+ req.Header.Set("x-ms-source-if-none-match-crc64", base64.StdEncoding.EncodeToString(sourceIfNoneMatchCrc64))
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// uploadRangeFromURLResponder handles the response to the UploadRangeFromURL request.
+func (client fileClient) uploadRangeFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &FileUploadRangeFromURLResponse{rawResponse: resp.Response()}, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_models.go
new file mode 100644
index 0000000..9f2ee9f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_models.go
@@ -0,0 +1,3208 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unsafe"
+)
+
+// ETag is an entity tag.
+type ETag string
+
+const (
+ // ETagNone represents an empty entity tag.
+ ETagNone ETag = ""
+
+ // ETagAny matches any entity tag.
+ ETagAny ETag = "*"
+)
+
+// Metadata contains metadata key/value pairs.
+type Metadata map[string]string
+
+const mdPrefix = "x-ms-meta-"
+
+const mdPrefixLen = len(mdPrefix)
+
+// UnmarshalXML implements the xml.Unmarshaler interface for Metadata.
+func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ tokName := ""
+ for t, err := d.Token(); err == nil; t, err = d.Token() {
+ switch tt := t.(type) {
+ case xml.StartElement:
+ tokName = strings.ToLower(tt.Name.Local)
+ case xml.CharData:
+ if *md == nil {
+ *md = Metadata{}
+ }
+ (*md)[tokName] = string(tt)
+ }
+ }
+ return nil
+}
+
+// Marker represents an opaque value used in paged responses.
+type Marker struct {
+ Val *string
+}
+
+// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true
+// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from
+// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only
+// after the service has returned the final result portion.
+func (m Marker) NotDone() bool {
+ return m.Val == nil || *m.Val != ""
+}
+
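+// A minimal paging sketch (hypothetical; the segment-listing call is assumed):
+// a zero-value Marker drives the first request, and each response's marker
+// feeds the next iteration until NotDone reports false:
+//
+//	for marker := (Marker{}); marker.NotDone(); {
+//		// resp, err := client.ListSharesSegment(ctx, marker, options)
+//		// marker = resp.NextMarker
+//	}
+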
+// UnmarshalXML implements the xml.Unmarshaler interface for Marker.
+func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ var out string
+ err := d.DecodeElement(&out, &start)
+ m.Val = &out
+ return err
+}
+
+// joinConst concatenates a slice of const values with the specified separator between each item.
+func joinConst(s interface{}, sep string) string {
+ v := reflect.ValueOf(s)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ panic("s wasn't a slice or array")
+ }
+ ss := make([]string, 0, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, v.Index(i).String())
+ }
+ return strings.Join(ss, sep)
+}
+
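+// For illustration (a sketch, not called anywhere in this section): joining
+// the possible list-shares include values yields a comma-separated string;
+// note that the ...None constant contributes an empty element:
+//
+//	s := joinConst(PossibleListSharesIncludeTypeValues(), ",")
+//	// s == "metadata,,snapshots"
+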
+func validateError(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+// CopyStatusType enumerates the values for copy status type.
+type CopyStatusType string
+
+const (
+ // CopyStatusAborted ...
+ CopyStatusAborted CopyStatusType = "aborted"
+ // CopyStatusFailed ...
+ CopyStatusFailed CopyStatusType = "failed"
+ // CopyStatusNone represents an empty CopyStatusType.
+ CopyStatusNone CopyStatusType = ""
+ // CopyStatusPending ...
+ CopyStatusPending CopyStatusType = "pending"
+ // CopyStatusSuccess ...
+ CopyStatusSuccess CopyStatusType = "success"
+)
+
+// PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type.
+func PossibleCopyStatusTypeValues() []CopyStatusType {
+ return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess}
+}
+
+// DeleteSnapshotsOptionType enumerates the values for delete snapshots option type.
+type DeleteSnapshotsOptionType string
+
+const (
+ // DeleteSnapshotsOptionInclude ...
+ DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include"
+ // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType.
+ DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = ""
+)
+
+// PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type.
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
+ return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone}
+}
+
+// FileRangeWriteType enumerates the values for file range write type.
+type FileRangeWriteType string
+
+const (
+ // FileRangeWriteClear ...
+ FileRangeWriteClear FileRangeWriteType = "clear"
+ // FileRangeWriteNone represents an empty FileRangeWriteType.
+ FileRangeWriteNone FileRangeWriteType = ""
+ // FileRangeWriteUpdate ...
+ FileRangeWriteUpdate FileRangeWriteType = "update"
+)
+
+// PossibleFileRangeWriteTypeValues returns an array of possible values for the FileRangeWriteType const type.
+func PossibleFileRangeWriteTypeValues() []FileRangeWriteType {
+ return []FileRangeWriteType{FileRangeWriteClear, FileRangeWriteNone, FileRangeWriteUpdate}
+}
+
+// ListSharesIncludeType enumerates the values for list shares include type.
+type ListSharesIncludeType string
+
+const (
+ // ListSharesIncludeMetadata ...
+ ListSharesIncludeMetadata ListSharesIncludeType = "metadata"
+ // ListSharesIncludeNone represents an empty ListSharesIncludeType.
+ ListSharesIncludeNone ListSharesIncludeType = ""
+ // ListSharesIncludeSnapshots ...
+ ListSharesIncludeSnapshots ListSharesIncludeType = "snapshots"
+)
+
+// PossibleListSharesIncludeTypeValues returns an array of possible values for the ListSharesIncludeType const type.
+func PossibleListSharesIncludeTypeValues() []ListSharesIncludeType {
+ return []ListSharesIncludeType{ListSharesIncludeMetadata, ListSharesIncludeNone, ListSharesIncludeSnapshots}
+}
+
+// StorageErrorCodeType enumerates the values for storage error code type.
+type StorageErrorCodeType string
+
+const (
+ // StorageErrorCodeAccountAlreadyExists ...
+ StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists"
+ // StorageErrorCodeAccountBeingCreated ...
+ StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated"
+ // StorageErrorCodeAccountIsDisabled ...
+ StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled"
+ // StorageErrorCodeAuthenticationFailed ...
+ StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed"
+ // StorageErrorCodeAuthorizationFailure ...
+ StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure"
+ // StorageErrorCodeCannotDeleteFileOrDirectory ...
+ StorageErrorCodeCannotDeleteFileOrDirectory StorageErrorCodeType = "CannotDeleteFileOrDirectory"
+ // StorageErrorCodeClientCacheFlushDelay ...
+ StorageErrorCodeClientCacheFlushDelay StorageErrorCodeType = "ClientCacheFlushDelay"
+ // StorageErrorCodeConditionHeadersNotSupported ...
+ StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported"
+ // StorageErrorCodeConditionNotMet ...
+ StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet"
+ // StorageErrorCodeContainerQuotaDowngradeNotAllowed ...
+ StorageErrorCodeContainerQuotaDowngradeNotAllowed StorageErrorCodeType = "ContainerQuotaDowngradeNotAllowed"
+ // StorageErrorCodeDeletePending ...
+ StorageErrorCodeDeletePending StorageErrorCodeType = "DeletePending"
+ // StorageErrorCodeDirectoryNotEmpty ...
+ StorageErrorCodeDirectoryNotEmpty StorageErrorCodeType = "DirectoryNotEmpty"
+ // StorageErrorCodeEmptyMetadataKey ...
+ StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey"
+ // StorageErrorCodeFileLockConflict ...
+ StorageErrorCodeFileLockConflict StorageErrorCodeType = "FileLockConflict"
+ // StorageErrorCodeInsufficientAccountPermissions ...
+ StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions"
+ // StorageErrorCodeInternalError ...
+ StorageErrorCodeInternalError StorageErrorCodeType = "InternalError"
+ // StorageErrorCodeInvalidAuthenticationInfo ...
+ StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo"
+ // StorageErrorCodeInvalidFileOrDirectoryPathName ...
+ StorageErrorCodeInvalidFileOrDirectoryPathName StorageErrorCodeType = "InvalidFileOrDirectoryPathName"
+ // StorageErrorCodeInvalidHeaderValue ...
+ StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue"
+ // StorageErrorCodeInvalidHTTPVerb ...
+ StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb"
+ // StorageErrorCodeInvalidInput ...
+ StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput"
+ // StorageErrorCodeInvalidMd5 ...
+ StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5"
+ // StorageErrorCodeInvalidMetadata ...
+ StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata"
+ // StorageErrorCodeInvalidQueryParameterValue ...
+ StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue"
+ // StorageErrorCodeInvalidRange ...
+ StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange"
+ // StorageErrorCodeInvalidResourceName ...
+ StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName"
+ // StorageErrorCodeInvalidURI ...
+ StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri"
+ // StorageErrorCodeInvalidXMLDocument ...
+ StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument"
+ // StorageErrorCodeInvalidXMLNodeValue ...
+ StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue"
+ // StorageErrorCodeMd5Mismatch ...
+ StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch"
+ // StorageErrorCodeMetadataTooLarge ...
+ StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge"
+ // StorageErrorCodeMissingContentLengthHeader ...
+ StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader"
+ // StorageErrorCodeMissingRequiredHeader ...
+ StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader"
+ // StorageErrorCodeMissingRequiredQueryParameter ...
+ StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter"
+ // StorageErrorCodeMissingRequiredXMLNode ...
+ StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode"
+ // StorageErrorCodeMultipleConditionHeadersNotSupported ...
+ StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported"
+ // StorageErrorCodeNone represents an empty StorageErrorCodeType.
+ StorageErrorCodeNone StorageErrorCodeType = ""
+ // StorageErrorCodeOperationTimedOut ...
+ StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut"
+ // StorageErrorCodeOutOfRangeInput ...
+ StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput"
+ // StorageErrorCodeOutOfRangeQueryParameterValue ...
+ StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue"
+ // StorageErrorCodeParentNotFound ...
+ StorageErrorCodeParentNotFound StorageErrorCodeType = "ParentNotFound"
+ // StorageErrorCodeReadOnlyAttribute ...
+ StorageErrorCodeReadOnlyAttribute StorageErrorCodeType = "ReadOnlyAttribute"
+ // StorageErrorCodeRequestBodyTooLarge ...
+ StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge"
+ // StorageErrorCodeRequestURLFailedToParse ...
+ StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse"
+ // StorageErrorCodeResourceAlreadyExists ...
+ StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists"
+ // StorageErrorCodeResourceNotFound ...
+ StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound"
+ // StorageErrorCodeResourceTypeMismatch ...
+ StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch"
+ // StorageErrorCodeServerBusy ...
+ StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy"
+ // StorageErrorCodeShareAlreadyExists ...
+ StorageErrorCodeShareAlreadyExists StorageErrorCodeType = "ShareAlreadyExists"
+ // StorageErrorCodeShareBeingDeleted ...
+ StorageErrorCodeShareBeingDeleted StorageErrorCodeType = "ShareBeingDeleted"
+ // StorageErrorCodeShareDisabled ...
+ StorageErrorCodeShareDisabled StorageErrorCodeType = "ShareDisabled"
+ // StorageErrorCodeShareHasSnapshots ...
+ StorageErrorCodeShareHasSnapshots StorageErrorCodeType = "ShareHasSnapshots"
+ // StorageErrorCodeShareNotFound ...
+ StorageErrorCodeShareNotFound StorageErrorCodeType = "ShareNotFound"
+ // StorageErrorCodeShareSnapshotCountExceeded ...
+ StorageErrorCodeShareSnapshotCountExceeded StorageErrorCodeType = "ShareSnapshotCountExceeded"
+ // StorageErrorCodeShareSnapshotInProgress ...
+ StorageErrorCodeShareSnapshotInProgress StorageErrorCodeType = "ShareSnapshotInProgress"
+ // StorageErrorCodeShareSnapshotOperationNotSupported ...
+ StorageErrorCodeShareSnapshotOperationNotSupported StorageErrorCodeType = "ShareSnapshotOperationNotSupported"
+ // StorageErrorCodeSharingViolation ...
+ StorageErrorCodeSharingViolation StorageErrorCodeType = "SharingViolation"
+ // StorageErrorCodeUnsupportedHeader ...
+ StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader"
+ // StorageErrorCodeUnsupportedHTTPVerb ...
+ StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb"
+ // StorageErrorCodeUnsupportedQueryParameter ...
+ StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter"
+ // StorageErrorCodeUnsupportedXMLNode ...
+ StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode"
+)
+
+// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type.
+func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType {
+ return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeCannotDeleteFileOrDirectory, StorageErrorCodeClientCacheFlushDelay, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerQuotaDowngradeNotAllowed, StorageErrorCodeDeletePending, StorageErrorCodeDirectoryNotEmpty, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFileLockConflict, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidFileOrDirectoryPathName, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodeParentNotFound, StorageErrorCodeReadOnlyAttribute, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeServerBusy, StorageErrorCodeShareAlreadyExists, StorageErrorCodeShareBeingDeleted, StorageErrorCodeShareDisabled, StorageErrorCodeShareHasSnapshots, StorageErrorCodeShareNotFound, StorageErrorCodeShareSnapshotCountExceeded, StorageErrorCodeShareSnapshotInProgress, StorageErrorCodeShareSnapshotOperationNotSupported, StorageErrorCodeSharingViolation, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode}
+}
+
+// AccessPolicy - An access policy.
+type AccessPolicy struct {
+ // Start - The date-time the policy is active.
+ Start *time.Time `xml:"Start"`
+ // Expiry - The date-time the policy expires.
+ Expiry *time.Time `xml:"Expiry"`
+ // Permission - The permissions for the ACL policy.
+ Permission *string `xml:"Permission"`
+}
+
+// MarshalXML implements the xml.Marshaler interface for AccessPolicy.
+func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ ap2 := (*accessPolicy)(unsafe.Pointer(&ap))
+ return e.EncodeElement(*ap2, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy.
+func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ ap2 := (*accessPolicy)(unsafe.Pointer(ap))
+ return d.DecodeElement(ap2, &start)
+}
+
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access
+// resources in another domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain
+// (the origin domain) to call APIs in another domain.
+type CorsRule struct {
+ // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.
+ AllowedOrigins string `xml:"AllowedOrigins"`
+ // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
+ AllowedMethods string `xml:"AllowedMethods"`
+ // AllowedHeaders - The request headers that the origin domain may specify on the CORS request.
+ AllowedHeaders string `xml:"AllowedHeaders"`
+ // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer.
+ ExposedHeaders string `xml:"ExposedHeaders"`
+ // MaxAgeInSeconds - The maximum amount of time that a browser should cache the preflight OPTIONS request.
+ MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"`
+}
+
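+// A hypothetical configuration sketch: a permissive development-only rule
+// allowing any origin to issue GET and PUT requests, cached for one hour:
+//
+//	rule := CorsRule{
+//		AllowedOrigins:  "*",
+//		AllowedMethods:  "GET,PUT",
+//		AllowedHeaders:  "x-ms-meta-*",
+//		ExposedHeaders:  "x-ms-meta-*",
+//		MaxAgeInSeconds: 3600,
+//	}
+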
+// DirectoryCreateResponse ...
+type DirectoryCreateResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (dcr DirectoryCreateResponse) Response() *http.Response {
+ return dcr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dcr DirectoryCreateResponse) StatusCode() int {
+ return dcr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dcr DirectoryCreateResponse) Status() string {
+ return dcr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (dcr DirectoryCreateResponse) Date() time.Time {
+ s := dcr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (dcr DirectoryCreateResponse) ErrorCode() string {
+ return dcr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (dcr DirectoryCreateResponse) ETag() ETag {
+ return ETag(dcr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (dcr DirectoryCreateResponse) FileAttributes() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (dcr DirectoryCreateResponse) FileChangeTime() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (dcr DirectoryCreateResponse) FileCreationTime() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (dcr DirectoryCreateResponse) FileID() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (dcr DirectoryCreateResponse) FileLastWriteTime() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (dcr DirectoryCreateResponse) FileParentID() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (dcr DirectoryCreateResponse) FilePermissionKey() string {
+ return dcr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (dcr DirectoryCreateResponse) IsServerEncrypted() string {
+ return dcr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dcr DirectoryCreateResponse) LastModified() time.Time {
+ s := dcr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dcr DirectoryCreateResponse) RequestID() string {
+ return dcr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dcr DirectoryCreateResponse) Version() string {
+ return dcr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DirectoryDeleteResponse ...
+type DirectoryDeleteResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (ddr DirectoryDeleteResponse) Response() *http.Response {
+ return ddr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ddr DirectoryDeleteResponse) StatusCode() int {
+ return ddr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ddr DirectoryDeleteResponse) Status() string {
+ return ddr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (ddr DirectoryDeleteResponse) Date() time.Time {
+ s := ddr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ddr DirectoryDeleteResponse) ErrorCode() string {
+ return ddr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ddr DirectoryDeleteResponse) RequestID() string {
+ return ddr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ddr DirectoryDeleteResponse) Version() string {
+ return ddr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DirectoryForceCloseHandlesResponse ...
+type DirectoryForceCloseHandlesResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (dfchr DirectoryForceCloseHandlesResponse) Response() *http.Response {
+ return dfchr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dfchr DirectoryForceCloseHandlesResponse) StatusCode() int {
+ return dfchr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dfchr DirectoryForceCloseHandlesResponse) Status() string {
+ return dfchr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (dfchr DirectoryForceCloseHandlesResponse) Date() time.Time {
+ s := dfchr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (dfchr DirectoryForceCloseHandlesResponse) ErrorCode() string {
+ return dfchr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// Marker returns the value for header x-ms-marker.
+func (dfchr DirectoryForceCloseHandlesResponse) Marker() string {
+ return dfchr.rawResponse.Header.Get("x-ms-marker")
+}
+
+// NumberOfHandlesClosed returns the value for header x-ms-number-of-handles-closed.
+func (dfchr DirectoryForceCloseHandlesResponse) NumberOfHandlesClosed() int32 {
+ s := dfchr.rawResponse.Header.Get("x-ms-number-of-handles-closed")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ i = 0
+ }
+ return int32(i)
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dfchr DirectoryForceCloseHandlesResponse) RequestID() string {
+ return dfchr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dfchr DirectoryForceCloseHandlesResponse) Version() string {
+ return dfchr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DirectoryGetPropertiesResponse ...
+type DirectoryGetPropertiesResponse struct {
+ rawResponse *http.Response
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (dgpr DirectoryGetPropertiesResponse) NewMetadata() Metadata {
+ md := Metadata{}
+ for k, v := range dgpr.rawResponse.Header {
+ if len(k) > mdPrefixLen {
+ if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
+ md[strings.ToLower(k[mdPrefixLen:])] = v[0]
+ }
+ }
+ }
+ return md
+}
+
+// Response returns the raw HTTP response object.
+func (dgpr DirectoryGetPropertiesResponse) Response() *http.Response {
+ return dgpr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dgpr DirectoryGetPropertiesResponse) StatusCode() int {
+ return dgpr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dgpr DirectoryGetPropertiesResponse) Status() string {
+ return dgpr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (dgpr DirectoryGetPropertiesResponse) Date() time.Time {
+ s := dgpr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (dgpr DirectoryGetPropertiesResponse) ErrorCode() string {
+ return dgpr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (dgpr DirectoryGetPropertiesResponse) ETag() ETag {
+ return ETag(dgpr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (dgpr DirectoryGetPropertiesResponse) FileAttributes() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (dgpr DirectoryGetPropertiesResponse) FileChangeTime() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (dgpr DirectoryGetPropertiesResponse) FileCreationTime() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (dgpr DirectoryGetPropertiesResponse) FileID() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (dgpr DirectoryGetPropertiesResponse) FileLastWriteTime() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (dgpr DirectoryGetPropertiesResponse) FileParentID() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (dgpr DirectoryGetPropertiesResponse) FilePermissionKey() string {
+ return dgpr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (dgpr DirectoryGetPropertiesResponse) IsServerEncrypted() string {
+ return dgpr.rawResponse.Header.Get("x-ms-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dgpr DirectoryGetPropertiesResponse) LastModified() time.Time {
+ s := dgpr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dgpr DirectoryGetPropertiesResponse) RequestID() string {
+ return dgpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dgpr DirectoryGetPropertiesResponse) Version() string {
+ return dgpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DirectoryItem - A listed directory item.
+//type DirectoryItem struct {
+// // XMLName is used for marshalling and is subject to removal in a future release.
+// XMLName xml.Name `xml:"Directory"`
+// Name string `xml:"Name"`
+//}
+
+// DirectorySetMetadataResponse ...
+type DirectorySetMetadataResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (dsmr DirectorySetMetadataResponse) Response() *http.Response {
+ return dsmr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dsmr DirectorySetMetadataResponse) StatusCode() int {
+ return dsmr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dsmr DirectorySetMetadataResponse) Status() string {
+ return dsmr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (dsmr DirectorySetMetadataResponse) Date() time.Time {
+ s := dsmr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (dsmr DirectorySetMetadataResponse) ErrorCode() string {
+ return dsmr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (dsmr DirectorySetMetadataResponse) ETag() ETag {
+ return ETag(dsmr.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (dsmr DirectorySetMetadataResponse) IsServerEncrypted() string {
+ return dsmr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dsmr DirectorySetMetadataResponse) RequestID() string {
+ return dsmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dsmr DirectorySetMetadataResponse) Version() string {
+ return dsmr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DirectorySetPropertiesResponse ...
+type DirectorySetPropertiesResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (dspr DirectorySetPropertiesResponse) Response() *http.Response {
+ return dspr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dspr DirectorySetPropertiesResponse) StatusCode() int {
+ return dspr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dspr DirectorySetPropertiesResponse) Status() string {
+ return dspr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (dspr DirectorySetPropertiesResponse) Date() time.Time {
+ s := dspr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (dspr DirectorySetPropertiesResponse) ErrorCode() string {
+ return dspr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (dspr DirectorySetPropertiesResponse) ETag() ETag {
+ return ETag(dspr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (dspr DirectorySetPropertiesResponse) FileAttributes() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (dspr DirectorySetPropertiesResponse) FileChangeTime() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (dspr DirectorySetPropertiesResponse) FileCreationTime() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (dspr DirectorySetPropertiesResponse) FileID() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (dspr DirectorySetPropertiesResponse) FileLastWriteTime() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (dspr DirectorySetPropertiesResponse) FileParentID() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (dspr DirectorySetPropertiesResponse) FilePermissionKey() string {
+ return dspr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (dspr DirectorySetPropertiesResponse) IsServerEncrypted() string {
+ return dspr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dspr DirectorySetPropertiesResponse) LastModified() time.Time {
+ s := dspr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dspr DirectorySetPropertiesResponse) RequestID() string {
+ return dspr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dspr DirectorySetPropertiesResponse) Version() string {
+ return dspr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DownloadResponse - Wraps the response from the fileClient.Download method.
+type DownloadResponse struct {
+ rawResponse *http.Response
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (dr DownloadResponse) NewMetadata() Metadata {
+ md := Metadata{}
+ for k, v := range dr.rawResponse.Header {
+ if len(k) > mdPrefixLen {
+ if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
+ md[strings.ToLower(k[mdPrefixLen:])] = v[0]
+ }
+ }
+ }
+ return md
+}
+
+// Response returns the raw HTTP response object.
+func (dr DownloadResponse) Response() *http.Response {
+ return dr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dr DownloadResponse) StatusCode() int {
+ return dr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dr DownloadResponse) Status() string {
+ return dr.rawResponse.Status
+}
+
+// Body returns the raw HTTP response object's Body.
+func (dr DownloadResponse) Body() io.ReadCloser {
+ return dr.rawResponse.Body
+}
+
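+// A hypothetical consumption sketch (the Download call itself is assumed):
+// the caller owns the body and should drain and close it when finished;
+// ContentLength returns -1 when the header is absent:
+//
+//	// dr, err := f.Download(ctx, ...)
+//	defer dr.Body().Close()
+//	n := dr.ContentLength() // -1 if the Content-Length header is missing
+//	_ = n
+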
+// AcceptRanges returns the value for header Accept-Ranges.
+func (dr DownloadResponse) AcceptRanges() string {
+ return dr.rawResponse.Header.Get("Accept-Ranges")
+}
+
+// CacheControl returns the value for header Cache-Control.
+func (dr DownloadResponse) CacheControl() string {
+ return dr.rawResponse.Header.Get("Cache-Control")
+}
+
+// ContentDisposition returns the value for header Content-Disposition.
+func (dr DownloadResponse) ContentDisposition() string {
+ return dr.rawResponse.Header.Get("Content-Disposition")
+}
+
+// ContentEncoding returns the value for header Content-Encoding.
+func (dr DownloadResponse) ContentEncoding() string {
+ return dr.rawResponse.Header.Get("Content-Encoding")
+}
+
+// ContentLanguage returns the value for header Content-Language.
+func (dr DownloadResponse) ContentLanguage() string {
+ return dr.rawResponse.Header.Get("Content-Language")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (dr DownloadResponse) ContentLength() int64 {
+ s := dr.rawResponse.Header.Get("Content-Length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (dr DownloadResponse) ContentMD5() []byte {
+ s := dr.rawResponse.Header.Get("Content-MD5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// ContentRange returns the value for header Content-Range.
+func (dr DownloadResponse) ContentRange() string {
+ return dr.rawResponse.Header.Get("Content-Range")
+}
+
+// ContentType returns the value for header Content-Type.
+func (dr DownloadResponse) ContentType() string {
+ return dr.rawResponse.Header.Get("Content-Type")
+}
+
+// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
+func (dr DownloadResponse) CopyCompletionTime() time.Time {
+ s := dr.rawResponse.Header.Get("x-ms-copy-completion-time")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (dr DownloadResponse) CopyID() string {
+ return dr.rawResponse.Header.Get("x-ms-copy-id")
+}
+
+// CopyProgress returns the value for header x-ms-copy-progress.
+func (dr DownloadResponse) CopyProgress() string {
+ return dr.rawResponse.Header.Get("x-ms-copy-progress")
+}
+
+// CopySource returns the value for header x-ms-copy-source.
+func (dr DownloadResponse) CopySource() string {
+ return dr.rawResponse.Header.Get("x-ms-copy-source")
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (dr DownloadResponse) CopyStatus() CopyStatusType {
+ return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status"))
+}
+
+// CopyStatusDescription returns the value for header x-ms-copy-status-description.
+func (dr DownloadResponse) CopyStatusDescription() string {
+ return dr.rawResponse.Header.Get("x-ms-copy-status-description")
+}
+
+// Date returns the value for header Date.
+func (dr DownloadResponse) Date() time.Time {
+ s := dr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (dr DownloadResponse) ErrorCode() string {
+ return dr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (dr DownloadResponse) ETag() ETag {
+ return ETag(dr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (dr DownloadResponse) FileAttributes() string {
+ return dr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (dr DownloadResponse) FileChangeTime() string {
+ return dr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileContentMD5 returns the value for header x-ms-content-md5.
+func (dr DownloadResponse) FileContentMD5() []byte {
+ s := dr.rawResponse.Header.Get("x-ms-content-md5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (dr DownloadResponse) FileCreationTime() string {
+ return dr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (dr DownloadResponse) FileID() string {
+ return dr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (dr DownloadResponse) FileLastWriteTime() string {
+ return dr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (dr DownloadResponse) FileParentID() string {
+ return dr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (dr DownloadResponse) FilePermissionKey() string {
+ return dr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (dr DownloadResponse) IsServerEncrypted() string {
+ return dr.rawResponse.Header.Get("x-ms-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dr DownloadResponse) LastModified() time.Time {
+ s := dr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dr DownloadResponse) RequestID() string {
+ return dr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dr DownloadResponse) Version() string {
+ return dr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileAbortCopyResponse ...
+type FileAbortCopyResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (facr FileAbortCopyResponse) Response() *http.Response {
+ return facr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (facr FileAbortCopyResponse) StatusCode() int {
+ return facr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (facr FileAbortCopyResponse) Status() string {
+ return facr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (facr FileAbortCopyResponse) Date() time.Time {
+ s := facr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (facr FileAbortCopyResponse) ErrorCode() string {
+ return facr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (facr FileAbortCopyResponse) RequestID() string {
+ return facr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (facr FileAbortCopyResponse) Version() string {
+ return facr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileCreateResponse ...
+type FileCreateResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (fcr FileCreateResponse) Response() *http.Response {
+ return fcr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fcr FileCreateResponse) StatusCode() int {
+ return fcr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fcr FileCreateResponse) Status() string {
+ return fcr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (fcr FileCreateResponse) Date() time.Time {
+ s := fcr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fcr FileCreateResponse) ErrorCode() string {
+ return fcr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (fcr FileCreateResponse) ETag() ETag {
+ return ETag(fcr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (fcr FileCreateResponse) FileAttributes() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (fcr FileCreateResponse) FileChangeTime() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (fcr FileCreateResponse) FileCreationTime() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (fcr FileCreateResponse) FileID() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (fcr FileCreateResponse) FileLastWriteTime() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (fcr FileCreateResponse) FileParentID() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (fcr FileCreateResponse) FilePermissionKey() string {
+ return fcr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (fcr FileCreateResponse) IsServerEncrypted() string {
+ return fcr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (fcr FileCreateResponse) LastModified() time.Time {
+ s := fcr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fcr FileCreateResponse) RequestID() string {
+ return fcr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fcr FileCreateResponse) Version() string {
+ return fcr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileDeleteResponse ...
+type FileDeleteResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (fdr FileDeleteResponse) Response() *http.Response {
+ return fdr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fdr FileDeleteResponse) StatusCode() int {
+ return fdr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fdr FileDeleteResponse) Status() string {
+ return fdr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (fdr FileDeleteResponse) Date() time.Time {
+ s := fdr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fdr FileDeleteResponse) ErrorCode() string {
+ return fdr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fdr FileDeleteResponse) RequestID() string {
+ return fdr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fdr FileDeleteResponse) Version() string {
+ return fdr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileForceCloseHandlesResponse ...
+type FileForceCloseHandlesResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (ffchr FileForceCloseHandlesResponse) Response() *http.Response {
+ return ffchr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ffchr FileForceCloseHandlesResponse) StatusCode() int {
+ return ffchr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ffchr FileForceCloseHandlesResponse) Status() string {
+ return ffchr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (ffchr FileForceCloseHandlesResponse) Date() time.Time {
+ s := ffchr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ffchr FileForceCloseHandlesResponse) ErrorCode() string {
+ return ffchr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// Marker returns the value for header x-ms-marker.
+func (ffchr FileForceCloseHandlesResponse) Marker() string {
+ return ffchr.rawResponse.Header.Get("x-ms-marker")
+}
+
+// NumberOfHandlesClosed returns the value for header x-ms-number-of-handles-closed.
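+// A missing header is reported as -1; a value that fails to parse is reported as 0.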
+func (ffchr FileForceCloseHandlesResponse) NumberOfHandlesClosed() int32 {
+ s := ffchr.rawResponse.Header.Get("x-ms-number-of-handles-closed")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ i = 0
+ }
+ return int32(i)
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ffchr FileForceCloseHandlesResponse) RequestID() string {
+ return ffchr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ffchr FileForceCloseHandlesResponse) Version() string {
+ return ffchr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileGetPropertiesResponse ...
+type FileGetPropertiesResponse struct {
+ rawResponse *http.Response
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (fgpr FileGetPropertiesResponse) NewMetadata() Metadata {
+ md := Metadata{}
+ for k, v := range fgpr.rawResponse.Header {
+ if len(k) > mdPrefixLen {
+ if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
+ md[strings.ToLower(k[mdPrefixLen:])] = v[0]
+ }
+ }
+ }
+ return md
+}
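+
+// Usage sketch (illustrative, not a generated method): metadata travels as
+// HTTP headers carrying the "x-ms-meta-" prefix; NewMetadata strips that
+// prefix and lower-cases the remainder, so "X-Ms-Meta-Owner: alice" surfaces
+// as md["owner"] == "alice". Assuming fgpr holds a get-properties response:
+//
+//  for key, value := range fgpr.NewMetadata() {
+//      fmt.Printf("%s=%s\n", key, value) // e.g. owner=alice
+//  }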
+
+// Response returns the raw HTTP response object.
+func (fgpr FileGetPropertiesResponse) Response() *http.Response {
+ return fgpr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fgpr FileGetPropertiesResponse) StatusCode() int {
+ return fgpr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fgpr FileGetPropertiesResponse) Status() string {
+ return fgpr.rawResponse.Status
+}
+
+// CacheControl returns the value for header Cache-Control.
+func (fgpr FileGetPropertiesResponse) CacheControl() string {
+ return fgpr.rawResponse.Header.Get("Cache-Control")
+}
+
+// ContentDisposition returns the value for header Content-Disposition.
+func (fgpr FileGetPropertiesResponse) ContentDisposition() string {
+ return fgpr.rawResponse.Header.Get("Content-Disposition")
+}
+
+// ContentEncoding returns the value for header Content-Encoding.
+func (fgpr FileGetPropertiesResponse) ContentEncoding() string {
+ return fgpr.rawResponse.Header.Get("Content-Encoding")
+}
+
+// ContentLanguage returns the value for header Content-Language.
+func (fgpr FileGetPropertiesResponse) ContentLanguage() string {
+ return fgpr.rawResponse.Header.Get("Content-Language")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (fgpr FileGetPropertiesResponse) ContentLength() int64 {
+ s := fgpr.rawResponse.Header.Get("Content-Length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (fgpr FileGetPropertiesResponse) ContentMD5() []byte {
+ s := fgpr.rawResponse.Header.Get("Content-MD5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
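+
+// Integrity-check sketch (assumed call site, not part of the generated API):
+// the service sends Content-MD5 base64-encoded and ContentMD5 decodes it to
+// raw bytes, so it compares directly against a locally computed digest of the
+// downloaded content.
+//
+//  sum := md5.Sum(body) // body is an assumed []byte holding the file content
+//  if !bytes.Equal(sum[:], fgpr.ContentMD5()) {
+//      // content did not survive transit intact
+//  }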
+
+// ContentType returns the value for header Content-Type.
+func (fgpr FileGetPropertiesResponse) ContentType() string {
+ return fgpr.rawResponse.Header.Get("Content-Type")
+}
+
+// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
+func (fgpr FileGetPropertiesResponse) CopyCompletionTime() time.Time {
+ s := fgpr.rawResponse.Header.Get("x-ms-copy-completion-time")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (fgpr FileGetPropertiesResponse) CopyID() string {
+ return fgpr.rawResponse.Header.Get("x-ms-copy-id")
+}
+
+// CopyProgress returns the value for header x-ms-copy-progress.
+func (fgpr FileGetPropertiesResponse) CopyProgress() string {
+ return fgpr.rawResponse.Header.Get("x-ms-copy-progress")
+}
+
+// CopySource returns the value for header x-ms-copy-source.
+func (fgpr FileGetPropertiesResponse) CopySource() string {
+ return fgpr.rawResponse.Header.Get("x-ms-copy-source")
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (fgpr FileGetPropertiesResponse) CopyStatus() CopyStatusType {
+ return CopyStatusType(fgpr.rawResponse.Header.Get("x-ms-copy-status"))
+}
+
+// CopyStatusDescription returns the value for header x-ms-copy-status-description.
+func (fgpr FileGetPropertiesResponse) CopyStatusDescription() string {
+ return fgpr.rawResponse.Header.Get("x-ms-copy-status-description")
+}
+
+// Date returns the value for header Date.
+func (fgpr FileGetPropertiesResponse) Date() time.Time {
+ s := fgpr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fgpr FileGetPropertiesResponse) ErrorCode() string {
+ return fgpr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (fgpr FileGetPropertiesResponse) ETag() ETag {
+ return ETag(fgpr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (fgpr FileGetPropertiesResponse) FileAttributes() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (fgpr FileGetPropertiesResponse) FileChangeTime() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (fgpr FileGetPropertiesResponse) FileCreationTime() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (fgpr FileGetPropertiesResponse) FileID() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (fgpr FileGetPropertiesResponse) FileLastWriteTime() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (fgpr FileGetPropertiesResponse) FileParentID() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (fgpr FileGetPropertiesResponse) FilePermissionKey() string {
+ return fgpr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// FileType returns the value for header x-ms-type.
+func (fgpr FileGetPropertiesResponse) FileType() string {
+ return fgpr.rawResponse.Header.Get("x-ms-type")
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (fgpr FileGetPropertiesResponse) IsServerEncrypted() string {
+ return fgpr.rawResponse.Header.Get("x-ms-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (fgpr FileGetPropertiesResponse) LastModified() time.Time {
+ s := fgpr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fgpr FileGetPropertiesResponse) RequestID() string {
+ return fgpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fgpr FileGetPropertiesResponse) Version() string {
+ return fgpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileItem - A listed file item.
+//type FileItem struct {
+// // XMLName is used for marshalling and is subject to removal in a future release.
+// XMLName xml.Name `xml:"File"`
+// Name string `xml:"Name"`
+// Properties FileProperty `xml:"Properties"`
+//}
+
+// FileProperty - File properties.
+type FileProperty struct {
+ // ContentLength - Content length of the file. This value may not be up-to-date since an SMB client may have modified the file locally. The value of Content-Length may not reflect that fact until the handle is closed or the op-lock is broken. To retrieve current property values, call Get File Properties.
+ ContentLength int64 `xml:"Content-Length"`
+}
+
+// FilesAndDirectoriesListSegment - The directory and file entries returned when listing a directory.
+type FilesAndDirectoriesListSegment struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Entries"`
+ DirectoryItems []DirectoryItem `xml:"Directory"`
+ FileItems []FileItem `xml:"File"`
+}
+
+// FileSetHTTPHeadersResponse ...
+type FileSetHTTPHeadersResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (fshhr FileSetHTTPHeadersResponse) Response() *http.Response {
+ return fshhr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fshhr FileSetHTTPHeadersResponse) StatusCode() int {
+ return fshhr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fshhr FileSetHTTPHeadersResponse) Status() string {
+ return fshhr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (fshhr FileSetHTTPHeadersResponse) Date() time.Time {
+ s := fshhr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fshhr FileSetHTTPHeadersResponse) ErrorCode() string {
+ return fshhr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (fshhr FileSetHTTPHeadersResponse) ETag() ETag {
+ return ETag(fshhr.rawResponse.Header.Get("ETag"))
+}
+
+// FileAttributes returns the value for header x-ms-file-attributes.
+func (fshhr FileSetHTTPHeadersResponse) FileAttributes() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-attributes")
+}
+
+// FileChangeTime returns the value for header x-ms-file-change-time.
+func (fshhr FileSetHTTPHeadersResponse) FileChangeTime() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-change-time")
+}
+
+// FileCreationTime returns the value for header x-ms-file-creation-time.
+func (fshhr FileSetHTTPHeadersResponse) FileCreationTime() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-creation-time")
+}
+
+// FileID returns the value for header x-ms-file-id.
+func (fshhr FileSetHTTPHeadersResponse) FileID() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-id")
+}
+
+// FileLastWriteTime returns the value for header x-ms-file-last-write-time.
+func (fshhr FileSetHTTPHeadersResponse) FileLastWriteTime() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-last-write-time")
+}
+
+// FileParentID returns the value for header x-ms-file-parent-id.
+func (fshhr FileSetHTTPHeadersResponse) FileParentID() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-parent-id")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (fshhr FileSetHTTPHeadersResponse) FilePermissionKey() string {
+ return fshhr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (fshhr FileSetHTTPHeadersResponse) IsServerEncrypted() string {
+ return fshhr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (fshhr FileSetHTTPHeadersResponse) LastModified() time.Time {
+ s := fshhr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fshhr FileSetHTTPHeadersResponse) RequestID() string {
+ return fshhr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fshhr FileSetHTTPHeadersResponse) Version() string {
+ return fshhr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileSetMetadataResponse ...
+type FileSetMetadataResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (fsmr FileSetMetadataResponse) Response() *http.Response {
+ return fsmr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fsmr FileSetMetadataResponse) StatusCode() int {
+ return fsmr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fsmr FileSetMetadataResponse) Status() string {
+ return fsmr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (fsmr FileSetMetadataResponse) Date() time.Time {
+ s := fsmr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fsmr FileSetMetadataResponse) ErrorCode() string {
+ return fsmr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (fsmr FileSetMetadataResponse) ETag() ETag {
+ return ETag(fsmr.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (fsmr FileSetMetadataResponse) IsServerEncrypted() string {
+ return fsmr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fsmr FileSetMetadataResponse) RequestID() string {
+ return fsmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fsmr FileSetMetadataResponse) Version() string {
+ return fsmr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileStartCopyResponse ...
+type FileStartCopyResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (fscr FileStartCopyResponse) Response() *http.Response {
+ return fscr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fscr FileStartCopyResponse) StatusCode() int {
+ return fscr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fscr FileStartCopyResponse) Status() string {
+ return fscr.rawResponse.Status
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (fscr FileStartCopyResponse) CopyID() string {
+ return fscr.rawResponse.Header.Get("x-ms-copy-id")
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (fscr FileStartCopyResponse) CopyStatus() CopyStatusType {
+ return CopyStatusType(fscr.rawResponse.Header.Get("x-ms-copy-status"))
+}
+
+// Date returns the value for header Date.
+func (fscr FileStartCopyResponse) Date() time.Time {
+ s := fscr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fscr FileStartCopyResponse) ErrorCode() string {
+ return fscr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (fscr FileStartCopyResponse) ETag() ETag {
+ return ETag(fscr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (fscr FileStartCopyResponse) LastModified() time.Time {
+ s := fscr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fscr FileStartCopyResponse) RequestID() string {
+ return fscr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fscr FileStartCopyResponse) Version() string {
+ return fscr.rawResponse.Header.Get("x-ms-version")
+}
+
+// FileUploadRangeFromURLResponse ...
+type FileUploadRangeFromURLResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (furfur FileUploadRangeFromURLResponse) Response() *http.Response {
+ return furfur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (furfur FileUploadRangeFromURLResponse) StatusCode() int {
+ return furfur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (furfur FileUploadRangeFromURLResponse) Status() string {
+ return furfur.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (furfur FileUploadRangeFromURLResponse) Date() time.Time {
+ s := furfur.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (furfur FileUploadRangeFromURLResponse) ErrorCode() string {
+ return furfur.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (furfur FileUploadRangeFromURLResponse) ETag() ETag {
+ return ETag(furfur.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (furfur FileUploadRangeFromURLResponse) IsServerEncrypted() string {
+ return furfur.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (furfur FileUploadRangeFromURLResponse) LastModified() time.Time {
+ s := furfur.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (furfur FileUploadRangeFromURLResponse) RequestID() string {
+ return furfur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (furfur FileUploadRangeFromURLResponse) Version() string {
+ return furfur.rawResponse.Header.Get("x-ms-version")
+}
+
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (furfur FileUploadRangeFromURLResponse) XMsContentCrc64() []byte {
+ s := furfur.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// FileUploadRangeResponse ...
+type FileUploadRangeResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (furr FileUploadRangeResponse) Response() *http.Response {
+ return furr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (furr FileUploadRangeResponse) StatusCode() int {
+ return furr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (furr FileUploadRangeResponse) Status() string {
+ return furr.rawResponse.Status
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (furr FileUploadRangeResponse) ContentMD5() []byte {
+ s := furr.rawResponse.Header.Get("Content-MD5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// Date returns the value for header Date.
+func (furr FileUploadRangeResponse) Date() time.Time {
+ s := furr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (furr FileUploadRangeResponse) ErrorCode() string {
+ return furr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (furr FileUploadRangeResponse) ETag() ETag {
+ return ETag(furr.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (furr FileUploadRangeResponse) IsServerEncrypted() string {
+ return furr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (furr FileUploadRangeResponse) LastModified() time.Time {
+ s := furr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (furr FileUploadRangeResponse) RequestID() string {
+ return furr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (furr FileUploadRangeResponse) Version() string {
+ return furr.rawResponse.Header.Get("x-ms-version")
+}
+
+// HandleItem - A listed Azure Storage handle item.
+type HandleItem struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Handle"`
+ // HandleID - XSMB service handle ID.
+ HandleID string `xml:"HandleId"`
+ // Path - File or directory name, including the full path starting from the share root.
+ Path string `xml:"Path"`
+ // FileID - FileId uniquely identifies the file or directory.
+ FileID string `xml:"FileId"`
+ // ParentID - ParentId uniquely identifies the parent directory of the object.
+ ParentID *string `xml:"ParentId"`
+ // SessionID - The SMB session ID in whose context the file handle was opened.
+ SessionID string `xml:"SessionId"`
+ // ClientIP - The client IP that opened the handle.
+ ClientIP string `xml:"ClientIp"`
+ // OpenTime - Time when the session that previously opened the handle was last reconnected (UTC).
+ OpenTime time.Time `xml:"OpenTime"`
+ // LastReconnectTime - Time the handle was last connected to (UTC).
+ LastReconnectTime *time.Time `xml:"LastReconnectTime"`
+}
+
+// MarshalXML implements the xml.Marshaler interface for HandleItem.
+func (hi HandleItem) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ hi2 := (*handleItem)(unsafe.Pointer(&hi))
+ return e.EncodeElement(*hi2, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaler interface for HandleItem.
+func (hi *HandleItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ hi2 := (*handleItem)(unsafe.Pointer(hi))
+ return d.DecodeElement(hi2, &start)
+}
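+
+// Note on the cast above: handleItem (unexported, defined alongside the other
+// internal mirrors in this package) is assumed to match HandleItem field for
+// field, differing only in wrapper types such as timeRFC1123 for the time
+// fields so that encoding/xml picks up their MarshalText/UnmarshalText
+// methods. Since both structs share one memory layout, the unsafe.Pointer
+// conversion is a zero-copy reinterpretation; the init function at the bottom
+// of this file asserts the two sizes stay equal.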
+
+//// ListFilesAndDirectoriesSegmentResponse - An enumeration of directories and files.
+//type ListFilesAndDirectoriesSegmentResponse struct {
+// rawResponse *http.Response
+// // XMLName is used for marshalling and is subject to removal in a future release.
+// XMLName xml.Name `xml:"EnumerationResults"`
+// ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
+// ShareName string `xml:"ShareName,attr"`
+// ShareSnapshot *string `xml:"ShareSnapshot,attr"`
+// DirectoryPath string `xml:"DirectoryPath,attr"`
+// Prefix string `xml:"Prefix"`
+// Marker *string `xml:"Marker"`
+// MaxResults *int32 `xml:"MaxResults"`
+// Segment FilesAndDirectoriesListSegment `xml:"Entries"`
+// NextMarker Marker `xml:"NextMarker"`
+//}
+//
+//// Response returns the raw HTTP response object.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) Response() *http.Response {
+// return lfadsr.rawResponse
+//}
+//
+//// StatusCode returns the HTTP status code of the response, e.g. 200.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) StatusCode() int {
+// return lfadsr.rawResponse.StatusCode
+//}
+//
+//// Status returns the HTTP status message of the response, e.g. "200 OK".
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) Status() string {
+// return lfadsr.rawResponse.Status
+//}
+//
+//// ContentType returns the value for header Content-Type.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) ContentType() string {
+// return lfadsr.rawResponse.Header.Get("Content-Type")
+//}
+//
+//// Date returns the value for header Date.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) Date() time.Time {
+// s := lfadsr.rawResponse.Header.Get("Date")
+// if s == "" {
+// return time.Time{}
+// }
+// t, err := time.Parse(time.RFC1123, s)
+// if err != nil {
+// t = time.Time{}
+// }
+// return t
+//}
+//
+//// ErrorCode returns the value for header x-ms-error-code.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) ErrorCode() string {
+// return lfadsr.rawResponse.Header.Get("x-ms-error-code")
+//}
+//
+//// RequestID returns the value for header x-ms-request-id.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) RequestID() string {
+// return lfadsr.rawResponse.Header.Get("x-ms-request-id")
+//}
+//
+//// Version returns the value for header x-ms-version.
+//func (lfadsr ListFilesAndDirectoriesSegmentResponse) Version() string {
+// return lfadsr.rawResponse.Header.Get("x-ms-version")
+//}
+
+// ListHandlesResponse - An enumeration of handles.
+type ListHandlesResponse struct {
+ rawResponse *http.Response
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"EnumerationResults"`
+ HandleList []HandleItem `xml:"Entries>Handle"`
+ NextMarker string `xml:"NextMarker"`
+}
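+
+// Pagination sketch (the client method name and signature are assumptions;
+// the real call lives elsewhere in this package): NextMarker is non-empty
+// while more handles remain and is passed back verbatim on the next request.
+//
+//  marker := ""
+//  for {
+//      resp, err := client.ListHandles(ctx, marker) // hypothetical signature
+//      if err != nil {
+//          return err
+//      }
+//      handles = append(handles, resp.HandleList...)
+//      if resp.NextMarker == "" {
+//          break
+//      }
+//      marker = resp.NextMarker
+//  }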
+
+// Response returns the raw HTTP response object.
+func (lhr ListHandlesResponse) Response() *http.Response {
+ return lhr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (lhr ListHandlesResponse) StatusCode() int {
+ return lhr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (lhr ListHandlesResponse) Status() string {
+ return lhr.rawResponse.Status
+}
+
+// ContentType returns the value for header Content-Type.
+func (lhr ListHandlesResponse) ContentType() string {
+ return lhr.rawResponse.Header.Get("Content-Type")
+}
+
+// Date returns the value for header Date.
+func (lhr ListHandlesResponse) Date() time.Time {
+ s := lhr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (lhr ListHandlesResponse) ErrorCode() string {
+ return lhr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (lhr ListHandlesResponse) RequestID() string {
+ return lhr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (lhr ListHandlesResponse) Version() string {
+ return lhr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ListSharesResponse - An enumeration of shares.
+type ListSharesResponse struct {
+ rawResponse *http.Response
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"EnumerationResults"`
+ ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
+ Prefix *string `xml:"Prefix"`
+ Marker *string `xml:"Marker"`
+ MaxResults *int32 `xml:"MaxResults"`
+ ShareItems []ShareItem `xml:"Shares>Share"`
+ NextMarker Marker `xml:"NextMarker"`
+}
+
+// Response returns the raw HTTP response object.
+func (lsr ListSharesResponse) Response() *http.Response {
+ return lsr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (lsr ListSharesResponse) StatusCode() int {
+ return lsr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (lsr ListSharesResponse) Status() string {
+ return lsr.rawResponse.Status
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (lsr ListSharesResponse) ErrorCode() string {
+ return lsr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (lsr ListSharesResponse) RequestID() string {
+ return lsr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (lsr ListSharesResponse) Version() string {
+ return lsr.rawResponse.Header.Get("x-ms-version")
+}
+
+// Metrics - Storage Analytics metrics for file service.
+type Metrics struct {
+ // Version - The version of Storage Analytics to configure.
+ Version string `xml:"Version"`
+ // Enabled - Indicates whether metrics are enabled for the File service.
+ Enabled bool `xml:"Enabled"`
+ // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations.
+ IncludeAPIs *bool `xml:"IncludeAPIs"`
+ RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"`
+}
+
+// Range - An Azure Storage file range.
+type Range struct {
+ // Start - Start of the range.
+ Start int64 `xml:"Start"`
+ // End - End of the range.
+ End int64 `xml:"End"`
+}
+
+// Ranges - Wraps the response from the fileClient.GetRangeList method.
+type Ranges struct {
+ rawResponse *http.Response
+ Items []Range `xml:"Range"`
+}
+
+// Response returns the raw HTTP response object.
+func (r Ranges) Response() *http.Response {
+ return r.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (r Ranges) StatusCode() int {
+ return r.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (r Ranges) Status() string {
+ return r.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (r Ranges) Date() time.Time {
+ s := r.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (r Ranges) ErrorCode() string {
+ return r.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (r Ranges) ETag() ETag {
+ return ETag(r.rawResponse.Header.Get("ETag"))
+}
+
+// FileContentLength returns the value for header x-ms-content-length.
+func (r Ranges) FileContentLength() int64 {
+ s := r.rawResponse.Header.Get("x-ms-content-length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// LastModified returns the value for header Last-Modified.
+func (r Ranges) LastModified() time.Time {
+ s := r.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (r Ranges) RequestID() string {
+ return r.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (r Ranges) Version() string {
+ return r.rawResponse.Header.Get("x-ms-version")
+}
+
+// RetentionPolicy - The retention policy.
+type RetentionPolicy struct {
+ // Enabled - Indicates whether a retention policy is enabled for the File service. If false, metrics data is retained, and the user is responsible for deleting it.
+ Enabled bool `xml:"Enabled"`
+ // Days - Indicates the number of days that metrics data should be retained. All data older than this value will be deleted. Metrics data is deleted on a best-effort basis after the retention period expires.
+ Days *int32 `xml:"Days"`
+}
+
+// ServiceSetPropertiesResponse ...
+type ServiceSetPropertiesResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (sspr ServiceSetPropertiesResponse) Response() *http.Response {
+ return sspr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (sspr ServiceSetPropertiesResponse) StatusCode() int {
+ return sspr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (sspr ServiceSetPropertiesResponse) Status() string {
+ return sspr.rawResponse.Status
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (sspr ServiceSetPropertiesResponse) ErrorCode() string {
+ return sspr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (sspr ServiceSetPropertiesResponse) RequestID() string {
+ return sspr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (sspr ServiceSetPropertiesResponse) Version() string {
+ return sspr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareCreatePermissionResponse ...
+type ShareCreatePermissionResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (scpr ShareCreatePermissionResponse) Response() *http.Response {
+ return scpr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (scpr ShareCreatePermissionResponse) StatusCode() int {
+ return scpr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (scpr ShareCreatePermissionResponse) Status() string {
+ return scpr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (scpr ShareCreatePermissionResponse) Date() time.Time {
+ s := scpr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (scpr ShareCreatePermissionResponse) ErrorCode() string {
+ return scpr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// FilePermissionKey returns the value for header x-ms-file-permission-key.
+func (scpr ShareCreatePermissionResponse) FilePermissionKey() string {
+ return scpr.rawResponse.Header.Get("x-ms-file-permission-key")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (scpr ShareCreatePermissionResponse) RequestID() string {
+ return scpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (scpr ShareCreatePermissionResponse) Version() string {
+ return scpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareCreateResponse ...
+type ShareCreateResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (scr ShareCreateResponse) Response() *http.Response {
+ return scr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (scr ShareCreateResponse) StatusCode() int {
+ return scr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (scr ShareCreateResponse) Status() string {
+ return scr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (scr ShareCreateResponse) Date() time.Time {
+ s := scr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (scr ShareCreateResponse) ErrorCode() string {
+ return scr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (scr ShareCreateResponse) ETag() ETag {
+ return ETag(scr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (scr ShareCreateResponse) LastModified() time.Time {
+ s := scr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (scr ShareCreateResponse) RequestID() string {
+ return scr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (scr ShareCreateResponse) Version() string {
+ return scr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareCreateSnapshotResponse ...
+type ShareCreateSnapshotResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (scsr ShareCreateSnapshotResponse) Response() *http.Response {
+ return scsr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (scsr ShareCreateSnapshotResponse) StatusCode() int {
+ return scsr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (scsr ShareCreateSnapshotResponse) Status() string {
+ return scsr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (scsr ShareCreateSnapshotResponse) Date() time.Time {
+ s := scsr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (scsr ShareCreateSnapshotResponse) ErrorCode() string {
+ return scsr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (scsr ShareCreateSnapshotResponse) ETag() ETag {
+ return ETag(scsr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (scsr ShareCreateSnapshotResponse) LastModified() time.Time {
+ s := scsr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (scsr ShareCreateSnapshotResponse) RequestID() string {
+ return scsr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Snapshot returns the value for header x-ms-snapshot.
+func (scsr ShareCreateSnapshotResponse) Snapshot() string {
+ return scsr.rawResponse.Header.Get("x-ms-snapshot")
+}
+
+// Version returns the value for header x-ms-version.
+func (scsr ShareCreateSnapshotResponse) Version() string {
+ return scsr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareDeleteResponse ...
+type ShareDeleteResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (sdr ShareDeleteResponse) Response() *http.Response {
+ return sdr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (sdr ShareDeleteResponse) StatusCode() int {
+ return sdr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (sdr ShareDeleteResponse) Status() string {
+ return sdr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (sdr ShareDeleteResponse) Date() time.Time {
+ s := sdr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (sdr ShareDeleteResponse) ErrorCode() string {
+ return sdr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (sdr ShareDeleteResponse) RequestID() string {
+ return sdr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (sdr ShareDeleteResponse) Version() string {
+ return sdr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareGetPropertiesResponse ...
+type ShareGetPropertiesResponse struct {
+ rawResponse *http.Response
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (sgpr ShareGetPropertiesResponse) NewMetadata() Metadata {
+ md := Metadata{}
+ for k, v := range sgpr.rawResponse.Header {
+ if len(k) > mdPrefixLen {
+ if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
+ md[strings.ToLower(k[mdPrefixLen:])] = v[0]
+ }
+ }
+ }
+ return md
+}
+
+// Response returns the raw HTTP response object.
+func (sgpr ShareGetPropertiesResponse) Response() *http.Response {
+ return sgpr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (sgpr ShareGetPropertiesResponse) StatusCode() int {
+ return sgpr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (sgpr ShareGetPropertiesResponse) Status() string {
+ return sgpr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (sgpr ShareGetPropertiesResponse) Date() time.Time {
+ s := sgpr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (sgpr ShareGetPropertiesResponse) ErrorCode() string {
+ return sgpr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (sgpr ShareGetPropertiesResponse) ETag() ETag {
+ return ETag(sgpr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (sgpr ShareGetPropertiesResponse) LastModified() time.Time {
+ s := sgpr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// Quota returns the value for header x-ms-share-quota.
+func (sgpr ShareGetPropertiesResponse) Quota() int32 {
+ s := sgpr.rawResponse.Header.Get("x-ms-share-quota")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ i = 0
+ }
+ return int32(i)
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (sgpr ShareGetPropertiesResponse) RequestID() string {
+ return sgpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (sgpr ShareGetPropertiesResponse) Version() string {
+ return sgpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareItem - A listed Azure Storage share item.
+type ShareItem struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Share"`
+ Name string `xml:"Name"`
+ Snapshot *string `xml:"Snapshot"`
+ Properties ShareProperties `xml:"Properties"`
+ Metadata Metadata `xml:"Metadata"`
+}
+
+// SharePermission - A permission (a security descriptor) at the share level.
+type SharePermission struct {
+ rawResponse *http.Response
+ // Permission - The permission in the Security Descriptor Definition Language (SDDL).
+ Permission string `xml:"permission"`
+}
+
+// Response returns the raw HTTP response object.
+func (sp SharePermission) Response() *http.Response {
+ return sp.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (sp SharePermission) StatusCode() int {
+ return sp.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (sp SharePermission) Status() string {
+ return sp.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (sp SharePermission) Date() time.Time {
+ s := sp.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (sp SharePermission) ErrorCode() string {
+ return sp.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (sp SharePermission) RequestID() string {
+ return sp.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (sp SharePermission) Version() string {
+ return sp.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareProperties - Properties of a share.
+type ShareProperties struct {
+ LastModified time.Time `xml:"Last-Modified"`
+ Etag ETag `xml:"Etag"`
+ Quota int32 `xml:"Quota"`
+}
+
+// MarshalXML implements the xml.Marshaler interface for ShareProperties.
+func (sp ShareProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ sp2 := (*shareProperties)(unsafe.Pointer(&sp))
+ return e.EncodeElement(*sp2, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaler interface for ShareProperties.
+func (sp *ShareProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ sp2 := (*shareProperties)(unsafe.Pointer(sp))
+ return d.DecodeElement(sp2, &start)
+}
+
+// ShareSetAccessPolicyResponse ...
+type ShareSetAccessPolicyResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (ssapr ShareSetAccessPolicyResponse) Response() *http.Response {
+ return ssapr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ssapr ShareSetAccessPolicyResponse) StatusCode() int {
+ return ssapr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ssapr ShareSetAccessPolicyResponse) Status() string {
+ return ssapr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (ssapr ShareSetAccessPolicyResponse) Date() time.Time {
+ s := ssapr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ssapr ShareSetAccessPolicyResponse) ErrorCode() string {
+ return ssapr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (ssapr ShareSetAccessPolicyResponse) ETag() ETag {
+ return ETag(ssapr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (ssapr ShareSetAccessPolicyResponse) LastModified() time.Time {
+ s := ssapr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ssapr ShareSetAccessPolicyResponse) RequestID() string {
+ return ssapr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ssapr ShareSetAccessPolicyResponse) Version() string {
+ return ssapr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareSetMetadataResponse ...
+type ShareSetMetadataResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (ssmr ShareSetMetadataResponse) Response() *http.Response {
+ return ssmr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ssmr ShareSetMetadataResponse) StatusCode() int {
+ return ssmr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ssmr ShareSetMetadataResponse) Status() string {
+ return ssmr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (ssmr ShareSetMetadataResponse) Date() time.Time {
+ s := ssmr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ssmr ShareSetMetadataResponse) ErrorCode() string {
+ return ssmr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (ssmr ShareSetMetadataResponse) ETag() ETag {
+ return ETag(ssmr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (ssmr ShareSetMetadataResponse) LastModified() time.Time {
+ s := ssmr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ssmr ShareSetMetadataResponse) RequestID() string {
+ return ssmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ssmr ShareSetMetadataResponse) Version() string {
+ return ssmr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareSetQuotaResponse ...
+type ShareSetQuotaResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (ssqr ShareSetQuotaResponse) Response() *http.Response {
+ return ssqr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ssqr ShareSetQuotaResponse) StatusCode() int {
+ return ssqr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ssqr ShareSetQuotaResponse) Status() string {
+ return ssqr.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (ssqr ShareSetQuotaResponse) Date() time.Time {
+ s := ssqr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ssqr ShareSetQuotaResponse) ErrorCode() string {
+ return ssqr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (ssqr ShareSetQuotaResponse) ETag() ETag {
+ return ETag(ssqr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (ssqr ShareSetQuotaResponse) LastModified() time.Time {
+ s := ssqr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ssqr ShareSetQuotaResponse) RequestID() string {
+ return ssqr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ssqr ShareSetQuotaResponse) Version() string {
+ return ssqr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ShareStats - Stats for the share.
+type ShareStats struct {
+ rawResponse *http.Response
+ // ShareUsageBytes - The approximate size of the data stored in bytes. Note that this value may not include all recently created or recently resized files.
+ ShareUsageBytes int32 `xml:"ShareUsageBytes"`
+}
+
+// Response returns the raw HTTP response object.
+func (ss ShareStats) Response() *http.Response {
+ return ss.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ss ShareStats) StatusCode() int {
+ return ss.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ss ShareStats) Status() string {
+ return ss.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (ss ShareStats) Date() time.Time {
+ s := ss.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ss ShareStats) ErrorCode() string {
+ return ss.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (ss ShareStats) ETag() ETag {
+ return ETag(ss.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (ss ShareStats) LastModified() time.Time {
+ s := ss.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ss ShareStats) RequestID() string {
+ return ss.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ss ShareStats) Version() string {
+ return ss.rawResponse.Header.Get("x-ms-version")
+}
+
+// SignedIdentifier - Signed identifier.
+type SignedIdentifier struct {
+ // ID - A unique ID.
+ ID string `xml:"Id"`
+ // AccessPolicy - The access policy.
+ AccessPolicy *AccessPolicy `xml:"AccessPolicy"`
+}
+
+// SignedIdentifiers - Wraps the response from the shareClient.GetAccessPolicy method.
+type SignedIdentifiers struct {
+ rawResponse *http.Response
+ Items []SignedIdentifier `xml:"SignedIdentifier"`
+}
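+
+// Consumption sketch (illustrative only): each entry pairs a stored-policy ID
+// with an optional AccessPolicy, so callers typically scan Items for the
+// identifier they care about.
+//
+//  for _, id := range si.Items {
+//      if id.AccessPolicy != nil {
+//          fmt.Println("policy", id.ID, "is populated")
+//      }
+//  }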
+
+// Response returns the raw HTTP response object.
+func (si SignedIdentifiers) Response() *http.Response {
+ return si.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (si SignedIdentifiers) StatusCode() int {
+ return si.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (si SignedIdentifiers) Status() string {
+ return si.rawResponse.Status
+}
+
+// Date returns the value for header Date.
+func (si SignedIdentifiers) Date() time.Time {
+ s := si.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (si SignedIdentifiers) ErrorCode() string {
+ return si.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (si SignedIdentifiers) ETag() ETag {
+ return ETag(si.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (si SignedIdentifiers) LastModified() time.Time {
+ s := si.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (si SignedIdentifiers) RequestID() string {
+ return si.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (si SignedIdentifiers) Version() string {
+ return si.rawResponse.Header.Get("x-ms-version")
+}
+
+// StorageError ...
+// type StorageError struct {
+// Message *string `xml:"Message"`
+// }
+
+// StorageServiceProperties - Storage service properties.
+type StorageServiceProperties struct {
+ rawResponse *http.Response
+ // HourMetrics - A summary of request statistics grouped by API in hourly aggregates for files.
+ HourMetrics *Metrics `xml:"HourMetrics"`
+ // MinuteMetrics - A summary of request statistics grouped by API in minute aggregates for files.
+ MinuteMetrics *Metrics `xml:"MinuteMetrics"`
+ // Cors - The set of CORS rules.
+ Cors []CorsRule `xml:"Cors>CorsRule"`
+}
+
+// Response returns the raw HTTP response object.
+func (ssp StorageServiceProperties) Response() *http.Response {
+ return ssp.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ssp StorageServiceProperties) StatusCode() int {
+ return ssp.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ssp StorageServiceProperties) Status() string {
+ return ssp.rawResponse.Status
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ssp StorageServiceProperties) ErrorCode() string {
+ return ssp.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ssp StorageServiceProperties) RequestID() string {
+ return ssp.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ssp StorageServiceProperties) Version() string {
+ return ssp.rawResponse.Header.Get("x-ms-version")
+}
+
+func init() {
+ if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() {
+ validateError(errors.New("size mismatch between AccessPolicy and accessPolicy"))
+ }
+ if reflect.TypeOf((*HandleItem)(nil)).Elem().Size() != reflect.TypeOf((*handleItem)(nil)).Elem().Size() {
+ validateError(errors.New("size mismatch between HandleItem and handleItem"))
+ }
+ if reflect.TypeOf((*ShareProperties)(nil)).Elem().Size() != reflect.TypeOf((*shareProperties)(nil)).Elem().Size() {
+ validateError(errors.New("size mismatch between ShareProperties and shareProperties"))
+ }
+}
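+
+// These checks guard the unsafe, zero-copy conversions the XML marshalling
+// helpers perform between each public type and its lower-case internal
+// counterpart (which swaps time.Time for the custom wrappers below). A
+// minimal sketch of the pattern being protected, assuming identical layouts:
+//
+//    func (ap AccessPolicy) toInternal() *accessPolicy {
+//        // Safe only because init verified the two layouts have the same size.
+//        return (*accessPolicy)(unsafe.Pointer(&ap))
+//    }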
+
+const (
+ rfc3339Format = "2006-01-02T15:04:05.0000000Z07:00"
+)
+
+// used to convert times from UTC to GMT before sending across the wire
+var gmt = time.FixedZone("GMT", 0)
+
+// internal type used for marshalling time in RFC1123 format
+type timeRFC1123 struct {
+ time.Time
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123.
+func (t timeRFC1123) MarshalText() ([]byte, error) {
+ return []byte(t.Format(time.RFC1123)), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123.
+func (t *timeRFC1123) UnmarshalText(data []byte) (err error) {
+ t.Time, err = time.Parse(time.RFC1123, string(data))
+ return
+}
+
+// internal type used for marshalling time in RFC3339 format
+type timeRFC3339 struct {
+ time.Time
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339.
+func (t timeRFC3339) MarshalText() ([]byte, error) {
+ return []byte(t.Format(rfc3339Format)), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339.
+func (t *timeRFC3339) UnmarshalText(data []byte) (err error) {
+ t.Time, err = time.Parse(rfc3339Format, string(data))
+ return
+}
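+
+// Round-trip sketch: the wrapper makes encoding/xml emit the service's
+// seven-digit fractional-second form rather than Go's default RFC3339:
+//
+//    t := timeRFC3339{Time: time.Date(2019, 11, 1, 12, 0, 0, 0, time.UTC)}
+//    b, _ := t.MarshalText() // "2019-11-01T12:00:00.0000000Z"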
+
+// internal type used for marshalling
+type accessPolicy struct {
+ Start *timeRFC3339 `xml:"Start"`
+ Expiry *timeRFC3339 `xml:"Expiry"`
+ Permission *string `xml:"Permission"`
+}
+
+// internal type used for marshalling
+type handleItem struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Handle"`
+ HandleID string `xml:"HandleId"`
+ Path string `xml:"Path"`
+ FileID string `xml:"FileId"`
+ ParentID *string `xml:"ParentId"`
+ SessionID string `xml:"SessionId"`
+ ClientIP string `xml:"ClientIp"`
+ OpenTime timeRFC1123 `xml:"OpenTime"`
+ LastReconnectTime *timeRFC1123 `xml:"LastReconnectTime"`
+}
+
+// internal type used for marshalling
+type shareProperties struct {
+ LastModified timeRFC1123 `xml:"Last-Modified"`
+ Etag ETag `xml:"Etag"`
+ Quota int32 `xml:"Quota"`
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_responder_policy.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_responder_policy.go
new file mode 100644
index 0000000..04ce203
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_responder_policy.go
@@ -0,0 +1,74 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "io/ioutil"
+)
+
+type responder func(resp pipeline.Response) (result pipeline.Response, err error)
+
+// responderPolicyFactory is a factory capable of creating responder policies.
+type responderPolicyFactory struct {
+ responder responder
+}
+
+// New creates a responder policy wrapping the next policy in the pipeline.
+func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+ return responderPolicy{next: next, responder: arpf.responder}
+}
+
+type responderPolicy struct {
+ next pipeline.Policy
+ responder responder
+}
+
+// Do sends the request to the service and validates/deserializes the HTTP response.
+func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+ resp, err := arp.next.Do(ctx, request)
+ if err != nil {
+ return resp, err
+ }
+ return arp.responder(resp)
+}
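+
+// The factory is injected per request rather than at pipeline construction:
+// client methods pass it to Pipeline().Do so that the operation-specific
+// responder runs after the shared retry/auth policies, e.g.
+//
+//    resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)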
+
+// validateResponse checks an HTTP response's status code against a legal set of codes.
+// If the response code is not legal, then validateResponse reads all of the response's body
+// (containing error information) and returns a response error.
+func validateResponse(resp pipeline.Response, successStatusCodes ...int) error {
+ if resp == nil {
+ return NewResponseError(nil, nil, "nil response")
+ }
+ responseCode := resp.Response().StatusCode
+ for _, i := range successStatusCodes {
+ if i == responseCode {
+ return nil
+ }
+ }
+ // only close the body in the failure case. in the
+ // success case responders will close the body as required.
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return err
+ }
+ // the service code, description and details will be populated during unmarshalling
+ responseError := NewResponseError(nil, resp.Response(), resp.Response().Status)
+ if len(b) > 0 {
+ if err = xml.Unmarshal(b, &responseError); err != nil {
+ return NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return responseError
+}
+
+// removes any BOM from the byte slice
+func removeBOM(b []byte) []byte {
+ // UTF8
+ return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+}
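+
+// Sketch: some service payloads begin with a UTF-8 byte-order mark; the
+// helper trims it so the bytes handed to xml.Unmarshal start at the markup:
+//
+//    b := removeBOM([]byte("\xef\xbb\xbf<Foo/>")) // yields "<Foo/>"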
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_response_error.go
new file mode 100644
index 0000000..34b27bd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_response_error.go
@@ -0,0 +1,95 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "net"
+ "net/http"
+)
+
+// If you want to provide custom error handling, set this variable to your constructor function.
+var responseErrorFactory func(cause error, response *http.Response, description string) error
+
+// ResponseError identifies a responder-generated network or response parsing error.
+type ResponseError interface {
+ // Error exposes the Error(), Temporary() and Timeout() methods.
+ net.Error // Includes the Go error interface
+ // Response returns the HTTP response. You may examine this but you should not modify it.
+ Response() *http.Response
+}
+
+// NewResponseError creates an error object that implements the error interface.
+func NewResponseError(cause error, response *http.Response, description string) error {
+ if responseErrorFactory != nil {
+ return responseErrorFactory(cause, response, description)
+ }
+ return &responseError{
+ ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
+ response: response,
+ description: description,
+ }
+}
+
+// responseError is the internal struct that implements the public ResponseError interface.
+type responseError struct {
+ pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause
+ response *http.Response
+ description string
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e *responseError) Error() string {
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode)
+ fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description)
+ s := b.String()
+ return e.ErrorNode.Error(s)
+}
+
+// Response implements the ResponseError interface's method to return the HTTP response.
+func (e *responseError) Response() *http.Response {
+ return e.response
+}
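+
+// Hedged usage sketch: callers can recover the raw HTTP response from a
+// returned error via a type assertion on the public interface:
+//
+//    if serr, ok := err.(ResponseError); ok && serr.Response() != nil {
+//        status := serr.Response().StatusCode // e.g. 404
+//    }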
+
+// RFC7807 PROBLEM ------------------------------------------------------------------------------------
+// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members.
+/*type RFC7807Problem struct {
+ // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation).
+ typeURI string // Should default to "about:blank"
+ // Optional: Short, human-readable summary (maybe localized).
+ title string
+ // Optional: HTTP status code generated by the origin server
+ status int
+ // Optional: Human-readable explanation for this problem occurrence.
+ // Should help client correct the problem. Clients should NOT parse this string.
+ detail string
+ // Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced).
+ instance string
+}
+// NewRFC7807Problem ...
+func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error {
+ return &RFC7807Problem{
+ typeURI: typeURI,
+ status: status,
+ title: fmt.Sprintf(titleFormat, a...),
+ }
+}
+// Error returns the error information as a string.
+func (e *RFC7807Problem) Error() string {
+ return e.title
+}
+// TypeURI ...
+func (e *RFC7807Problem) TypeURI() string {
+ if e.typeURI == "" {
+ e.typeURI = "about:blank"
+ }
+ return e.typeURI
+}
+// Members ...
+func (e *RFC7807Problem) Members() (status int, title, detail, instance string) {
+ return e.status, e.title, e.detail, e.instance
+}*/
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_service.go
new file mode 100644
index 0000000..760e27f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_service.go
@@ -0,0 +1,258 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// serviceClient is the client for the Service methods of the Azfile service.
+type serviceClient struct {
+ managementClient
+}
+
+// newServiceClient creates an instance of the serviceClient client.
+func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
+ return serviceClient{newManagementClient(url, p)}
+}
+
+// GetProperties gets the properties of a storage account's File service, including properties for Storage Analytics
+// metrics and CORS (Cross-Origin Resource Sharing) rules.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client serviceClient) GetProperties(ctx context.Context, timeout *int32) (*StorageServiceProperties, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getPropertiesPreparer(timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*StorageServiceProperties), err
+}
+
+// getPropertiesPreparer prepares the GetProperties request.
+func (client serviceClient) getPropertiesPreparer(timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "service")
+ params.Set("comp", "properties")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getPropertiesResponder handles the response to the GetProperties request.
+func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &StorageServiceProperties{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// ListSharesSegment returns a list of the shares and share snapshots under the specified account.
+//
+// prefix filters the results to return only entries whose name begins with the specified prefix. marker is a string
+// value that identifies the portion of the list to be returned with the next list operation. The operation returns a
+// marker value within the response body if the list returned was not complete. The marker value may then be used in a
+// subsequent call to request the next set of list items. The marker value is opaque to the client. maxresults
+// specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value
+// greater than 5,000, the server will return up to 5,000 items. include specifies one or more datasets to include in
+// the response. timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client serviceClient) ListSharesSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListSharesIncludeType, timeout *int32) (*ListSharesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: maxresults,
+ constraints: []constraint{{target: "maxresults", name: null, rule: false,
+ chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.listSharesSegmentPreparer(prefix, marker, maxresults, include, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listSharesSegmentResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ListSharesResponse), err
+}
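+
+// Pagination sketch (hypothetical: assumes an initialized serviceClient sc
+// and a NextMarker field on ListSharesResponse as declared in the models
+// file): feed each response's marker back until it comes back empty.
+//
+//    var marker *string
+//    for {
+//        page, err := sc.ListSharesSegment(ctx, nil, marker, nil, nil, nil)
+//        if err != nil {
+//            break
+//        }
+//        // ... consume page.ShareItems ...
+//        if page.NextMarker == nil || *page.NextMarker == "" {
+//            break
+//        }
+//        marker = page.NextMarker
+//    }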
+
+// listSharesSegmentPreparer prepares the ListSharesSegment request.
+func (client serviceClient) listSharesSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListSharesIncludeType, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if prefix != nil && len(*prefix) > 0 {
+ params.Set("prefix", *prefix)
+ }
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if maxresults != nil {
+ params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+ }
+ if len(include) > 0 {
+ params.Set("include", joinConst(include, ","))
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "list")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// listSharesSegmentResponder handles the response to the ListSharesSegment request.
+func (client serviceClient) listSharesSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &ListSharesResponse{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// SetProperties sets properties for a storage account's File service endpoint, including properties for Storage
+// Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
+//
+// storageServiceProperties is the storage service properties to set. timeout is the request timeout, expressed in
+// seconds. For more information, see Setting Timeouts for File Service Operations.
+func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32) (*ServiceSetPropertiesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: storageServiceProperties,
+ constraints: []constraint{{target: "storageServiceProperties.HourMetrics", name: null, rule: false,
+ chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false,
+ chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false,
+ chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMaximum, rule: 365, chain: nil},
+ {target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil},
+ }},
+ }},
+ }},
+ {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false,
+ chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false,
+ chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false,
+ chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMaximum, rule: 365, chain: nil},
+ {target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil},
+ }},
+ }},
+ }}}},
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setPropertiesPreparer(storageServiceProperties, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ServiceSetPropertiesResponse), err
+}
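+
+// Hedged sketch of a payload satisfying the validation above (Metrics and
+// RetentionPolicy field names are assumptions based on the models file):
+// Days, when a retention policy is set, must fall in [1, 365].
+//
+//    days := int32(7)
+//    props := StorageServiceProperties{
+//        HourMetrics: &Metrics{Version: "1.0", Enabled: true,
+//            RetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}},
+//    }
+//    _, err := client.SetProperties(ctx, props, nil)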
+
+// setPropertiesPreparer prepares the SetProperties request.
+func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "service")
+ params.Set("comp", "properties")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ b, err := xml.Marshal(storageServiceProperties)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to marshal request body")
+ }
+ req.Header.Set("Content-Type", "application/xml")
+ err = req.SetBody(bytes.NewReader(b))
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to set request body")
+ }
+ return req, nil
+}
+
+// setPropertiesResponder handles the response to the SetProperties request.
+func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_share.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_share.go
new file mode 100644
index 0000000..e6bf865
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_share.go
@@ -0,0 +1,694 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// shareClient is the client for the Share methods of the Azfile service.
+type shareClient struct {
+ managementClient
+}
+
+// newShareClient creates an instance of the shareClient client.
+func newShareClient(url url.URL, p pipeline.Pipeline) shareClient {
+ return shareClient{newManagementClient(url, p)}
+}
+
+// Create creates a new share under the specified account. If a share with the same name already exists, the
+// operation fails.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a set of name-value pairs to associate with a file storage
+// object. quota specifies the maximum size of the share, in gigabytes.
+func (client shareClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, quota *int32) (*ShareCreateResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
+ {targetValue: quota,
+ constraints: []constraint{{target: "quota", name: null, rule: false,
+ chain: []constraint{{target: "quota", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.createPreparer(timeout, metadata, quota)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareCreateResponse), err
+}
+
+// createPreparer prepares the Create request.
+func (client shareClient) createPreparer(timeout *int32, metadata map[string]string, quota *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ req.URL.RawQuery = params.Encode()
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ if quota != nil {
+ req.Header.Set("x-ms-share-quota", strconv.FormatInt(int64(*quota), 10))
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// createResponder handles the response to the Create request.
+func (client shareClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareCreateResponse{rawResponse: resp.Response()}, err
+}
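+
+// Hedged usage sketch (assumes an initialized shareClient sc): create a
+// 5 GiB share tagged with metadata, using the default timeout.
+//
+//    quota := int32(5)
+//    _, err := sc.Create(ctx, nil, map[string]string{"env": "dev"}, &quota)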
+
+// CreatePermission creates a permission (a security descriptor) at the share level.
+//
+// sharePermission is a permission (a security descriptor) at the share level. timeout is the request timeout,
+// expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client shareClient) CreatePermission(ctx context.Context, sharePermission SharePermission, timeout *int32) (*ShareCreatePermissionResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.createPermissionPreparer(sharePermission, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createPermissionResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareCreatePermissionResponse), err
+}
+
+// createPermissionPreparer prepares the CreatePermission request.
+func (client shareClient) createPermissionPreparer(sharePermission SharePermission, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "filepermission")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ b, err := xml.Marshal(sharePermission)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to marshal request body")
+ }
+ req.Header.Set("Content-Type", "application/xml")
+ err = req.SetBody(bytes.NewReader(b))
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to set request body")
+ }
+ return req, nil
+}
+
+// createPermissionResponder handles the response to the CreatePermission request.
+func (client shareClient) createPermissionResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareCreatePermissionResponse{rawResponse: resp.Response()}, err
+}
+
+// CreateSnapshot creates a read-only snapshot of a share.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a set of name-value pairs to associate with a file storage object.
+func (client shareClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string) (*ShareCreateSnapshotResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.createSnapshotPreparer(timeout, metadata)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareCreateSnapshotResponse), err
+}
+
+// createSnapshotPreparer prepares the CreateSnapshot request.
+func (client shareClient) createSnapshotPreparer(timeout *int32, metadata map[string]string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "snapshot")
+ req.URL.RawQuery = params.Encode()
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// createSnapshotResponder handles the response to the CreateSnapshot request.
+func (client shareClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareCreateSnapshotResponse{rawResponse: resp.Response()}, err
+}
+
+// Delete marks the specified share or share snapshot for deletion. The share or share snapshot and any files
+// contained within it are later deleted during garbage collection.
+//
+// sharesnapshot is an opaque DateTime value that, when present, specifies the share snapshot to delete. timeout is
+// the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. deleteSnapshots specifies the option "include" to delete the base share and
+// all of its snapshots.
+func (client shareClient) Delete(ctx context.Context, sharesnapshot *string, timeout *int32, deleteSnapshots DeleteSnapshotsOptionType) (*ShareDeleteResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.deletePreparer(sharesnapshot, timeout, deleteSnapshots)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareDeleteResponse), err
+}
+
+// deletePreparer prepares the Delete request.
+func (client shareClient) deletePreparer(sharesnapshot *string, timeout *int32, deleteSnapshots DeleteSnapshotsOptionType) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("DELETE", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if deleteSnapshots != DeleteSnapshotsOptionNone {
+ req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots))
+ }
+ return req, nil
+}
+
+// deleteResponder handles the response to the Delete request.
+func (client shareClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareDeleteResponse{rawResponse: resp.Response()}, err
+}
+
+// GetAccessPolicy returns information about stored access policies specified on the share.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client shareClient) GetAccessPolicy(ctx context.Context, timeout *int32) (*SignedIdentifiers, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getAccessPolicyPreparer(timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*SignedIdentifiers), err
+}
+
+// getAccessPolicyPreparer prepares the GetAccessPolicy request.
+func (client shareClient) getAccessPolicyPreparer(timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "acl")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getAccessPolicyResponder handles the response to the GetAccessPolicy request.
+func (client shareClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &SignedIdentifiers{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// GetPermission returns the permission (security descriptor) for a given key.
+//
+// filePermissionKey is the key of the permission to retrieve for the directory/file. Note: only one of
+// x-ms-file-permission or x-ms-file-permission-key should be specified. timeout is the request timeout, expressed
+// in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client shareClient) GetPermission(ctx context.Context, filePermissionKey *string, timeout *int32) (*SharePermission, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getPermissionPreparer(filePermissionKey, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPermissionResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*SharePermission), err
+}
+
+// getPermissionPreparer prepares the GetPermission request.
+func (client shareClient) getPermissionPreparer(filePermissionKey *string, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "filepermission")
+ req.URL.RawQuery = params.Encode()
+ if filePermissionKey != nil {
+ req.Header.Set("x-ms-file-permission-key", *filePermissionKey)
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getPermissionResponder handles the response to the GetPermission request.
+func (client shareClient) getPermissionResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &SharePermission{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// GetProperties returns all user-defined metadata and system properties for the specified share or share snapshot. The
+// data returned does not include the share's list of files.
+//
+// sharesnapshot is an opaque DateTime value that, when present, specifies the share snapshot to query. timeout is
+// the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client shareClient) GetProperties(ctx context.Context, sharesnapshot *string, timeout *int32) (*ShareGetPropertiesResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getPropertiesPreparer(sharesnapshot, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareGetPropertiesResponse), err
+}
+
+// getPropertiesPreparer prepares the GetProperties request.
+func (client shareClient) getPropertiesPreparer(sharesnapshot *string, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if sharesnapshot != nil && len(*sharesnapshot) > 0 {
+ params.Set("sharesnapshot", *sharesnapshot)
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getPropertiesResponder handles the response to the GetProperties request.
+func (client shareClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareGetPropertiesResponse{rawResponse: resp.Response()}, err
+}
+
+// GetStatistics retrieves statistics related to the share.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations.
+func (client shareClient) GetStatistics(ctx context.Context, timeout *int32) (*ShareStats, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getStatisticsPreparer(timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareStats), err
+}
+
+// getStatisticsPreparer prepares the GetStatistics request.
+func (client shareClient) getStatisticsPreparer(timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "stats")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getStatisticsResponder handles the response to the GetStatistics request.
+func (client shareClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &ShareStats{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// SetAccessPolicy sets a stored access policy for use with shared access signatures.
+//
+// shareACL is the ACL for the share. timeout is the request timeout, expressed in seconds. For more information,
+// see Setting Timeouts for File Service Operations.
+func (client shareClient) SetAccessPolicy(ctx context.Context, shareACL []SignedIdentifier, timeout *int32) (*ShareSetAccessPolicyResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setAccessPolicyPreparer(shareACL, timeout)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareSetAccessPolicyResponse), err
+}
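+
+// Hedged usage sketch (AccessPolicy field types assumed from the models
+// file): a stored access policy is a SignedIdentifier carrying an ID plus a
+// start/expiry/permission window.
+//
+//    start := time.Now().UTC()
+//    expiry := start.Add(24 * time.Hour)
+//    perm := "r"
+//    acl := []SignedIdentifier{{
+//        ID:           "policy-1",
+//        AccessPolicy: &AccessPolicy{Start: &start, Expiry: &expiry, Permission: &perm},
+//    }}
+//    _, err := client.SetAccessPolicy(ctx, acl, nil)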
+
+// setAccessPolicyPreparer prepares the SetAccessPolicy request.
+func (client shareClient) setAccessPolicyPreparer(shareACL []SignedIdentifier, timeout *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "acl")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ b, err := xml.Marshal(SignedIdentifiers{Items: shareACL})
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to marshal request body")
+ }
+ req.Header.Set("Content-Type", "application/xml")
+ err = req.SetBody(bytes.NewReader(b))
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to set request body")
+ }
+ return req, nil
+}
+
+// setAccessPolicyResponder handles the response to the SetAccessPolicy request.
+func (client shareClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareSetAccessPolicyResponse{rawResponse: resp.Response()}, err
+}
+
+// SetMetadata sets one or more user-defined name-value pairs for the specified share.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. metadata is a set of name-value pairs to associate with a file storage object.
+func (client shareClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string) (*ShareSetMetadataResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setMetadataPreparer(timeout, metadata)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareSetMetadataResponse), err
+}
+
+// setMetadataPreparer prepares the SetMetadata request.
+func (client shareClient) setMetadataPreparer(timeout *int32, metadata map[string]string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "metadata")
+ req.URL.RawQuery = params.Encode()
+ if metadata != nil {
+ for k, v := range metadata {
+ req.Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// setMetadataResponder handles the response to the SetMetadata request.
+func (client shareClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareSetMetadataResponse{rawResponse: resp.Response()}, err
+}
+
+// SetQuota sets quota for the specified share.
+//
+// timeout is the request timeout, expressed in seconds. For more information, see Setting
+// Timeouts for File Service Operations. quota specifies the maximum size of the share, in gigabytes.
+func (client shareClient) SetQuota(ctx context.Context, timeout *int32, quota *int32) (*ShareSetQuotaResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
+ {targetValue: quota,
+ constraints: []constraint{{target: "quota", name: null, rule: false,
+ chain: []constraint{{target: "quota", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setQuotaPreparer(timeout, quota)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setQuotaResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ShareSetQuotaResponse), err
+}
+
+// setQuotaPreparer prepares the SetQuota request.
+func (client shareClient) setQuotaPreparer(timeout *int32, quota *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "share")
+ params.Set("comp", "properties")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if quota != nil {
+ req.Header.Set("x-ms-share-quota", strconv.FormatInt(int64(*quota), 10))
+ }
+ return req, nil
+}
+
+// setQuotaResponder handles the response to the SetQuota request.
+func (client shareClient) setQuotaResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ShareSetQuotaResponse{rawResponse: resp.Response()}, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_validation.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_validation.go
new file mode 100644
index 0000000..87c006b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_validation.go
@@ -0,0 +1,367 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "fmt"
+ "github.com/Azure/azure-pipeline-go/pipeline"
+ "reflect"
+ "regexp"
+ "strings"
+)
+
+// constraint stores the constraint name, the target field name,
+// the rule, and any chained validations.
+type constraint struct {
+ // Target field name for validation.
+ target string
+
+ // Constraint name e.g. minLength, MaxLength, Pattern, etc.
+ name string
+
+ // Rule for constraint e.g. greater than 10, less than 5 etc.
+ rule interface{}
+
+ // Chain validations for struct type
+ chain []constraint
+}
+
+// validation stores a per-parameter validation: a target value plus its constraints.
+type validation struct {
+ targetValue interface{}
+ constraints []constraint
+}
+
+// Constraint list
+const (
+ empty = "Empty"
+ null = "Null"
+ readOnly = "ReadOnly"
+ pattern = "Pattern"
+ maxLength = "MaxLength"
+ minLength = "MinLength"
+ maxItems = "MaxItems"
+ minItems = "MinItems"
+ multipleOf = "MultipleOf"
+ uniqueItems = "UniqueItems"
+ inclusiveMaximum = "InclusiveMaximum"
+ exclusiveMaximum = "ExclusiveMaximum"
+ exclusiveMinimum = "ExclusiveMinimum"
+ inclusiveMinimum = "InclusiveMinimum"
+)
+
+// validate checks the constraints of every parameter
+// passed in the validation array.
+func validate(m []validation) error {
+ for _, item := range m {
+ v := reflect.ValueOf(item.targetValue)
+ for _, constraint := range item.constraints {
+ var err error
+ switch v.Kind() {
+ case reflect.Ptr:
+ err = validatePtr(v, constraint)
+ case reflect.String:
+ err = validateString(v, constraint)
+ case reflect.Struct:
+ err = validateStruct(v, constraint)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ err = validateInt(v, constraint)
+ case reflect.Float32, reflect.Float64:
+ err = validateFloat(v, constraint)
+ case reflect.Array, reflect.Slice, reflect.Map:
+ err = validateArrayMap(v, constraint)
+ default:
+ err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
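+
+// Sketch of the declaration style used by the generated call sites: a
+// pointer parameter gets a non-required null check whose chain only runs
+// when the value is non-nil, e.g. "quota, if set, must be >= 1":
+//
+//    err := validate([]validation{
+//        {targetValue: quota,
+//            constraints: []constraint{{target: "quota", name: null, rule: false,
+//                chain: []constraint{{target: "quota", name: inclusiveMinimum, rule: 1, chain: nil}}}}}})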
+
+func validateStruct(x reflect.Value, v constraint, name ...string) error {
+ // Get the field name from the target name, which is in the dotted format a.b.c.
+ s := strings.Split(v.target, ".")
+ f := x.FieldByName(s[len(s)-1])
+ if isZero(f) {
+ return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target))
+ }
+ err := validate([]validation{
+ {
+ targetValue: getInterfaceValue(f),
+ constraints: []constraint{v},
+ },
+ })
+ return err
+}
+
+func validatePtr(x reflect.Value, v constraint) error {
+ if v.name == readOnly {
+ if !x.IsNil() {
+ return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
+ }
+ return nil
+ }
+ if x.IsNil() {
+ return checkNil(x, v)
+ }
+ if v.chain != nil {
+ return validate([]validation{
+ {
+ targetValue: getInterfaceValue(x.Elem()),
+ constraints: v.chain,
+ },
+ })
+ }
+ return nil
+}
+
+func validateInt(x reflect.Value, v constraint) error {
+ i := x.Int()
+ r, ok := v.rule.(int)
+ if !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
+ }
+ switch v.name {
+ case multipleOf:
+ if i%int64(r) != 0 {
+ return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
+ }
+ case exclusiveMinimum:
+ if i <= int64(r) {
+ return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
+ }
+ case exclusiveMaximum:
+ if i >= int64(r) {
+ return createError(x, v, fmt.Sprintf("value must be less than %v", r))
+ }
+ case inclusiveMinimum:
+ if i < int64(r) {
+ return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
+ }
+ case inclusiveMaximum:
+ if i > int64(r) {
+ return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name))
+ }
+ return nil
+}
+
+func validateFloat(x reflect.Value, v constraint) error {
+ f := x.Float()
+ r, ok := v.rule.(float64)
+ if !ok {
+ return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule))
+ }
+ switch v.name {
+ case exclusiveMinimum:
+ if f <= r {
+ return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
+ }
+ case exclusiveMaximum:
+ if f >= r {
+ return createError(x, v, fmt.Sprintf("value must be less than %v", r))
+ }
+ case inclusiveMinimum:
+ if f < r {
+ return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
+ }
+ case inclusiveMaximum:
+ if f > r {
+ return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name))
+ }
+ return nil
+}
+
+func validateString(x reflect.Value, v constraint) error {
+ s := x.String()
+ switch v.name {
+ case empty:
+ if len(s) == 0 {
+ return checkEmpty(x, v)
+ }
+ case pattern:
+ reg, err := regexp.Compile(v.rule.(string))
+ if err != nil {
+ return createError(x, v, err.Error())
+ }
+ if !reg.MatchString(s) {
+ return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule))
+ }
+ case maxLength:
+ if _, ok := v.rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
+ }
+ if len(s) > v.rule.(int) {
+ return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule))
+ }
+ case minLength:
+ if _, ok := v.rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
+ }
+ if len(s) < v.rule.(int) {
+ return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule))
+ }
+ case readOnly:
+ if len(s) > 0 {
+ return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name))
+ }
+ if v.chain != nil {
+ return validate([]validation{
+ {
+ targetValue: getInterfaceValue(x),
+ constraints: v.chain,
+ },
+ })
+ }
+ return nil
+}
+
+func validateArrayMap(x reflect.Value, v constraint) error {
+ switch v.name {
+ case null:
+ if x.IsNil() {
+ return checkNil(x, v)
+ }
+ case empty:
+ if x.IsNil() || x.Len() == 0 {
+ return checkEmpty(x, v)
+ }
+ case maxItems:
+ if _, ok := v.rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
+ }
+ if x.Len() > v.rule.(int) {
+ return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len()))
+ }
+ case minItems:
+ if _, ok := v.rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
+ }
+ if x.Len() < v.rule.(int) {
+ return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len()))
+ }
+ case uniqueItems:
+ if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+ if !checkForUniqueInArray(x) {
+ return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
+ }
+ } else if x.Kind() == reflect.Map {
+ if !checkForUniqueInMap(x) {
+ return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
+ }
+ } else {
+ return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind()))
+ }
+ case readOnly:
+ if x.Len() != 0 {
+ return createError(x, v, "readonly parameter; must send as nil or empty in request")
+ }
+ case pattern:
+ reg, err := regexp.Compile(v.rule.(string))
+ if err != nil {
+ return createError(x, v, err.Error())
+ }
+ keys := x.MapKeys()
+ for _, k := range keys {
+ if !reg.MatchString(k.String()) {
+ return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule))
+ }
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name))
+ }
+ if v.chain != nil {
+ return validate([]validation{
+ {
+ targetValue: getInterfaceValue(x),
+ constraints: v.chain,
+ },
+ })
+ }
+ return nil
+}
+
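+// checkNil handles the null constraint for a nil value: the rule must be a bool,
+// and a true rule marks the value as required, so an error is returned.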
+func checkNil(x reflect.Value, v constraint) error {
+ if _, ok := v.rule.(bool); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
+ }
+ if v.rule.(bool) {
+ return createError(x, v, "value can not be null; required parameter")
+ }
+ return nil
+}
+
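+// checkEmpty handles the empty constraint for an empty value: the rule must be a bool,
+// and a true rule means the value must not be null or empty.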
+func checkEmpty(x reflect.Value, v constraint) error {
+ if _, ok := v.rule.(bool); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
+ }
+ if v.rule.(bool) {
+ return createError(x, v, "value can not be null or empty; required parameter")
+ }
+ return nil
+}
+
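+// checkForUniqueInArray reports whether all items of an array or slice are distinct;
+// zero or empty values are reported as non-unique.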
+func checkForUniqueInArray(x reflect.Value) bool {
+ if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
+ return false
+ }
+ arrOfInterface := make([]interface{}, x.Len())
+ for i := 0; i < x.Len(); i++ {
+ arrOfInterface[i] = x.Index(i).Interface()
+ }
+ m := make(map[interface{}]bool)
+ for _, val := range arrOfInterface {
+ if m[val] {
+ return false
+ }
+ m[val] = true
+ }
+ return true
+}
+
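+// checkForUniqueInMap reports whether all values of a map are distinct;
+// zero or empty maps are reported as non-unique.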
+func checkForUniqueInMap(x reflect.Value) bool {
+ if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
+ return false
+ }
+ mapOfInterface := make(map[interface{}]interface{}, x.Len())
+ keys := x.MapKeys()
+ for _, k := range keys {
+ mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
+ }
+ m := make(map[interface{}]bool)
+ for _, val := range mapOfInterface {
+ if m[val] {
+ return false
+ }
+ m[val] = true
+ }
+ return true
+}
+
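+// getInterfaceValue returns the underlying value of x, or nil if x is the invalid reflect.Value.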
+func getInterfaceValue(x reflect.Value) interface{} {
+ if x.Kind() == reflect.Invalid {
+ return nil
+ }
+ return x.Interface()
+}
+
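+// isZero reports whether x is the zero value of its type.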
+func isZero(x interface{}) bool {
+ return x == reflect.Zero(reflect.TypeOf(x)).Interface()
+}
+
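+// createError builds a pipeline error describing the failed constraint, its target parameter and the offending value.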
+func createError(x reflect.Value, v constraint, message string) error {
+ return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s",
+ v.target, v.name, getInterfaceValue(x), message))
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_version.go
new file mode 100644
index 0000000..b1d3649
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_generated_version.go
@@ -0,0 +1,14 @@
+package azfile
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/0.0.0 azfile/2019-02-02"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return "0.0.0"
+}
diff --git a/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_response_helpers.go
new file mode 100644
index 0000000..21fd25e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-file-go/azfile/zz_response_helpers.go
@@ -0,0 +1,314 @@
+package azfile
+
+import (
+ "context"
+ "encoding/xml"
+ "net/http"
+ "time"
+)
+
+// FileHTTPHeaders contains read/writeable file properties.
+type FileHTTPHeaders struct {
+ ContentType string
+ ContentMD5 []byte
+ ContentEncoding string
+ ContentLanguage string
+ ContentDisposition string
+ CacheControl string
+}
+
+// NewHTTPHeaders returns the user-modifiable properties for this file.
+func (dr RetryableDownloadResponse) NewHTTPHeaders() FileHTTPHeaders {
+ return FileHTTPHeaders{
+ ContentType: dr.ContentType(),
+ ContentEncoding: dr.ContentEncoding(),
+ ContentLanguage: dr.ContentLanguage(),
+ ContentDisposition: dr.ContentDisposition(),
+ CacheControl: dr.CacheControl(),
+ ContentMD5: dr.ContentMD5(),
+ }
+}
+
+// NewHTTPHeaders returns the user-modifiable properties for this file.
+func (fgpr FileGetPropertiesResponse) NewHTTPHeaders() FileHTTPHeaders {
+ return FileHTTPHeaders{
+ ContentType: fgpr.ContentType(),
+ ContentEncoding: fgpr.ContentEncoding(),
+ ContentLanguage: fgpr.ContentLanguage(),
+ ContentDisposition: fgpr.ContentDisposition(),
+ CacheControl: fgpr.CacheControl(),
+ ContentMD5: fgpr.ContentMD5(),
+ }
+}
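+
+// A common round trip is to read a file's current headers, tweak a field, and
+// write them back, e.g. (a sketch; assumes a FileURL value named fileURL):
+//   props, _ := fileURL.GetProperties(ctx)
+//   h := props.NewHTTPHeaders()
+//   h.ContentType = "text/plain"
+//   _, _ = fileURL.SetHTTPHeaders(ctx, h)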
+
+// RetryableDownloadResponse wraps the AutoRest-generated DownloadResponse and
+// carries the information needed to retry the download.
+type RetryableDownloadResponse struct {
+ dr *DownloadResponse
+
+ // Fields needed for retry.
+ ctx context.Context
+ f FileURL
+ info HTTPGetterInfo
+}
+
+// Response returns the raw HTTP response object.
+func (dr RetryableDownloadResponse) Response() *http.Response {
+ return dr.dr.Response()
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dr RetryableDownloadResponse) StatusCode() int {
+ return dr.dr.StatusCode()
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dr RetryableDownloadResponse) Status() string {
+ return dr.dr.Status()
+}
+
+// AcceptRanges returns the value for header Accept-Ranges.
+func (dr RetryableDownloadResponse) AcceptRanges() string {
+ return dr.dr.AcceptRanges()
+}
+
+// CacheControl returns the value for header Cache-Control.
+func (dr RetryableDownloadResponse) CacheControl() string {
+ return dr.dr.CacheControl()
+}
+
+// ContentDisposition returns the value for header Content-Disposition.
+func (dr RetryableDownloadResponse) ContentDisposition() string {
+ return dr.dr.ContentDisposition()
+}
+
+// ContentEncoding returns the value for header Content-Encoding.
+func (dr RetryableDownloadResponse) ContentEncoding() string {
+ return dr.dr.ContentEncoding()
+}
+
+// ContentLanguage returns the value for header Content-Language.
+func (dr RetryableDownloadResponse) ContentLanguage() string {
+ return dr.dr.ContentLanguage()
+}
+
+// ContentLength returns the value for header Content-Length.
+func (dr RetryableDownloadResponse) ContentLength() int64 {
+ return dr.dr.ContentLength()
+}
+
+// ContentRange returns the value for header Content-Range.
+func (dr RetryableDownloadResponse) ContentRange() string {
+ return dr.dr.ContentRange()
+}
+
+// ContentType returns the value for header Content-Type.
+func (dr RetryableDownloadResponse) ContentType() string {
+ return dr.dr.ContentType()
+}
+
+// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
+func (dr RetryableDownloadResponse) CopyCompletionTime() time.Time {
+ return dr.dr.CopyCompletionTime()
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (dr RetryableDownloadResponse) CopyID() string {
+ return dr.dr.CopyID()
+}
+
+// CopyProgress returns the value for header x-ms-copy-progress.
+func (dr RetryableDownloadResponse) CopyProgress() string {
+ return dr.dr.CopyProgress()
+}
+
+// CopySource returns the value for header x-ms-copy-source.
+func (dr RetryableDownloadResponse) CopySource() string {
+ return dr.dr.CopySource()
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (dr RetryableDownloadResponse) CopyStatus() CopyStatusType {
+ return dr.dr.CopyStatus()
+}
+
+// CopyStatusDescription returns the value for header x-ms-copy-status-description.
+func (dr RetryableDownloadResponse) CopyStatusDescription() string {
+ return dr.dr.CopyStatusDescription()
+}
+
+// Date returns the value for header Date.
+func (dr RetryableDownloadResponse) Date() time.Time {
+ return dr.dr.Date()
+}
+
+// ETag returns the value for header ETag.
+func (dr RetryableDownloadResponse) ETag() ETag {
+ return dr.dr.ETag()
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (dr RetryableDownloadResponse) IsServerEncrypted() string {
+ return dr.dr.IsServerEncrypted()
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dr RetryableDownloadResponse) LastModified() time.Time {
+ return dr.dr.LastModified()
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dr RetryableDownloadResponse) RequestID() string {
+ return dr.dr.RequestID()
+}
+
+// Version returns the value for header x-ms-version.
+func (dr RetryableDownloadResponse) Version() string {
+ return dr.dr.Version()
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (dr RetryableDownloadResponse) NewMetadata() Metadata {
+ return dr.dr.NewMetadata()
+}
+
+// FileContentMD5 returns the value for header x-ms-content-md5.
+func (dr RetryableDownloadResponse) FileContentMD5() []byte {
+ return dr.dr.FileContentMD5()
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (dr RetryableDownloadResponse) ContentMD5() []byte {
+ return dr.dr.ContentMD5()
+}
+
+// FileItem - Listed file item.
+type FileItem struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"File"`
+ // Name - Name of the entry.
+ Name string `xml:"Name"`
+ Properties *FileProperty `xml:"Properties"`
+}
+
+// DirectoryItem - Listed directory item.
+type DirectoryItem struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Directory"`
+ // Name - Name of the entry.
+ Name string `xml:"Name"`
+}
+
+// ListFilesAndDirectoriesSegmentResponse - An enumeration of directories and files.
+type ListFilesAndDirectoriesSegmentResponse struct {
+ rawResponse *http.Response
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"EnumerationResults"`
+ ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
+ ShareName string `xml:"ShareName,attr"`
+ ShareSnapshot *string `xml:"ShareSnapshot,attr"`
+ DirectoryPath string `xml:"DirectoryPath,attr"`
+ Prefix string `xml:"Prefix"`
+ Marker *string `xml:"Marker"`
+ MaxResults *int32 `xml:"MaxResults"`
+ FileItems []FileItem `xml:"Entries>File"`
+ DirectoryItems []DirectoryItem `xml:"Entries>Directory"`
+ NextMarker Marker `xml:"NextMarker"`
+}
+
+// Response returns the raw HTTP response object.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) Response() *http.Response {
+ return ldafr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) StatusCode() int {
+ return ldafr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ldafr ListFilesAndDirectoriesSegmentResponse) Status() string {
+ return ldafr.rawResponse.Status
+}
+
+// ContentType returns the value for header Content-Type.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) ContentType() string {
+ return ldafr.rawResponse.Header.Get("Content-Type")
+}
+
+// Date returns the value for header Date.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) Date() time.Time {
+ s := ldafr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) RequestID() string {
+ return ldafr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) Version() string {
+ return ldafr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (ldafr ListFilesAndDirectoriesSegmentResponse) ErrorCode() string {
+ return ldafr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// MetricProperties defines a convenience struct for Metrics.
+type MetricProperties struct {
+ // MetricEnabled - Indicates whether metrics are enabled for the File service.
+ MetricEnabled bool
+ // Version - The version of Storage Analytics to configure. The Version field is
+ // omitted here because it is mandatory and must always be 1.0.
+ // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations.
+ IncludeAPIs bool
+ // RetentionPolicyEnabled - Indicates whether a retention policy is enabled for the File service.
+ RetentionPolicyEnabled bool
+ // RetentionDays - Indicates the number of days that metrics data should be retained.
+ RetentionDays int32
+}
+
+// FileServiceProperties defines a convenience struct for StorageServiceProperties.
+type FileServiceProperties struct {
+ rawResponse *http.Response
+ // HourMetrics - A summary of request statistics grouped by API in hourly aggregates for files.
+ HourMetrics MetricProperties
+ // MinuteMetrics - A summary of request statistics grouped by API in minute aggregates for files.
+ MinuteMetrics MetricProperties
+ // Cors - The set of CORS rules.
+ Cors []CorsRule
+}
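+
+// In practice this struct is typically obtained from and written back via the
+// service-level GetProperties/SetProperties operations.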
+
+// Response returns the raw HTTP response object.
+func (fsp FileServiceProperties) Response() *http.Response {
+ return fsp.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fsp FileServiceProperties) StatusCode() int {
+ return fsp.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fsp FileServiceProperties) Status() string {
+ return fsp.rawResponse.Status
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fsp FileServiceProperties) RequestID() string {
+ return fsp.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fsp FileServiceProperties) Version() string {
+ return fsp.rawResponse.Header.Get("x-ms-version")
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 0000000..fec416a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,292 @@
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+
+1. Register a new application with a `secret` credential
+
+ ```
+ az ad app create \
+ --display-name example-app \
+ --homepage https://example-app/home \
+ --identifier-uris https://example-app/app \
+ --password secret
+ ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+ ```
+ az ad sp create --id "Application ID"
+ ```
+
+ * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+ ```
+ openssl genrsa -out "example-app.key" 2048
+ ```
+
+2. Create the certificate
+
+ ```
+ openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+ openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+ ```
+
+3. Create the PKCS12 version of the certificate that also contains the private key
+
+ ```
+ openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+
+ ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+ ```
+ certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+ az ad app create \
+ --display-name example-app \
+ --homepage https://example-app/home \
+ --identifier-uris https://example-app/app \
+ --key-usage Verify --end-date 2018-01-01 \
+ --key-value "${certificateContents}"
+ ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+ ```
+ az ad sp create --id "APPLICATION_ID"
+ ```
+
+ * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+ // This is called after the token is acquired
+ return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+ *oauthConfig,
+ applicationID,
+ applicationSecret,
+ resource,
+ callbacks...)
+if err != nil {
+ return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+ token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
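+
+The acquired token can then be attached to an autorest-based client; a minimal
+sketch (the `autorest.Client` wiring is illustrative, not part of the flow itself):
+
+```Go
+import "github.com/Azure/go-autorest/autorest"
+
+// The authorizer pulls the current token from spt and adds an
+// Authorization header to every request sent through the client.
+client := autorest.Client{}
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```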
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+ return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+ return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+ *oauthConfig,
+ applicationID,
+ certificate,
+ rsaPrivateKey,
+ resource,
+ callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+ token := spt.Token
+}
+```
+
+* Update the certificate path to point to the `example-app.pfx` file created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+ oauthClient,
+ *oauthConfig,
+ applicationID,
+ resource)
+if err != nil {
+ return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+ return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+ *oauthConfig,
+ applicationID,
+ resource,
+ *token,
+ callbacks...)
+
+if err == nil {
+ token := spt.Token
+}
+```
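+
+As with the other flows, the resulting token can be refreshed on demand;
+`EnsureFresh` refreshes only when the token is stale (a minimal sketch):
+
+```Go
+// Refresh the token only if it is expired or about to expire.
+if err := spt.EnsureFresh(); err != nil {
+ // handle the refresh failure
+}
+```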
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+ *oauthConfig,
+ applicationID,
+ username,
+ password,
+ resource,
+ callbacks...)
+
+if err == nil {
+ token := spt.Token
+}
+```
+
+#### Authorization code authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+ *oauthConfig,
+ applicationID,
+ clientSecret,
+ authorizationCode,
+ redirectURI,
+ resource,
+ callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+ token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+ -applicationId string
+ application id
+ -certificatePath string
+ path to pk12/PFC application certificate
+ -mode string
+ authentication mode (device, secret, cert, refresh) (default "device")
+ -resource string
+ resource for which the token is requested
+ -secret string
+ application secret
+ -tenantId string
+ tenant id
+ -tokenCachePath string
+ location of oath token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+For example, to acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+ -applicationId "APPLICATION_ID" \
+ -tenantId "TENANT_ID" \
+ -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 0000000..fa59647
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+)
+
+const (
+ activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
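+ // yields e.g. "{tenantID}/oauth2/token?api-version=1.0" once resolved against the AD endpoint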
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+ AuthorityEndpoint url.URL `json:"authorityEndpoint"`
+ AuthorizeEndpoint url.URL `json:"authorizeEndpoint"`
+ TokenEndpoint url.URL `json:"tokenEndpoint"`
+ DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+ return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+ if len(param) == 0 {
+ return fmt.Errorf("parameter '" + name + "' cannot be empty")
+ }
+ return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant specific urls
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+ apiVer := "1.0"
+ return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+ if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+ return nil, err
+ }
+ api := ""
+ // it's legal for tenantID to be empty so don't validate it
+ if apiVersion != nil {
+ if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+ return nil, err
+ }
+ api = fmt.Sprintf("?api-version=%s", *apiVersion)
+ }
+ u, err := url.Parse(activeDirectoryEndpoint)
+ if err != nil {
+ return nil, err
+ }
+ authorityURL, err := u.Parse(tenantID)
+ if err != nil {
+ return nil, err
+ }
+ authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+ if err != nil {
+ return nil, err
+ }
+ tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+ if err != nil {
+ return nil, err
+ }
+ deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+ if err != nil {
+ return nil, err
+ }
+
+ return &OAuthConfig{
+ AuthorityEndpoint: *authorityURL,
+ AuthorizeEndpoint: *authorizeURL,
+ TokenEndpoint: *tokenURL,
+ DeviceCodeEndpoint: *deviceCodeURL,
+ }, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+ PrimaryTenant() *OAuthConfig
+ AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+ APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+ if c.APIVersion != "" {
+ return fmt.Sprintf("?api-version=%s", c.APIVersion)
+ }
+ return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multi-tenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
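+// A call takes the form (variable names illustrative):
+//   cfg, err := NewMultiTenantOAuthConfig(endpoint, primaryTenant, []string{auxTenant}, OAuthOptions{})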
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
+ if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
+ return nil, errors.New("must specify one to three auxiliary tenants")
+ }
+ mtCfg := multiTenantOAuthConfig{
+ cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
+ }
+ apiVer := options.apiVersion()
+ pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
+ }
+ mtCfg.cfgs[0] = pri
+ for i := range auxiliaryTenantIDs {
+ aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
+ if err != nil {
+ return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
+ }
+ mtCfg.cfgs[i+1] = aux
+ }
+ return mtCfg, nil
+}
+
+type multiTenantOAuthConfig struct {
+ // first config in the slice is the primary tenant
+ cfgs []*OAuthConfig
+}
+
+func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
+ return m.cfgs[0]
+}
+
+func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
+ return m.cfgs[1:]
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
new file mode 100644
index 0000000..914f8af
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
@@ -0,0 +1,269 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ This file is largely based on rjw57/oauth2device's code, with the following differences:
+ * scope -> resource, and only allow a single one
+ * receive "Message" in the DeviceCode struct and show it to users as the prompt
+ * azure-xplat-cli has the following behavior that this emulates:
+ - does not send client_secret during the token exchange
+ - sends resource again in the token exchange request
+*/
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+const (
+ logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+ // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+ ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+ // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+ ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+ // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+ ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+ // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+ ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+ // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+ ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+ // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+ ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+ // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+ ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+ errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
+ errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
+ errTokenSendingFails = "Error occurred while sending request with device code for a token"
+ errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+ errStatusNotOK = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint.
+// It contains information to instruct the user to complete the auth flow.
+type DeviceCode struct {
+ DeviceCode *string `json:"device_code,omitempty"`
+ UserCode *string `json:"user_code,omitempty"`
+ VerificationURL *string `json:"verification_url,omitempty"`
+ ExpiresIn *int64 `json:"expires_in,string,omitempty"`
+ Interval *int64 `json:"interval,string,omitempty"`
+
+ Message *string `json:"message"` // Azure specific
+ Resource string // this and the fields below are stored when the flow is initiated and used during the token exchange
+ OAuthConfig OAuthConfig
+ ClientID string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+ Error *string `json:"error,omitempty"`
+ ErrorCodes []int `json:"error_codes,omitempty"`
+ ErrorDescription *string `json:"error_description,omitempty"`
+ Timestamp *string `json:"timestamp,omitempty"`
+ TraceID *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint.
+// It can either look like a Token or a TokenError, so it embeds both;
+// the presence of "Error" indicates an error state.
+type deviceToken struct {
+ Token
+ TokenError
+}
+
+// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// Deprecated: use InitiateDeviceAuthWithContext() instead.
+func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+ return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
+}
+
+// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+ v := url.Values{
+ "client_id": []string{clientID},
+ "resource": []string{resource},
+ }
+
+ s := v.Encode()
+ body := ioutil.NopCloser(strings.NewReader(s))
+
+ req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+ }
+
+ req.ContentLength = int64(len(s))
+ req.Header.Set(contentType, mimeTypeFormPost)
+ resp, err := sender.Do(req.WithContext(ctx))
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+ }
+ defer resp.Body.Close()
+
+ rb, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
+ }
+
+ if len(strings.Trim(string(rb), " ")) == 0 {
+ return nil, ErrDeviceCodeEmpty
+ }
+
+ var code DeviceCode
+ err = json.Unmarshal(rb, &code)
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+ }
+
+ code.ClientID = clientID
+ code.Resource = resource
+ code.OAuthConfig = oauthConfig
+
+ return &code, nil
+}
+
+// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+// Deprecated: use CheckForUserCompletionWithContext() instead.
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+ return CheckForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
+ v := url.Values{
+ "client_id": []string{code.ClientID},
+ "code": []string{*code.DeviceCode},
+ "grant_type": []string{OAuthGrantTypeDeviceCode},
+ "resource": []string{code.Resource},
+ }
+
+ s := v.Encode()
+ body := ioutil.NopCloser(strings.NewReader(s))
+
+ req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+ }
+
+ req.ContentLength = int64(len(s))
+ req.Header.Set(contentType, mimeTypeFormPost)
+ resp, err := sender.Do(req.WithContext(ctx))
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+ }
+ defer resp.Body.Close()
+
+ rb, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+ }
+
+ if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
+ }
+ if len(strings.Trim(string(rb), " ")) == 0 {
+ return nil, ErrOAuthTokenEmpty
+ }
+
+ var token deviceToken
+ err = json.Unmarshal(rb, &token)
+ if err != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+ }
+
+ if token.Error == nil {
+ return &token.Token, nil
+ }
+
+ switch *token.Error {
+ case "authorization_pending":
+ return nil, ErrDeviceAuthorizationPending
+ case "slow_down":
+ return nil, ErrDeviceSlowDown
+ case "access_denied":
+ return nil, ErrDeviceAccessDenied
+ case "code_expired":
+ return nil, ErrDeviceCodeExpired
+ default:
+ return nil, ErrDeviceGeneric
+ }
+}
+
+// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
+// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+// Deprecated: use WaitForUserCompletionWithContext() instead.
+func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+ return WaitForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
+// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
+ intervalDuration := time.Duration(*code.Interval) * time.Second
+ waitDuration := intervalDuration
+
+ for {
+ token, err := CheckForUserCompletionWithContext(ctx, sender, code)
+
+ if err == nil {
+ return token, nil
+ }
+
+ switch err {
+ case ErrDeviceSlowDown:
+ waitDuration += waitDuration
+ case ErrDeviceAuthorizationPending:
+ // noop
+ default: // everything else is "fatal" to us
+ return nil, err
+ }
+
+ if waitDuration > (intervalDuration * 3) {
+ return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
+ }
+
+ select {
+ case <-time.After(waitDuration):
+ // noop
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+}
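+
+// exampleDeviceFlow is an illustrative sketch, not part of the upstream file:
+// it shows how the pieces above compose. InitiateDeviceAuthWithContext
+// (defined earlier in this file) obtains the DeviceCode, whose Message is
+// shown to the user; WaitForUserCompletionWithContext then polls until the
+// user completes sign-in or an error state is reached.
+func exampleDeviceFlow(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*Token, error) {
+ code, err := InitiateDeviceAuthWithContext(ctx, sender, oauthConfig, clientID, resource)
+ if err != nil {
+ return nil, err
+ }
+ // Message typically instructs the user to visit the verification URL and enter the user code.
+ fmt.Println(*code.Message)
+ return WaitForUserCompletionWithContext(ctx, sender, code)
+}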
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
new file mode 100644
index 0000000..fdc5b90
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
@@ -0,0 +1,12 @@
+module github.com/Azure/go-autorest/autorest/adal
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest v0.9.0
+ github.com/Azure/go-autorest/autorest/date v0.2.0
+ github.com/Azure/go-autorest/autorest/mocks v0.3.0
+ github.com/Azure/go-autorest/tracing v0.5.0
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
new file mode 100644
index 0000000..f0a0185
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
@@ -0,0 +1,23 @@
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
new file mode 100644
index 0000000..28a4bfc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
new file mode 100644
index 0000000..9e15f27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -0,0 +1,73 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+ }
+ defer file.Close()
+
+ var token Token
+
+ dec := json.NewDecoder(file)
+ if err = dec.Decode(&token); err != nil {
+ return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+ }
+ return &token, nil
+}
+
+// SaveToken persists an oauth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+ dir := filepath.Dir(path)
+ err := os.MkdirAll(dir, os.ModePerm)
+ if err != nil {
+ return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+ }
+
+ newFile, err := ioutil.TempFile(dir, "token")
+ if err != nil {
+ return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+ }
+ tempPath := newFile.Name()
+
+ if err := json.NewEncoder(newFile).Encode(token); err != nil {
+ return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+ }
+ if err := newFile.Close(); err != nil {
+ return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+ }
+
+ // Atomic replace to avoid multi-writer file corruptions
+ if err := os.Rename(tempPath, path); err != nil {
+ return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+ }
+ if err := os.Chmod(path, mode); err != nil {
+ return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+ }
+ return nil
+}
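+
+// exampleTokenRoundTrip is an illustrative sketch, not part of the upstream
+// file: it persists a token with owner-only permissions and reads it back.
+// The cache path is a hypothetical placeholder.
+func exampleTokenRoundTrip(token Token) (*Token, error) {
+ path := filepath.Join(os.TempDir(), "azure-token-cache.json")
+ if err := SaveToken(path, 0600, token); err != nil {
+ return nil, err
+ }
+ return LoadToken(path)
+}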
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 0000000..d7e4372
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,95 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/http/cookiejar"
+ "sync"
+
+ "github.com/Azure/go-autorest/tracing"
+)
+
+const (
+ contentType = "Content-Type"
+ mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+ return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+ return DecorateSender(sender(), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+ for _, decorate := range decorators {
+ s = decorate(s)
+ }
+ return s
+}
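+
+// exampleHeaderDecorator is an illustrative sketch, not part of the upstream
+// file: a SendDecorator that stamps a request header before delegating to the
+// wrapped Sender; the header name is a placeholder. Wire it in with
+// CreateSender(exampleHeaderDecorator) or DecorateSender(existing, exampleHeaderDecorator).
+func exampleHeaderDecorator(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ r.Header.Set("X-Example-Trace", "adal")
+ return s.Do(r)
+ })
+}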
+
+func sender() Sender {
+ // note that we can't init defaultSender in init() since it will
+ // execute before calling code has had a chance to enable tracing
+ defaultSenderInit.Do(func() {
+ // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ transport := &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+ })
+ return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 0000000..7c7fca3
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1112 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/dgrijalva/jwt-go"
+)
+
+const (
+ defaultRefresh = 5 * time.Minute
+
+ // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+ OAuthGrantTypeDeviceCode = "device_code"
+
+ // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+ OAuthGrantTypeClientCredentials = "client_credentials"
+
+ // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+ OAuthGrantTypeUserPass = "password"
+
+ // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+ OAuthGrantTypeRefreshToken = "refresh_token"
+
+ // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+ OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+ // metadataHeader is the header required by the MSI extension
+ metadataHeader = "Metadata"
+
+ // msiEndpoint is the well known endpoint for getting MSI authentication tokens
+ msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+ // the default number of attempts to refresh an MSI authentication token
+ defaultMaxMSIRefreshAttempts = 5
+
+ // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+ asMSIEndpointEnv = "MSI_ENDPOINT"
+
+ // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
+ asMSISecretEnv = "MSI_SECRET"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+ OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+ PrimaryOAuthToken() string
+ AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+ error
+ Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+ Refresh() error
+ RefreshExchange(resource string) error
+ EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+ RefreshWithContext(ctx context.Context) error
+ RefreshExchangeWithContext(ctx context.Context, resource string) error
+ EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+
+ ExpiresIn json.Number `json:"expires_in"`
+ ExpiresOn json.Number `json:"expires_on"`
+ NotBefore json.Number `json:"not_before"`
+
+ Resource string `json:"resource"`
+ Type string `json:"token_type"`
+}
+
+func newToken() Token {
+ return Token{
+ ExpiresIn: "0",
+ ExpiresOn: "0",
+ NotBefore: "0",
+ }
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+ return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+ s, err := t.ExpiresOn.Float64()
+ if err != nil {
+ s = -3600
+ }
+
+ expiration := date.NewUnixTimeFromSeconds(s)
+
+ return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+ return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire within the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+ return !t.Expires().After(time.Now().Add(d))
+}
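+
+// exampleIsStale is an illustrative sketch, not part of the upstream file:
+// EnsureFresh (below) effectively performs this check using the configured
+// RefreshWithin window, which defaults to defaultRefresh (five minutes).
+func exampleIsStale(t Token) bool {
+ return t.WillExpireIn(defaultRefresh)
+}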
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+ return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+ SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. This is used by tokens
+// created manually via NewServicePrincipalTokenFromManualToken.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
+// It only returns an error for the ServicePrincipalNoSecret type
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) {
+ type tokenType struct {
+ Type string `json:"type"`
+ }
+ return json.Marshal(tokenType{
+ Type: "ServicePrincipalNoSecret",
+ })
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
+type ServicePrincipalTokenSecret struct {
+ ClientSecret string `json:"value"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("client_secret", tokenSecret.ClientSecret)
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) {
+ type tokenType struct {
+ Type string `json:"type"`
+ Value string `json:"value"`
+ }
+ return json.Marshal(tokenType{
+ Type: "ServicePrincipalTokenSecret",
+ Value: tokenSecret.ClientSecret,
+ })
+}
+
+// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
+type ServicePrincipalCertificateSecret struct {
+ Certificate *x509.Certificate
+ PrivateKey *rsa.PrivateKey
+}
+
+// SignJwt returns the JWT signed with the certificate's private key.
+func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
+ hasher := sha1.New()
+ _, err := hasher.Write(secret.Certificate.Raw)
+ if err != nil {
+ return "", err
+ }
+
+ thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
+
+ // The jti (JWT ID) claim provides a unique identifier for the JWT.
+ jti := make([]byte, 20)
+ _, err = rand.Read(jti)
+ if err != nil {
+ return "", err
+ }
+
+ token := jwt.New(jwt.SigningMethodRS256)
+ token.Header["x5t"] = thumbprint
+ x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
+ token.Header["x5c"] = x5c
+ token.Claims = jwt.MapClaims{
+ "aud": spt.inner.OauthConfig.TokenEndpoint.String(),
+ "iss": spt.inner.ClientID,
+ "sub": spt.inner.ClientID,
+ "jti": base64.URLEncoding.EncodeToString(jti),
+ "nbf": time.Now().Unix(),
+ "exp": time.Now().Add(time.Hour * 24).Unix(),
+ }
+
+ signedString, err := token.SignedString(secret.PrivateKey)
+ return signedString, err
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
+func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ jwt, err := secret.SignJwt(spt)
+ if err != nil {
+ return err
+ }
+
+ v.Set("client_assertion", jwt)
+ v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) {
+ return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported")
+}
+
+// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
+type ServicePrincipalMSISecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) {
+ return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported")
+}
+
+// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
+type ServicePrincipalUsernamePasswordSecret struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("username", secret.Username)
+ v.Set("password", secret.Password)
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) {
+ type tokenType struct {
+ Type string `json:"type"`
+ Username string `json:"username"`
+ Password string `json:"password"`
+ }
+ return json.Marshal(tokenType{
+ Type: "ServicePrincipalUsernamePasswordSecret",
+ Username: secret.Username,
+ Password: secret.Password,
+ })
+}
+
+// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
+type ServicePrincipalAuthorizationCodeSecret struct {
+ ClientSecret string `json:"value"`
+ AuthorizationCode string `json:"authCode"`
+ RedirectURI string `json:"redirect"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("code", secret.AuthorizationCode)
+ v.Set("client_secret", secret.ClientSecret)
+ v.Set("redirect_uri", secret.RedirectURI)
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) {
+ type tokenType struct {
+ Type string `json:"type"`
+ Value string `json:"value"`
+ AuthCode string `json:"authCode"`
+ Redirect string `json:"redirect"`
+ }
+ return json.Marshal(tokenType{
+ Type: "ServicePrincipalAuthorizationCodeSecret",
+ Value: secret.ClientSecret,
+ AuthCode: secret.AuthorizationCode,
+ Redirect: secret.RedirectURI,
+ })
+}
+
+// ServicePrincipalToken encapsulates a Token created for a Service Principal.
+type ServicePrincipalToken struct {
+ inner servicePrincipalToken
+ refreshLock *sync.RWMutex
+ sender Sender
+ refreshCallbacks []TokenRefreshCallback
+ // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+ MaxMSIRefreshAttempts int
+}
+
+// MarshalTokenJSON returns the marshalled inner token.
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
+ return json.Marshal(spt.inner.Token)
+}
+
+// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
+func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
+ spt.refreshCallbacks = callbacks
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
+ return json.Marshal(spt.inner)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+ // need to determine the token type
+ raw := map[string]interface{}{}
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+ secret := raw["secret"].(map[string]interface{})
+ switch secret["type"] {
+ case "ServicePrincipalNoSecret":
+ spt.inner.Secret = &ServicePrincipalNoSecret{}
+ case "ServicePrincipalTokenSecret":
+ spt.inner.Secret = &ServicePrincipalTokenSecret{}
+ case "ServicePrincipalCertificateSecret":
+ return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+ case "ServicePrincipalMSISecret":
+ return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+ case "ServicePrincipalUsernamePasswordSecret":
+ spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+ case "ServicePrincipalAuthorizationCodeSecret":
+ spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+ default:
+ return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+ }
+ err = json.Unmarshal(data, &spt.inner)
+ if err != nil {
+ return err
+ }
+ // Don't override the refreshLock or the sender if those have already been set.
+ if spt.refreshLock == nil {
+ spt.refreshLock = &sync.RWMutex{}
+ }
+ if spt.sender == nil {
+ spt.sender = sender()
+ }
+ return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+ Token Token `json:"token"`
+ Secret ServicePrincipalSecret `json:"secret"`
+ OauthConfig OAuthConfig `json:"oauth"`
+ ClientID string `json:"clientID"`
+ Resource string `json:"resource"`
+ AutoRefresh bool `json:"autoRefresh"`
+ RefreshWithin time.Duration `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+ if oac.IsZero() {
+ return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+ }
+ return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(id, "id"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if secret == nil {
+ return nil, fmt.Errorf("parameter 'secret' cannot be nil")
+ }
+ spt := &ServicePrincipalToken{
+ inner: servicePrincipalToken{
+ Token: newToken(),
+ OauthConfig: oauthConfig,
+ Secret: secret,
+ ClientID: id,
+ Resource: resource,
+ AutoRefresh: true,
+ RefreshWithin: defaultRefresh,
+ },
+ refreshLock: &sync.RWMutex{},
+ sender: sender(),
+ refreshCallbacks: callbacks,
+ }
+ return spt, nil
+}
+
+// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
+func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if token.IsZero() {
+ return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
+ }
+ spt, err := NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalNoSecret{},
+ callbacks...)
+ if err != nil {
+ return nil, err
+ }
+
+ spt.inner.Token = token
+
+ return spt, nil
+}
+
+// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret
+func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if secret == nil {
+ return nil, fmt.Errorf("parameter 'secret' cannot be nil")
+ }
+ if token.IsZero() {
+ return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
+ }
+ spt, err := NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ secret,
+ callbacks...)
+ if err != nil {
+ return nil, err
+ }
+
+ spt.inner.Token = token
+
+ return spt, nil
+}
+
+// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
+// credentials scoped to the named resource.
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(secret, "secret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalTokenSecret{
+ ClientSecret: secret,
+ },
+ callbacks...,
+ )
+}
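+
+// exampleClientCredentials is an illustrative sketch, not part of the
+// upstream file: it assumes NewOAuthConfig from this package's config.go and
+// uses hypothetical placeholder values for the tenant, client, and secret.
+func exampleClientCredentials() (*ServicePrincipalToken, error) {
+ oauthConfig, err := NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
+ if err != nil {
+ return nil, err
+ }
+ return NewServicePrincipalToken(*oauthConfig, "my-client-id", "my-client-secret", "https://management.azure.com/")
+}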
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if certificate == nil {
+ return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+ }
+ if privateKey == nil {
+ return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+ }
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalCertificateSecret{
+ PrivateKey: privateKey,
+ Certificate: certificate,
+ },
+ callbacks...,
+ )
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(username, "username"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(password, "password"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalUsernamePasswordSecret{
+ Username: username,
+ Password: password,
+ },
+ callbacks...,
+ )
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied
+// authorization code, client secret, and redirect URI.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalAuthorizationCodeSecret{
+ ClientSecret: clientSecret,
+ AuthorizationCode: authorizationCode,
+ RedirectURI: redirectURI,
+ },
+ callbacks...,
+ )
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+ return msiEndpoint, nil
+}
+
+func isAppService() bool {
+ _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
+ _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
+
+ return asMSIEndpointEnvExists && asMSISecretEnvExists
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions.
+func GetMSIAppServiceEndpoint() (string, error) {
+ asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
+
+ if asMSIEndpointEnvExists {
+ return asMSIEndpoint, nil
+ }
+ return "", errors.New("MSI endpoint not found")
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment.
+func GetMSIEndpoint() (string, error) {
+ if isAppService() {
+ return GetMSIAppServiceEndpoint()
+ }
+ return GetMSIVMEndpoint()
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the specified user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if userAssignedID != nil {
+ if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
+ return nil, err
+ }
+ }
+ // We set the oauth config token endpoint to be MSI's endpoint
+ msiEndpointURL, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ v := url.Values{}
+ v.Set("resource", resource)
+ // App Service MSI currently only supports token API version 2017-09-01
+ if isAppService() {
+ v.Set("api-version", "2017-09-01")
+ } else {
+ v.Set("api-version", "2018-02-01")
+ }
+ if userAssignedID != nil {
+ v.Set("client_id", *userAssignedID)
+ }
+ msiEndpointURL.RawQuery = v.Encode()
+
+ spt := &ServicePrincipalToken{
+ inner: servicePrincipalToken{
+ Token: newToken(),
+ OauthConfig: OAuthConfig{
+ TokenEndpoint: *msiEndpointURL,
+ },
+ Secret: &ServicePrincipalMSISecret{},
+ Resource: resource,
+ AutoRefresh: true,
+ RefreshWithin: defaultRefresh,
+ },
+ refreshLock: &sync.RWMutex{},
+ sender: sender(),
+ refreshCallbacks: callbacks,
+ MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
+ }
+
+ if userAssignedID != nil {
+ spt.inner.ClientID = *userAssignedID
+ }
+
+ return spt, nil
+}
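+
+// exampleMSIToken is an illustrative sketch, not part of the upstream file:
+// it resolves the MSI endpoint for the current environment (IMDS on VMs, the
+// MSI_ENDPOINT variable on App Service) and requests an ARM token for the
+// system-assigned identity.
+func exampleMSIToken() (*ServicePrincipalToken, error) {
+ endpoint, err := GetMSIEndpoint()
+ if err != nil {
+ return nil, err
+ }
+ return NewServicePrincipalTokenFromMSI(endpoint, "https://management.azure.com/")
+}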
+
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+ message string
+ resp *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+ return tre.message
+}
+
+// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
+func (tre tokenRefreshError) Response() *http.Response {
+ return tre.resp
+}
+
+func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
+ return tokenRefreshError{message: message, resp: resp}
+}
+
+// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFresh() error {
+ return spt.EnsureFreshWithContext(context.Background())
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+ if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+ // take the write lock then check to see if the token was already refreshed
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+ return spt.refreshInternal(ctx, spt.inner.Resource)
+ }
+ }
+ return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+ if spt.refreshCallbacks != nil {
+ for _, callback := range spt.refreshCallbacks {
+ err := callback(spt.inner.Token)
+ if err != nil {
+ return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+ }
+ }
+ }
+ return nil
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) Refresh() error {
+ return spt.RefreshWithContext(context.Background())
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ return spt.refreshInternal(ctx, spt.inner.Resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+ return spt.RefreshExchangeWithContext(context.Background(), resource)
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+ switch spt.inner.Secret.(type) {
+ case *ServicePrincipalUsernamePasswordSecret:
+ return OAuthGrantTypeUserPass
+ case *ServicePrincipalAuthorizationCodeSecret:
+ return OAuthGrantTypeAuthorizationCode
+ default:
+ return OAuthGrantTypeClientCredentials
+ }
+}
+
+func isIMDS(u url.URL) bool {
+ imds, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return false
+ }
+ return (u.Host == imds.Host && u.Path == imds.Path) || isAppService()
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+ req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+ }
+ req.Header.Add("User-Agent", UserAgent())
+ // Add header when runtime is on App Service or Functions
+ if isAppService() {
+ asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
+ req.Header.Add("Secret", asMSISecret)
+ }
+ req = req.WithContext(ctx)
+ if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+ v := url.Values{}
+ v.Set("client_id", spt.inner.ClientID)
+ v.Set("resource", resource)
+
+ if spt.inner.Token.RefreshToken != "" {
+ v.Set("grant_type", OAuthGrantTypeRefreshToken)
+ v.Set("refresh_token", spt.inner.Token.RefreshToken)
+ // web apps must specify client_secret when refreshing tokens
+ // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
+ if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
+ err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ v.Set("grant_type", spt.getGrantType())
+ err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+ if err != nil {
+ return err
+ }
+ }
+
+ s := v.Encode()
+ body := ioutil.NopCloser(strings.NewReader(s))
+ req.ContentLength = int64(len(s))
+ req.Header.Set(contentType, mimeTypeFormPost)
+ req.Body = body
+ }
+
+ if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+ req.Method = http.MethodGet
+ req.Header.Set(metadataHeader, "true")
+ }
+
+ var resp *http.Response
+ if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+ resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+ } else {
+ resp, err = spt.sender.Do(req)
+ }
+ if err != nil {
+ // don't return a TokenRefreshError here; this will allow retry logic to apply
+ return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
+ }
+
+ defer resp.Body.Close()
+ rb, err := ioutil.ReadAll(resp.Body)
+
+ if resp.StatusCode != http.StatusOK {
+ if err != nil {
+ return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp)
+ }
+ return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
+ }
+
+ // for the following error cases don't return a TokenRefreshError. the operation succeeded
+ // but some transient failure happened during deserialization. by returning a generic error
+ // the retry logic will kick in (we don't retry on TokenRefreshError).
+
+ if err != nil {
+ return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
+ }
+ if len(strings.Trim(string(rb), " ")) == 0 {
+ return fmt.Errorf("adal: Empty service principal token received during refresh")
+ }
+ var token Token
+ err = json.Unmarshal(rb, &token)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
+ }
+
+ spt.inner.Token = token
+
+ return spt.InvokeRefreshCallbacks(token)
+}
+
+// retry logic specific to retrieving a token from the IMDS endpoint
+func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) {
+ // copied from client.go due to circular dependency
+ retries := []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+ // extra retry status codes specific to IMDS
+ retries = append(retries,
+ http.StatusNotFound,
+ http.StatusGone,
+ // all remaining 5xx
+ http.StatusNotImplemented,
+ http.StatusHTTPVersionNotSupported,
+ http.StatusVariantAlsoNegotiates,
+ http.StatusInsufficientStorage,
+ http.StatusLoopDetected,
+ http.StatusNotExtended,
+ http.StatusNetworkAuthenticationRequired)
+
+ // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance
+
+ const maxDelay time.Duration = 60 * time.Second
+
+ attempt := 0
+ delay := time.Duration(0)
+
+ for attempt < maxAttempts {
+ resp, err = sender.Do(req)
+ // we want to retry if err is not nil or the status code is in the list of retry codes
+ if err == nil && !responseHasStatusCode(resp, retries...) {
+ return
+ }
+
+ // perform exponential backoff with a cap.
+ // must increment attempt before calculating delay.
+ attempt++
+ // the base value of 2 is the "delta backoff" as specified in the guidance doc
+ delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second)
+ if delay > maxDelay {
+ delay = maxDelay
+ }
+
+ select {
+ case <-time.After(delay):
+ // intentionally left blank
+ case <-req.Context().Done():
+ err = req.Context().Err()
+ return
+ }
+ }
+ return
+}
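+
+// For illustration: with the delta backoff of 2 above, the cumulative delay
+// after successive failed attempts grows as 2s, 6s, 14s, 30s, then 62s,
+// which the cap trims to maxDelay (60s).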
+
+func responseHasStatusCode(resp *http.Response, codes ...int) bool {
+ if resp != nil {
+ for _, i := range codes {
+ if i == resp.StatusCode {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+ spt.inner.AutoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which, if the token will expire,
+// EnsureFresh will refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+ spt.inner.RefreshWithin = d
+}
+
+// SetSender sets the http.Client used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+ spt.refreshLock.RLock()
+ defer spt.refreshLock.RUnlock()
+ return spt.inner.Token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+ spt.refreshLock.RLock()
+ defer spt.refreshLock.RUnlock()
+ return spt.inner.Token
+}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+ PrimaryToken *ServicePrincipalToken
+ AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+ return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+ tokens := make([]string, len(mt.AuxiliaryTokens))
+ for i := range mt.AuxiliaryTokens {
+ tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+ }
+ return tokens
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+ if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.EnsureFreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+ if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.RefreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+ if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(secret, "secret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ auxTenants := multiTenantCfg.AuxiliaryTenants()
+ m := MultiTenantServicePrincipalToken{
+ AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
+ }
+ primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
+ }
+ m.PrimaryToken = primary
+ for i := range auxTenants {
+ aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
+ }
+ m.AuxiliaryTokens[i] = aux
+ }
+ return &m, nil
+}
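+
+// exampleMultiTenant is an illustrative sketch, not part of the upstream
+// file: it assumes NewMultiTenantOAuthConfig and OAuthOptions from this
+// package's config.go; tenant and credential values are placeholders.
+func exampleMultiTenant() (*MultiTenantServicePrincipalToken, error) {
+ cfg, err := NewMultiTenantOAuthConfig("https://login.microsoftonline.com/", "primary-tenant-id", []string{"aux-tenant-id"}, OAuthOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return NewMultiTenantServicePrincipalToken(cfg, "my-client-id", "my-client-secret", "https://management.azure.com/")
+}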
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go
new file mode 100644
index 0000000..c867b34
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go
@@ -0,0 +1,45 @@
+package adal
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const number = "v1.0.0"
+
+var (
+ ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
+ runtime.Version(),
+ runtime.GOARCH,
+ runtime.GOOS,
+ number,
+ )
+)
+
+// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
+func UserAgent() string {
+ return ua
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func AddToUserAgent(extension string) error {
+ if extension != "" {
+ ua = fmt.Sprintf("%s %s", ua, extension)
+ return nil
+ }
+ return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
+}
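+
+// For illustration (not part of the upstream file): callers typically extend
+// the agent string once during startup, e.g.
+//
+//	_ = adal.AddToUserAgent("myapp/1.2.3")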
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
new file mode 100644
index 0000000..54e87b5
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -0,0 +1,336 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "crypto/tls"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+ bearerChallengeHeader = "Www-Authenticate"
+ bearer = "Bearer"
+ tenantID = "tenantID"
+ apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
+ bingAPISdkHeader = "X-BingApis-SDK-Client"
+ golangBingAPISdkHeaderValue = "Go-SDK"
+ authorization = "Authorization"
+ basic = "Basic"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+ WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+ return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+ headers map[string]interface{}
+ queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+ return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+ return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the specified headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+ return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the configured HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+ }
+}
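+
+// exampleAPIKeyAuthorizer is an illustrative sketch, not part of the upstream
+// file: the key value and api-version are hypothetical placeholders.
+func exampleAPIKeyAuthorizer() Authorizer {
+ return NewAPIKeyAuthorizer(
+ map[string]interface{}{apiKeyAuthorizerHeader: "my-subscription-key"},
+ map[string]interface{}{"api-version": "2019-06-01"},
+ )
+}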
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+ subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+ return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key and Bing SDK headers.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+ headers := make(map[string]interface{})
+ headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+ headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+ return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements the bearer authorization
+type BearerAuthorizer struct {
+ tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+ return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ // the ordering is important here, prefer RefresherWithContext if available
+ if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
+ err = refresher.EnsureFreshWithContext(r.Context())
+ } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
+ err = refresher.EnsureFresh()
+ }
+ if err != nil {
+ var resp *http.Response
+ if tokError, ok := err.(adal.TokenRefreshError); ok {
+ resp = tokError.Response()
+ }
+ return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
+ "Failed to refresh the Token for request to %s", r.URL)
+ }
+ return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
+ }
+ return r, err
+ })
+ }
+}
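+
+// Illustrative usage (editor's sketch): a BearerAuthorizer is commonly built from
+// an adal.ServicePrincipalToken, which satisfies both OAuthTokenProvider and the
+// Refresher interfaces. The adal constructors are assumed from that package; the
+// tenant, client, secret, and resource values are placeholders:
+//
+//  oauthCfg, err := adal.NewOAuthConfig(aadEndpoint, tenantID)
+//  spt, err := adal.NewServicePrincipalToken(*oauthCfg, clientID, secret, resource)
+//  req, err := Prepare(&http.Request{}, NewBearerAuthorizer(spt).WithAuthorization())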
+
+// BearerAuthorizerCallbackFunc is the authentication callback signature.
+type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
+
+// BearerAuthorizerCallback implements bearer authorization via a callback.
+type BearerAuthorizerCallback struct {
+ sender Sender
+ callback BearerAuthorizerCallbackFunc
+}
+
+// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
+// is invoked when the HTTP request is submitted.
+func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
+ if s == nil {
+ s = sender(tls.RenegotiateNever)
+ }
+ return &BearerAuthorizerCallback{sender: s, callback: callback}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
+// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ // make a shallow copy of the request and remove the body; the probe
+ // request doesn't need it, and this avoids copying the body contents.
+ rCopy := *r
+ removeRequestBody(&rCopy)
+
+ resp, err := bacb.sender.Do(&rCopy)
+ if err == nil && resp.StatusCode == 401 {
+ defer resp.Body.Close()
+ if hasBearerChallenge(resp) {
+ bc, err := newBearerChallenge(resp)
+ if err != nil {
+ return r, err
+ }
+ if bacb.callback != nil {
+ ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+ if err != nil {
+ return r, err
+ }
+ return Prepare(r, ba.WithAuthorization())
+ }
+ }
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// returns true if the HTTP response contains a bearer challenge
+func hasBearerChallenge(resp *http.Response) bool {
+ authHeader := resp.Header.Get(bearerChallengeHeader)
+ return len(authHeader) != 0 && strings.Contains(authHeader, bearer)
+}
+
+type bearerChallenge struct {
+ values map[string]string
+}
+
+func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
+ challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
+ trimmedChallenge := challenge[len(bearer)+1:]
+
+ // challenge is a set of key=value pairs that are comma delimited
+ pairs := strings.Split(trimmedChallenge, ",")
+ if len(pairs) < 1 {
+ err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+ return bc, err
+ }
+
+ bc.values = make(map[string]string)
+ for i := range pairs {
+ trimmedPair := strings.TrimSpace(pairs[i])
+ pair := strings.Split(trimmedPair, "=")
+ if len(pair) == 2 {
+ // remove the enclosing quotes
+ key := strings.Trim(pair[0], "\"")
+ value := strings.Trim(pair[1], "\"")
+
+ switch key {
+ case "authorization", "authorization_uri":
+ // strip the tenant ID from the authorization URL
+ asURL, err := url.Parse(value)
+ if err != nil {
+ return bc, err
+ }
+ bc.values[tenantID] = asURL.Path[1:]
+ default:
+ bc.values[key] = value
+ }
+ }
+ }
+
+ return bc, err
+}
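+
+// For reference (editor's note, with placeholder values), a challenge header such as
+//
+//  Www-Authenticate: Bearer authorization_uri="https://login.windows.net/tenant-id", resource="https://management.azure.com/"
+//
+// parses into values["tenantID"] = "tenant-id" and
+// values["resource"] = "https://management.azure.com/".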
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+ topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+ return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+ headers := map[string]interface{}{
+ "aeg-sas-key": egta.topicKey,
+ }
+ return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic " where is a base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+ userName string
+ password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+ return &BasicAuthorizer{
+ userName: userName,
+ password: password,
+ }
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+ headers := make(map[string]interface{})
+ headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+ return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
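+
+// Illustrative usage (editor's sketch; the credentials are placeholders):
+//
+//  ba := NewBasicAuthorizer("user", "password")
+//  req, err := Prepare(&http.Request{}, ba.WithAuthorization())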
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+ WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantServicePrincipalTokenAuthorizer using the given token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+ return &multiTenantSPTAuthorizer{tp: tp}
+}
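+
+// Illustrative usage (editor's sketch): the provider is typically an adal
+// multitenant service principal token. The adal constructor names below are
+// assumptions taken from that package, and all credential values are placeholders:
+//
+//  mtCfg, err := adal.NewMultiTenantOAuthConfig(aadEndpoint, primaryTenant, auxTenants, adal.OAuthOptions{})
+//  mtSPT, err := adal.NewMultiTenantServicePrincipalToken(mtCfg, clientID, secret, resource)
+//  authorizer := NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT)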
+
+type multiTenantSPTAuthorizer struct {
+ tp adal.MultitenantOAuthTokenProvider
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+ if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+ err = refresher.EnsureFreshWithContext(r.Context())
+ if err != nil {
+ var resp *http.Response
+ if tokError, ok := err.(adal.TokenRefreshError); ok {
+ resp = tokError.Response()
+ }
+ return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+ "Failed to refresh one or more Tokens for request to %s", r.URL)
+ }
+ }
+ r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+ if err != nil {
+ return r, err
+ }
+ auxTokens := mt.tp.AuxiliaryOAuthTokens()
+ for i := range auxTokens {
+ auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+ }
+ return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
+ })
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
new file mode 100644
index 0000000..aafdf02
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
@@ -0,0 +1,150 @@
+/*
+Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
+and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
+generated Go code.
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+ req, err := Prepare(&http.Request{},
+ token.WithAuthorization())
+
+ resp, err := Send(req,
+ WithLogging(logger),
+ DoErrorIfStatusCode(http.StatusInternalServerError),
+ DoCloseIfError(),
+ DoRetryForAttempts(5, time.Second))
+
+ err = Respond(resp,
+ ByDiscardingBody(),
+ ByClosing())
+
+Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+ req, err := Prepare(&http.Request{},
+ WithBaseURL("https://microsoft.com/"),
+ WithPath("a"),
+ WithPath("b"),
+ WithPath("c"))
+
+will set the URL to:
+
+ https://microsoft.com/a/b/c
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
+all bound together by means of input / output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., ByUnmarshallingJson) is likely incorrect.
+
+Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
+correct parsing and formatting.
+
+Errors raised by autorest objects and methods will conform to the autorest.Error interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
+*/
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+const (
+ // HeaderLocation specifies the HTTP Location header.
+ HeaderLocation = "Location"
+
+ // HeaderRetryAfter specifies the HTTP Retry-After header.
+ HeaderRetryAfter = "Retry-After"
+)
+
+// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
+// and false otherwise.
+func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
+ if resp == nil {
+ return false
+ }
+ return containsInt(codes, resp.StatusCode)
+}
+
+// GetLocation retrieves the URL from the Location header of the passed response.
+func GetLocation(resp *http.Response) string {
+ return resp.Header.Get(HeaderLocation)
+}
+
+// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
+// the header is absent or is malformed, it will return the supplied default delay time.Duration.
+func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
+ retry := resp.Header.Get(HeaderRetryAfter)
+ if retry == "" {
+ return defaultDelay
+ }
+
+ d, err := time.ParseDuration(retry + "s")
+ if err != nil {
+ return defaultDelay
+ }
+
+ return d
+}
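+
+// Illustrative usage (editor's sketch): callers typically supply the delay they
+// would otherwise use, falling back to it when the header is absent or malformed:
+//
+//  delay := GetRetryAfter(resp, 30*time.Second)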
+
+// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
+ location := GetLocation(resp)
+ if location == "" {
+ return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
+ }
+
+ req, err := Prepare(&http.Request{Cancel: cancel},
+ AsGet(),
+ WithBaseURL(location))
+ if err != nil {
+ return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
+ }
+
+ return req, nil
+}
+
+// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response.
+func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) {
+ location := GetLocation(resp)
+ if location == "" {
+ return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling")
+ }
+
+ req, err := Prepare((&http.Request{}).WithContext(ctx),
+ AsGet(),
+ WithBaseURL(location))
+ if err != nil {
+ return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location)
+ }
+
+ return req, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
new file mode 100644
index 0000000..1cb41cb
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -0,0 +1,924 @@
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/tracing"
+)
+
+const (
+ headerAsyncOperation = "Azure-AsyncOperation"
+)
+
+const (
+ operationInProgress string = "InProgress"
+ operationCanceled string = "Canceled"
+ operationFailed string = "Failed"
+ operationSucceeded string = "Succeeded"
+)
+
+var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
+
+// Future provides a mechanism to access the status and results of an asynchronous request.
+// Since futures are stateful they should be passed by value to avoid race conditions.
+type Future struct {
+ pt pollingTracker
+}
+
+// NewFutureFromResponse returns a new Future object initialized
+// with the initial response from an asynchronous operation.
+func NewFutureFromResponse(resp *http.Response) (Future, error) {
+ pt, err := createPollingTracker(resp)
+ return Future{pt: pt}, err
+}
+
+// Response returns the last HTTP response.
+func (f Future) Response() *http.Response {
+ if f.pt == nil {
+ return nil
+ }
+ return f.pt.latestResponse()
+}
+
+// Status returns the last status message of the operation.
+func (f Future) Status() string {
+ if f.pt == nil {
+ return ""
+ }
+ return f.pt.pollingStatus()
+}
+
+// PollingMethod returns the method used to monitor the status of the asynchronous operation.
+func (f Future) PollingMethod() PollingMethodType {
+ if f.pt == nil {
+ return PollingUnknown
+ }
+ return f.pt.pollingMethod()
+}
+
+// DoneWithContext queries the service to see if the operation has completed.
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
+ ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
+ defer func() {
+ sc := -1
+ resp := f.Response()
+ if resp != nil {
+ sc = resp.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+
+ if f.pt == nil {
+ return false, autorest.NewError("Future", "Done", "future is not initialized")
+ }
+ if f.pt.hasTerminated() {
+ return true, f.pt.pollingError()
+ }
+ if err := f.pt.pollForStatus(ctx, sender); err != nil {
+ return false, err
+ }
+ if err := f.pt.checkForErrors(); err != nil {
+ return f.pt.hasTerminated(), err
+ }
+ if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
+ return false, err
+ }
+ if err := f.pt.initPollingMethod(); err != nil {
+ return false, err
+ }
+ if err := f.pt.updatePollingMethod(); err != nil {
+ return false, err
+ }
+ return f.pt.hasTerminated(), f.pt.pollingError()
+}
+
+// GetPollingDelay returns a duration the application should wait before checking
+// the status of the asynchronous request and true; this value is returned from
+// the service via the Retry-After response header. If the header wasn't returned
+// then the function returns the zero-value time.Duration and false.
+func (f Future) GetPollingDelay() (time.Duration, bool) {
+ if f.pt == nil {
+ return 0, false
+ }
+ resp := f.pt.latestResponse()
+ if resp == nil {
+ return 0, false
+ }
+
+ retry := resp.Header.Get(autorest.HeaderRetryAfter)
+ if retry == "" {
+ return 0, false
+ }
+
+ d, err := time.ParseDuration(retry + "s")
+ if err != nil {
+ panic(err)
+ }
+
+ return d, true
+}
+
+// WaitForCompletionRef will return when one of the following conditions is met: the
+// long-running operation has completed, the provided context is cancelled, or the client's
+// polling duration has been exceeded. It will retry failed polling attempts based on
+// the retry value defined in the client up to the maximum retry attempts.
+// If no deadline is specified in the context then the client.PollingDuration will be
+// used to determine if a default deadline should be used.
+// If PollingDuration is greater than zero the value will be used as the context's timeout.
+// If PollingDuration is zero then no default deadline will be used.
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
+ ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
+ defer func() {
+ sc := -1
+ resp := f.Response()
+ if resp != nil {
+ sc = resp.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ cancelCtx := ctx
+ // if the provided context already has a deadline don't override it
+ _, hasDeadline := ctx.Deadline()
+ if d := client.PollingDuration; !hasDeadline && d != 0 {
+ var cancel context.CancelFunc
+ cancelCtx, cancel = context.WithTimeout(ctx, d)
+ defer cancel()
+ }
+
+ done, err := f.DoneWithContext(ctx, client)
+ for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
+ if attempts >= client.RetryAttempts {
+ return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
+ }
+ // we want delayAttempt to be zero in the non-error case so
+ // that DelayForBackoff doesn't perform exponential back-off
+ var delayAttempt int
+ var delay time.Duration
+ if err == nil {
+ // check for Retry-After delay, if not present use the client's polling delay
+ var ok bool
+ delay, ok = f.GetPollingDelay()
+ if !ok {
+ delay = client.PollingDelay
+ }
+ } else {
+ // there was an error polling for status so perform exponential
+ // back-off based on the number of attempts using the client's retry
+ // duration. update attempts after delayAttempt to avoid off-by-one.
+ delayAttempt = attempts
+ delay = client.RetryDuration
+ attempts++
+ }
+ // wait until the delay elapses or the context is cancelled
+ delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
+ if !delayElapsed {
+ return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
+ }
+ }
+ return
+}
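+
+// Illustrative usage (editor's sketch of the typical flow in generated clients;
+// client is assumed to be a configured autorest.Client, which acts as the Sender):
+//
+//  future, err := NewFutureFromResponse(resp)
+//  if err != nil {
+//      return err
+//  }
+//  if err := future.WaitForCompletionRef(ctx, client); err != nil {
+//      return err
+//  }
+//  finalResp, err := future.GetResult(client)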
+
+// MarshalJSON implements the json.Marshaler interface.
+func (f Future) MarshalJSON() ([]byte, error) {
+ return json.Marshal(f.pt)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (f *Future) UnmarshalJSON(data []byte) error {
+ // unmarshal into JSON object to determine the tracker type
+ obj := map[string]interface{}{}
+ err := json.Unmarshal(data, &obj)
+ if err != nil {
+ return err
+ }
+ if obj["method"] == nil {
+ return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+ }
+ method := obj["method"].(string)
+ switch strings.ToUpper(method) {
+ case http.MethodDelete:
+ f.pt = &pollingTrackerDelete{}
+ case http.MethodPatch:
+ f.pt = &pollingTrackerPatch{}
+ case http.MethodPost:
+ f.pt = &pollingTrackerPost{}
+ case http.MethodPut:
+ f.pt = &pollingTrackerPut{}
+ default:
+ return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method)
+ }
+ // now unmarshal into the tracker
+ return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+ if f.pt == nil {
+ return ""
+ }
+ return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
+ if f.pt.finalGetURL() == "" {
+ // we can end up in this situation if the async operation returns a 200
+ // with no polling URLs. in that case return the response which should
+ // contain the JSON payload (only do this for successful terminal cases).
+ if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
+ return lr, nil
+ }
+ return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
+ }
+ req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
+ if err != nil {
+ return nil, err
+ }
+ return sender.Do(req)
+}
+
+type pollingTracker interface {
+ // these methods can differ per tracker
+
+ // checks the response headers and status code to determine the polling mechanism
+ updatePollingMethod() error
+
+ // checks the response for tracker-specific error conditions
+ checkForErrors() error
+
+ // returns true if provisioning state should be checked
+ provisioningStateApplicable() bool
+
+ // methods common to all trackers
+
+ // initializes a tracker's polling URL and method, called for each iteration.
+ // these values can be overridden by each polling tracker as required.
+ initPollingMethod() error
+
+ // initializes the tracker's internal state, call this when the tracker is created
+ initializeState() error
+
+ // makes an HTTP request to check the status of the LRO
+ pollForStatus(ctx context.Context, sender autorest.Sender) error
+
+ // updates internal tracker state, call this after each call to pollForStatus
+ updatePollingState(provStateApl bool) error
+
+ // returns the error response from the service, can be nil
+ pollingError() error
+
+ // returns the polling method being used
+ pollingMethod() PollingMethodType
+
+ // returns the state of the LRO as returned from the service
+ pollingStatus() string
+
+ // returns the URL used for polling status
+ pollingURL() string
+
+ // returns the URL used for the final GET to retrieve the resource
+ finalGetURL() string
+
+ // returns true if the LRO is in a terminal state
+ hasTerminated() bool
+
+ // returns true if the LRO is in a failed terminal state
+ hasFailed() bool
+
+ // returns true if the LRO is in a successful terminal state
+ hasSucceeded() bool
+
+ // returns the cached HTTP response after a call to pollForStatus(), can be nil
+ latestResponse() *http.Response
+}
+
+type pollingTrackerBase struct {
+ // resp is the last response, either from the submission of the LRO or from polling
+ resp *http.Response
+
+ // Method is the HTTP verb; this is needed for deserialization
+ Method string `json:"method"`
+
+ // rawBody is the raw JSON response body
+ rawBody map[string]interface{}
+
+ // denotes if polling is using async-operation or location header
+ Pm PollingMethodType `json:"pollingMethod"`
+
+ // the URL to poll for status
+ URI string `json:"pollingURI"`
+
+ // the state of the LRO as returned from the service
+ State string `json:"lroState"`
+
+ // the URL to GET for the final result
+ FinalGetURI string `json:"resultURI"`
+
+ // used to hold an error object returned from the service
+ Err *ServiceError `json:"error,omitempty"`
+}
+
+func (pt *pollingTrackerBase) initializeState() error {
+ // determine the initial polling state based on response body and/or HTTP status
+ // code. this is applicable to the initial LRO response, not polling responses!
+ pt.Method = pt.resp.Request.Method
+ if err := pt.updateRawBody(); err != nil {
+ return err
+ }
+ switch pt.resp.StatusCode {
+ case http.StatusOK:
+ if ps := pt.getProvisioningState(); ps != nil {
+ pt.State = *ps
+ if pt.hasFailed() {
+ pt.updateErrorFromResponse()
+ return pt.pollingError()
+ }
+ } else {
+ pt.State = operationSucceeded
+ }
+ case http.StatusCreated:
+ if ps := pt.getProvisioningState(); ps != nil {
+ pt.State = *ps
+ } else {
+ pt.State = operationInProgress
+ }
+ case http.StatusAccepted:
+ pt.State = operationInProgress
+ case http.StatusNoContent:
+ pt.State = operationSucceeded
+ default:
+ pt.State = operationFailed
+ pt.updateErrorFromResponse()
+ return pt.pollingError()
+ }
+ return pt.initPollingMethod()
+}
+
+func (pt pollingTrackerBase) getProvisioningState() *string {
+ if pt.rawBody != nil && pt.rawBody["properties"] != nil {
+ p := pt.rawBody["properties"].(map[string]interface{})
+ if ps := p["provisioningState"]; ps != nil {
+ s := ps.(string)
+ return &s
+ }
+ }
+ return nil
+}
+
+func (pt *pollingTrackerBase) updateRawBody() error {
+ pt.rawBody = map[string]interface{}{}
+ if pt.resp.ContentLength != 0 {
+ defer pt.resp.Body.Close()
+ b, err := ioutil.ReadAll(pt.resp.Body)
+ if err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
+ }
+ // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
+ if len(b) == 0 {
+ return nil
+ }
+ // put the body back so it's available to other callers
+ pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ if err = json.Unmarshal(b, &pt.rawBody); err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body")
+ }
+ }
+ return nil
+}
+
+func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
+ req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
+ if err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
+ }
+
+ req = req.WithContext(ctx)
+ preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...)
+ req, err = preparer.Prepare(req)
+ if err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request")
+ }
+ pt.resp, err = sender.Do(req)
+ if err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
+ }
+ if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) {
+ // reset the service error on success case
+ pt.Err = nil
+ err = pt.updateRawBody()
+ } else {
+ // check response body for error content
+ pt.updateErrorFromResponse()
+ err = pt.pollingError()
+ }
+ return err
+}
+
+// attempts to unmarshal a ServiceError type from the response body.
+// if that fails then make a best attempt at creating something meaningful.
+// NOTE: this assumes that the async operation has failed.
+func (pt *pollingTrackerBase) updateErrorFromResponse() {
+ var err error
+ if pt.resp.ContentLength != 0 {
+ type respErr struct {
+ ServiceError *ServiceError `json:"error"`
+ }
+ re := respErr{}
+ defer pt.resp.Body.Close()
+ var b []byte
+ if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
+ goto Default
+ }
+ if err = json.Unmarshal(b, &re); err != nil {
+ goto Default
+ }
+ // unmarshalling the error didn't yield anything, try unwrapped error
+ if re.ServiceError == nil {
+ err = json.Unmarshal(b, &re.ServiceError)
+ if err != nil {
+ goto Default
+ }
+ }
+ // the unmarshaller will ensure re.ServiceError is non-nil
+ // even if there was no content unmarshalled so check the code.
+ if re.ServiceError.Code != "" {
+ pt.Err = re.ServiceError
+ return
+ }
+ }
+Default:
+ se := &ServiceError{
+ Code: pt.pollingStatus(),
+ Message: "The async operation failed.",
+ }
+ if err != nil {
+ se.InnerError = make(map[string]interface{})
+ se.InnerError["unmarshalError"] = err.Error()
+ }
+ // stick the response body into the error object in hopes
+ // it contains something useful to help diagnose the failure.
+ if len(pt.rawBody) > 0 {
+ se.AdditionalInfo = []map[string]interface{}{
+ pt.rawBody,
+ }
+ }
+ pt.Err = se
+}
+
+func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error {
+ if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil {
+ pt.State = pt.rawBody["status"].(string)
+ } else {
+ if pt.resp.StatusCode == http.StatusAccepted {
+ pt.State = operationInProgress
+ } else if provStateApl {
+ if ps := pt.getProvisioningState(); ps != nil {
+ pt.State = *ps
+ } else {
+ pt.State = operationSucceeded
+ }
+ } else {
+ return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code")
+ }
+ }
+ // if the operation has failed update the error state
+ if pt.hasFailed() {
+ pt.updateErrorFromResponse()
+ }
+ return nil
+}
+
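+// pollingError returns the service error, if any. The explicit nil check below is
+// deliberate: pt.Err is a *ServiceError, and returning it directly while nil would
+// yield a non-nil error interface wrapping a typed-nil pointer.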
+func (pt pollingTrackerBase) pollingError() error {
+ if pt.Err == nil {
+ return nil
+ }
+ return pt.Err
+}
+
+func (pt pollingTrackerBase) pollingMethod() PollingMethodType {
+ return pt.Pm
+}
+
+func (pt pollingTrackerBase) pollingStatus() string {
+ return pt.State
+}
+
+func (pt pollingTrackerBase) pollingURL() string {
+ return pt.URI
+}
+
+func (pt pollingTrackerBase) finalGetURL() string {
+ return pt.FinalGetURI
+}
+
+func (pt pollingTrackerBase) hasTerminated() bool {
+ return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded)
+}
+
+func (pt pollingTrackerBase) hasFailed() bool {
+ return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed)
+}
+
+func (pt pollingTrackerBase) hasSucceeded() bool {
+ return strings.EqualFold(pt.State, operationSucceeded)
+}
+
+func (pt pollingTrackerBase) latestResponse() *http.Response {
+ return pt.resp
+}
+
+// error checking common to all trackers
+func (pt pollingTrackerBase) baseCheckForErrors() error {
+ // for Azure-AsyncOperations the response body cannot be nil or empty
+ if pt.Pm == PollingAsyncOperation {
+ if pt.resp.Body == nil || pt.resp.ContentLength == 0 {
+ return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil")
+ }
+ if pt.rawBody["status"] == nil {
+ return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body")
+ }
+ }
+ return nil
+}
+
+// default initialization of polling URL/method. each verb tracker will update this as required.
+func (pt *pollingTrackerBase) initPollingMethod() error {
+ if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ return nil
+ }
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+ return err
+ } else if lh != "" {
+ pt.URI = lh
+ pt.Pm = PollingLocation
+ return nil
+ }
+ // it's ok if we didn't find a polling header, this will be handled elsewhere
+ return nil
+}
+
+// DELETE
+
+type pollingTrackerDelete struct {
+ pollingTrackerBase
+}
+
+func (pt *pollingTrackerDelete) updatePollingMethod() error {
+ // for 201 the Location header is required
+ if pt.resp.StatusCode == http.StatusCreated {
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+ return err
+ } else if lh == "" {
+ return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response")
+ } else {
+ pt.URI = lh
+ }
+ pt.Pm = PollingLocation
+ pt.FinalGetURI = pt.URI
+ }
+ // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+ if pt.resp.StatusCode == http.StatusAccepted {
+ ao, err := getURLFromAsyncOpHeader(pt.resp)
+ if err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ }
+ // if the Location header is invalid and we already have a polling URL
+ // then we don't care if the Location header URL is malformed.
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+ return err
+ } else if lh != "" {
+ if ao == "" {
+ pt.URI = lh
+ pt.Pm = PollingLocation
+ }
+ // when both headers are returned we use the value in the Location header for the final GET
+ pt.FinalGetURI = lh
+ }
+ // make sure a polling URL was found
+ if pt.URI == "" {
+ return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+ }
+ }
+ return nil
+}
+
+func (pt pollingTrackerDelete) checkForErrors() error {
+ return pt.baseCheckForErrors()
+}
+
+func (pt pollingTrackerDelete) provisioningStateApplicable() bool {
+ return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
+}
+
+// PATCH
+
+type pollingTrackerPatch struct {
+ pollingTrackerBase
+}
+
+func (pt *pollingTrackerPatch) updatePollingMethod() error {
+ // by default we can use the original URL for polling and final GET
+ if pt.URI == "" {
+ pt.URI = pt.resp.Request.URL.String()
+ }
+ if pt.FinalGetURI == "" {
+ pt.FinalGetURI = pt.resp.Request.URL.String()
+ }
+ if pt.Pm == PollingUnknown {
+ pt.Pm = PollingRequestURI
+ }
+ // for 201 it's permissible for no headers to be returned
+ if pt.resp.StatusCode == http.StatusCreated {
+ if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ }
+ }
+ // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+ // note the absence of the "final GET" mechanism for PATCH
+ if pt.resp.StatusCode == http.StatusAccepted {
+ ao, err := getURLFromAsyncOpHeader(pt.resp)
+ if err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ }
+ if ao == "" {
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+ return err
+ } else if lh == "" {
+ return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+ } else {
+ pt.URI = lh
+ pt.Pm = PollingLocation
+ }
+ }
+ }
+ return nil
+}
+
+func (pt pollingTrackerPatch) checkForErrors() error {
+ return pt.baseCheckForErrors()
+}
+
+func (pt pollingTrackerPatch) provisioningStateApplicable() bool {
+ return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// POST
+
+type pollingTrackerPost struct {
+ pollingTrackerBase
+}
+
+func (pt *pollingTrackerPost) updatePollingMethod() error {
+ // 201 requires Location header
+ if pt.resp.StatusCode == http.StatusCreated {
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+ return err
+ } else if lh == "" {
+ return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response")
+ } else {
+ pt.URI = lh
+ pt.FinalGetURI = lh
+ pt.Pm = PollingLocation
+ }
+ }
+ // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+ if pt.resp.StatusCode == http.StatusAccepted {
+ ao, err := getURLFromAsyncOpHeader(pt.resp)
+ if err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ }
+ // if the Location header is invalid and we already have a polling URL
+ // then we don't care if the Location header URL is malformed.
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+ return err
+ } else if lh != "" {
+ if ao == "" {
+ pt.URI = lh
+ pt.Pm = PollingLocation
+ }
+ // when both headers are returned we use the value in the Location header for the final GET
+ pt.FinalGetURI = lh
+ }
+ // make sure a polling URL was found
+ if pt.URI == "" {
+ return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+ }
+ }
+ return nil
+}
+
+func (pt pollingTrackerPost) checkForErrors() error {
+ return pt.baseCheckForErrors()
+}
+
+func (pt pollingTrackerPost) provisioningStateApplicable() bool {
+ return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
+}
+
+// PUT
+
+type pollingTrackerPut struct {
+ pollingTrackerBase
+}
+
+func (pt *pollingTrackerPut) updatePollingMethod() error {
+ // by default we can use the original URL for polling and final GET
+ if pt.URI == "" {
+ pt.URI = pt.resp.Request.URL.String()
+ }
+ if pt.FinalGetURI == "" {
+ pt.FinalGetURI = pt.resp.Request.URL.String()
+ }
+ if pt.Pm == PollingUnknown {
+ pt.Pm = PollingRequestURI
+ }
+ // for 201 it's permissible for no headers to be returned
+ if pt.resp.StatusCode == http.StatusCreated {
+ if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ }
+ }
+ // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+ if pt.resp.StatusCode == http.StatusAccepted {
+ ao, err := getURLFromAsyncOpHeader(pt.resp)
+ if err != nil {
+ return err
+ } else if ao != "" {
+ pt.URI = ao
+ pt.Pm = PollingAsyncOperation
+ }
+ // if the Location header is invalid and we already have a polling URL
+ // then we don't care if the Location header URL is malformed.
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+ return err
+ } else if lh != "" {
+ if ao == "" {
+ pt.URI = lh
+ pt.Pm = PollingLocation
+ }
+ }
+ // make sure a polling URL was found
+ if pt.URI == "" {
+ return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+ }
+ }
+ return nil
+}
+
+func (pt pollingTrackerPut) checkForErrors() error {
+ err := pt.baseCheckForErrors()
+ if err != nil {
+ return err
+ }
+ // if there are no LRO headers then the body cannot be empty
+ ao, err := getURLFromAsyncOpHeader(pt.resp)
+ if err != nil {
+ return err
+ }
+ lh, err := getURLFromLocationHeader(pt.resp)
+ if err != nil {
+ return err
+ }
+ if ao == "" && lh == "" && len(pt.rawBody) == 0 {
+ return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
+ }
+ return nil
+}
+
+func (pt pollingTrackerPut) provisioningStateApplicable() bool {
+ return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// creates a polling tracker based on the verb of the original request
+func createPollingTracker(resp *http.Response) (pollingTracker, error) {
+ var pt pollingTracker
+ switch strings.ToUpper(resp.Request.Method) {
+ case http.MethodDelete:
+ pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ case http.MethodPatch:
+ pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ case http.MethodPost:
+ pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ case http.MethodPut:
+ pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ default:
+ return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
+ }
+ if err := pt.initializeState(); err != nil {
+ return pt, err
+ }
+ // this initializes the polling header values; we do this during creation in case the
+ // initial response sent us invalid values, so that the API call returns a non-nil
+ // error (not doing this means the error shows up in Future.Done)
+ return pt, pt.updatePollingMethod()
+}
+
+// gets the polling URL from the Azure-AsyncOperation header.
+// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
+ s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+ if s == "" {
+ return "", nil
+ }
+ if !isValidURL(s) {
+ return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
+ }
+ return s, nil
+}
+
+// gets the polling URL from the Location header.
+// ensures the URL is well-formed and absolute.
+func getURLFromLocationHeader(resp *http.Response) (string, error) {
+ s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
+ if s == "" {
+ return "", nil
+ }
+ if !isValidURL(s) {
+ return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
+ }
+ return s, nil
+}
+
+// verify that the URL is valid and absolute
+func isValidURL(s string) bool {
+ u, err := url.Parse(s)
+ return err == nil && u.IsAbs()
+}
+
+// PollingMethodType defines a type used for enumerating polling mechanisms.
+type PollingMethodType string
+
+const (
+ // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
+ PollingAsyncOperation PollingMethodType = "AsyncOperation"
+
+ // PollingLocation indicates the polling method uses the Location header.
+ PollingLocation PollingMethodType = "Location"
+
+ // PollingRequestURI indicates the polling method uses the original request URI.
+ PollingRequestURI PollingMethodType = "RequestURI"
+
+ // PollingUnknown indicates an unknown polling method and is the default value.
+ PollingUnknown PollingMethodType = ""
+)
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+ // FutureType is the name of the type composed of a azure.Future.
+ FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+ return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+ return AsyncOpIncompleteError{
+ FutureType: futureType,
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
new file mode 100644
index 0000000..5f02026
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
@@ -0,0 +1,737 @@
+package auth
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "unicode/utf16"
+
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/azure/cli"
+ "github.com/dimchansky/utfbom"
+ "golang.org/x/crypto/pkcs12"
+)
+
+// The possible keys in the Values map.
+const (
+ SubscriptionID = "AZURE_SUBSCRIPTION_ID"
+ TenantID = "AZURE_TENANT_ID"
+ AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS"
+ ClientID = "AZURE_CLIENT_ID"
+ ClientSecret = "AZURE_CLIENT_SECRET"
+ CertificatePath = "AZURE_CERTIFICATE_PATH"
+ CertificatePassword = "AZURE_CERTIFICATE_PASSWORD"
+ Username = "AZURE_USERNAME"
+ Password = "AZURE_PASSWORD"
+ EnvironmentName = "AZURE_ENVIRONMENT"
+ Resource = "AZURE_AD_RESOURCE"
+ ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint"
+ ResourceManagerEndpoint = "ResourceManagerEndpoint"
+ GraphResourceID = "GraphResourceID"
+ SQLManagementEndpoint = "SQLManagementEndpoint"
+ GalleryEndpoint = "GalleryEndpoint"
+ ManagementEndpoint = "ManagementEndpoint"
+)
+
+// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username password
+// 4. MSI
+func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
+ settings, err := GetSettingsFromEnvironment()
+ if err != nil {
+ return nil, err
+ }
+ return settings.GetAuthorizer()
+}
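+
+// Illustrative usage (editor's sketch): with, for example, AZURE_TENANT_ID,
+// AZURE_CLIENT_ID, and AZURE_CLIENT_SECRET exported, a client is wired up as
+//
+//  authorizer, err := NewAuthorizerFromEnvironment()
+//  if err == nil {
+//      client.Authorizer = authorizer
+//  }
+//
+// where client is assumed to embed autorest.Client and its Authorizer field.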
+
+// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username password
+// 4. MSI
+func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {
+ settings, err := GetSettingsFromEnvironment()
+ if err != nil {
+ return nil, err
+ }
+ settings.Values[Resource] = resource
+ return settings.GetAuthorizer()
+}
+
+// EnvironmentSettings contains the available authentication settings.
+type EnvironmentSettings struct {
+ Values map[string]string
+ Environment azure.Environment
+}
+
+// GetSettingsFromEnvironment returns the available authentication settings from the environment.
+func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) {
+ s = EnvironmentSettings{
+ Values: map[string]string{},
+ }
+ s.setValue(SubscriptionID)
+ s.setValue(TenantID)
+ s.setValue(AuxiliaryTenantIDs)
+ s.setValue(ClientID)
+ s.setValue(ClientSecret)
+ s.setValue(CertificatePath)
+ s.setValue(CertificatePassword)
+ s.setValue(Username)
+ s.setValue(Password)
+ s.setValue(EnvironmentName)
+ s.setValue(Resource)
+ if v := s.Values[EnvironmentName]; v == "" {
+ s.Environment = azure.PublicCloud
+ } else {
+ s.Environment, err = azure.EnvironmentFromName(v)
+ }
+ if s.Values[Resource] == "" {
+ s.Values[Resource] = s.Environment.ResourceManagerEndpoint
+ }
+ return
+}
+
+// GetSubscriptionID returns the available subscription ID or an empty string.
+func (settings EnvironmentSettings) GetSubscriptionID() string {
+ return settings.Values[SubscriptionID]
+}
+
+// adds the specified environment variable value to the Values map if it exists
+func (settings EnvironmentSettings) setValue(key string) {
+ if v := os.Getenv(key); v != "" {
+ settings.Values[key] = v
+ }
+}
+
+// helper to return client and tenant IDs
+func (settings EnvironmentSettings) getClientAndTenant() (string, string) {
+ clientID := settings.Values[ClientID]
+ tenantID := settings.Values[TenantID]
+ return clientID, tenantID
+}
+
+// GetClientCredentials creates a config object from the available client credentials.
+// An error is returned if no client credentials are available.
+func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) {
+ secret := settings.Values[ClientSecret]
+ if secret == "" {
+ return ClientCredentialsConfig{}, errors.New("missing client secret")
+ }
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewClientCredentialsConfig(clientID, secret, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok {
+ config.AuxTenants = strings.Split(auxTenants, ";")
+ for i := range config.AuxTenants {
+ config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i])
+ }
+ }
+ return config, nil
+}
+
+// GetClientCertificate creates a config object from the available certificate credentials.
+// An error is returned if no certificate credentials are available.
+func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) {
+ certPath := settings.Values[CertificatePath]
+ if certPath == "" {
+ return ClientCertificateConfig{}, errors.New("missing certificate path")
+ }
+ certPwd := settings.Values[CertificatePassword]
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config, nil
+}
+
+// GetUsernamePassword creates a config object from the available username/password credentials.
+// An error is returned if no username/password credentials are available.
+func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) {
+ username := settings.Values[Username]
+ password := settings.Values[Password]
+ if username == "" || password == "" {
+ return UsernamePasswordConfig{}, errors.New("missing username/password")
+ }
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config, nil
+}
+
+// GetMSI creates an MSI config object from the available client ID and resource.
+func (settings EnvironmentSettings) GetMSI() MSIConfig {
+ config := NewMSIConfig()
+ config.Resource = settings.Values[Resource]
+ config.ClientID = settings.Values[ClientID]
+ return config
+}
+
+// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
+func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewDeviceFlowConfig(clientID, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config
+}
+
+// GetAuthorizer creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username and password
+// 4. MSI
+func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
+ // 1. Client credentials
+ if c, e := settings.GetClientCredentials(); e == nil {
+ return c.Authorizer()
+ }
+
+ // 2. Client certificate
+ if c, e := settings.GetClientCertificate(); e == nil {
+ return c.Authorizer()
+ }
+
+ // 3. Username and password
+ if c, e := settings.GetUsernamePassword(); e == nil {
+ return c.Authorizer()
+ }
+
+ // 4. MSI
+ return settings.GetMSI().Authorizer()
+}
+
+// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
+// 1. Client credentials
+// 2. Client certificate
+func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
+ settings, err := GetSettingsFromFile()
+ if err != nil {
+ return nil, err
+ }
+ if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil {
+ return a, err
+ }
+ if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil {
+ return a, err
+ }
+ return nil, errors.New("auth file missing client and certificate credentials")
+}
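+
+// Illustrative usage (a sketch): assuming AZURE_AUTH_LOCATION points at an auth
+// file created with "az ad sp create-for-rbac --sdk-auth", a caller could do:
+//
+//	a, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint)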
+
+// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
+// 1. Client credentials
+// 2. Client certificate
+func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {
+ s, err := GetSettingsFromFile()
+ if err != nil {
+ return nil, err
+ }
+ if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil {
+ return a, err
+ }
+ if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil {
+ return a, err
+ }
+ return nil, errors.New("auth file missing client and certificate credentials")
+}
+
+// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
+func NewAuthorizerFromCLI() (autorest.Authorizer, error) {
+ settings, err := GetSettingsFromEnvironment()
+ if err != nil {
+ return nil, err
+ }
+
+ if settings.Values[Resource] == "" {
+ settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint
+ }
+
+ return NewAuthorizerFromCLIWithResource(settings.Values[Resource])
+}
+
+// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
+func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {
+ token, err := cli.GetTokenFromCLI(resource)
+ if err != nil {
+ return nil, err
+ }
+
+ adalToken, err := token.ToADALToken()
+ if err != nil {
+ return nil, err
+ }
+
+ return autorest.NewBearerAuthorizer(&adalToken), nil
+}
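+
+// Illustrative usage (a sketch): after a successful "az login", an authorizer
+// can be obtained with no further configuration:
+//
+//	a, err := auth.NewAuthorizerFromCLI()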
+
+// GetSettingsFromFile returns the available authentication settings from the auth
+// file referenced by the AZURE_AUTH_LOCATION environment variable (e.g. a file
+// created with "az ad sp create-for-rbac --sdk-auth").
+func GetSettingsFromFile() (FileSettings, error) {
+ s := FileSettings{}
+ fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
+ if fileLocation == "" {
+ return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
+ }
+
+ contents, err := ioutil.ReadFile(fileLocation)
+ if err != nil {
+ return s, err
+ }
+
+ // The auth file might be UTF-16 encoded with a BOM; normalize it to UTF-8 first
+ decoded, err := decode(contents)
+ if err != nil {
+ return s, err
+ }
+
+ authFile := map[string]interface{}{}
+ err = json.Unmarshal(decoded, &authFile)
+ if err != nil {
+ return s, err
+ }
+
+ s.Values = map[string]string{}
+ s.setKeyValue(ClientID, authFile["clientId"])
+ s.setKeyValue(ClientSecret, authFile["clientSecret"])
+ s.setKeyValue(CertificatePath, authFile["clientCertificate"])
+ s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"])
+ s.setKeyValue(SubscriptionID, authFile["subscriptionId"])
+ s.setKeyValue(TenantID, authFile["tenantId"])
+ s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"])
+ s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"])
+ s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"])
+ s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"])
+ s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"])
+ s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"])
+ return s, nil
+}
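+
+// For reference, the auth file is JSON; a minimal sketch of the shape consumed
+// above (values are placeholders):
+//
+//	{
+//	  "clientId": "...",
+//	  "clientSecret": "...",
+//	  "subscriptionId": "...",
+//	  "tenantId": "..."
+//	}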
+
+// FileSettings contains the available authentication settings.
+type FileSettings struct {
+ Values map[string]string
+}
+
+// GetSubscriptionID returns the available subscription ID or an empty string.
+func (settings FileSettings) GetSubscriptionID() string {
+ return settings.Values[SubscriptionID]
+}
+
+// adds the specified value to the Values map if it isn't nil
+func (settings FileSettings) setKeyValue(key string, val interface{}) {
+ if val != nil {
+ settings.Values[key] = val.(string)
+ }
+}
+
+// returns the specified AAD endpoint or the public cloud endpoint if unspecified
+func (settings FileSettings) getAADEndpoint() string {
+ if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok {
+ return v
+ }
+ return azure.PublicCloud.ActiveDirectoryEndpoint
+}
+
+// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) {
+ resource, err := settings.getResourceForToken(baseURI)
+ if err != nil {
+ return nil, err
+ }
+ return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
+}
+
+// ClientCredentialsAuthorizer creates an authorizer from the available client credentials.
+func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {
+ resource, err := settings.getResourceForToken(baseURI)
+ if err != nil {
+ return nil, err
+ }
+ return settings.ClientCredentialsAuthorizerWithResource(resource)
+}
+
+// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken
+// from the available client credentials and the specified resource.
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) {
+ if _, ok := settings.Values[ClientSecret]; !ok {
+ return nil, errors.New("missing client secret")
+ }
+ config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID])
+ if err != nil {
+ return nil, err
+ }
+ return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource)
+}
+
+func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) {
+ if _, ok := settings.Values[CertificatePath]; !ok {
+ return ClientCertificateConfig{}, errors.New("missing certificate path")
+ }
+ cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID])
+ cfg.AADEndpoint = settings.getAADEndpoint()
+ cfg.Resource = resource
+ return cfg, nil
+}
+
+// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource.
+func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+ spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
+ if err != nil {
+ return nil, err
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
+
+// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) {
+ resource, err := settings.getResourceForToken(baseURI)
+ if err != nil {
+ return nil, err
+ }
+ return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource)
+}
+
+// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials.
+func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
+ resource, err := settings.getResourceForToken(baseURI)
+ if err != nil {
+ return nil, err
+ }
+ return settings.ClientCertificateAuthorizerWithResource(resource)
+}
+
+// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken
+// from the available certificate credentials and the specified resource.
+func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
+ cfg, err := settings.clientCertificateConfigWithResource(resource)
+ if err != nil {
+ return nil, err
+ }
+ return cfg.ServicePrincipalToken()
+}
+
+// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
+func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+ cfg, err := settings.clientCertificateConfigWithResource(resource)
+ if err != nil {
+ return nil, err
+ }
+ return cfg.Authorizer()
+}
+
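+// decode normalizes auth file contents to UTF-8. Auth files written on Windows
+// may be UTF-16 with a byte order mark, so the BOM is skipped and any UTF-16
+// payload is re-decoded before JSON parsing.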
+func decode(b []byte) ([]byte, error) {
+ reader, enc := utfbom.Skip(bytes.NewReader(b))
+
+ switch enc {
+ case utfbom.UTF16LittleEndian:
+ u16 := make([]uint16, (len(b)/2)-1)
+ err := binary.Read(reader, binary.LittleEndian, &u16)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(string(utf16.Decode(u16))), nil
+ case utfbom.UTF16BigEndian:
+ u16 := make([]uint16, (len(b)/2)-1)
+ err := binary.Read(reader, binary.BigEndian, &u16)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(string(utf16.Decode(u16))), nil
+ }
+ return ioutil.ReadAll(reader)
+}
+
+func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
+ // Compare the default base URI from the SDK to the endpoints from the public cloud.
+ // Base URI and token resource are the same string. This func finds the authentication
+ // file field that matches the SDK base URI. The SDK defines the public cloud
+ // endpoint as its default base URI.
+ if !strings.HasSuffix(baseURI, "/") {
+ baseURI += "/"
+ }
+ switch baseURI {
+ case azure.PublicCloud.ServiceManagementEndpoint:
+ return settings.Values[ManagementEndpoint], nil
+ case azure.PublicCloud.ResourceManagerEndpoint:
+ return settings.Values[ResourceManagerEndpoint], nil
+ case azure.PublicCloud.ActiveDirectoryEndpoint:
+ return settings.Values[ActiveDirectoryEndpoint], nil
+ case azure.PublicCloud.GalleryEndpoint:
+ return settings.Values[GalleryEndpoint], nil
+ case azure.PublicCloud.GraphEndpoint:
+ return settings.Values[GraphResourceID], nil
+ }
+ return "", fmt.Errorf("auth: base URI not found in endpoints")
+}
+
+// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig {
+ return ClientCredentialsConfig{
+ ClientID: clientID,
+ ClientSecret: clientSecret,
+ TenantID: tenantID,
+ Resource: azure.PublicCloud.ResourceManagerEndpoint,
+ AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
+ }
+}
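+
+// Illustrative usage (a sketch; the IDs are placeholders):
+//
+//	ccc := auth.NewClientCredentialsConfig("client-id", "client-secret", "tenant-id")
+//	a, err := ccc.Authorizer()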
+
+// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through a client certificate.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig {
+ return ClientCertificateConfig{
+ CertificatePath: certificatePath,
+ CertificatePassword: certificatePassword,
+ ClientID: clientID,
+ TenantID: tenantID,
+ Resource: azure.PublicCloud.ResourceManagerEndpoint,
+ AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
+ }
+}
+
+// NewUsernamePasswordConfig creates a UsernamePasswordConfig object configured to obtain an Authorizer through username and password.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig {
+ return UsernamePasswordConfig{
+ Username: username,
+ Password: password,
+ ClientID: clientID,
+ TenantID: tenantID,
+ Resource: azure.PublicCloud.ResourceManagerEndpoint,
+ AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
+ }
+}
+
+// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI.
+func NewMSIConfig() MSIConfig {
+ return MSIConfig{
+ Resource: azure.PublicCloud.ResourceManagerEndpoint,
+ }
+}
+
+// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig {
+ return DeviceFlowConfig{
+ ClientID: clientID,
+ TenantID: tenantID,
+ Resource: azure.PublicCloud.ResourceManagerEndpoint,
+ AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
+ }
+}
+
+// AuthorizerConfig provides an Authorizer from the underlying configuration.
+type AuthorizerConfig interface {
+ Authorizer() (autorest.Authorizer, error)
+}
+
+// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials.
+type ClientCredentialsConfig struct {
+ ClientID string
+ ClientSecret string
+ TenantID string
+ AuxTenants []string
+ AADEndpoint string
+ Resource string
+}
+
+// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
+func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
+ oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
+ if err != nil {
+ return nil, err
+ }
+ return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+}
+
+// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials.
+func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) {
+ oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+}
+
+// Authorizer gets the authorizer from client credentials.
+func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
+ if len(ccc.AuxTenants) == 0 {
+ spToken, err := ccc.ServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err)
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
+ }
+ mtSPT, err := ccc.MultiTenantServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err)
+ }
+ return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil
+}
+
+// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate.
+type ClientCertificateConfig struct {
+ ClientID string
+ CertificatePath string
+ CertificatePassword string
+ TenantID string
+ AADEndpoint string
+ Resource string
+}
+
+// ServicePrincipalToken creates a ServicePrincipalToken from a client certificate.
+func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
+ oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
+ if err != nil {
+ return nil, err
+ }
+ certData, err := ioutil.ReadFile(ccc.CertificatePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
+ }
+ certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+ }
+ return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
+}
+
+// Authorizer gets an authorizer object from a client certificate.
+func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := ccc.ServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err)
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
+
+// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication.
+type DeviceFlowConfig struct {
+ ClientID string
+ TenantID string
+ AADEndpoint string
+ Resource string
+}
+
+// Authorizer gets the authorizer from device flow.
+func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := dfc.ServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
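+
+// Illustrative usage (a sketch; the IDs are placeholders). Device flow is
+// interactive: the sign-in instructions are printed via log.Println and the
+// call blocks until the user completes authentication in a browser:
+//
+//	a, err := auth.NewDeviceFlowConfig("client-id", "tenant-id").Authorizer()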
+
+// ServicePrincipalToken gets the service principal token from device flow.
+func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
+ oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)
+ if err != nil {
+ return nil, err
+ }
+ oauthClient := &autorest.Client{}
+ deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to start device auth flow: %s", err)
+ }
+ log.Println(*deviceCode.Message)
+ token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+ if err != nil {
+ return nil, fmt.Errorf("failed to finish device auth flow: %s", err)
+ }
+ return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
+}
+
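+// decodePkcs12 extracts the certificate and RSA private key from PFX data.
+// Note that pkcs12.Decode assumes exactly one certificate and one private key
+// in the archive, and non-RSA keys are rejected here.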
+func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+ privateKey, certificate, err := pkcs12.Decode(pkcs, password)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
+ if !isRsaKey {
+ return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key")
+ }
+
+ return certificate, rsaPrivateKey, nil
+}
+
+// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password.
+type UsernamePasswordConfig struct {
+ ClientID string
+ Username string
+ Password string
+ TenantID string
+ AADEndpoint string
+ Resource string
+}
+
+// ServicePrincipalToken creates a ServicePrincipalToken from username and password.
+func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
+ oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)
+ if err != nil {
+ return nil, err
+ }
+ return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
+}
+
+// Authorizer gets the authorizer from a username and a password.
+func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := ups.ServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err)
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
+
+// MSIConfig provides the options to get a bearer authorizer through MSI.
+type MSIConfig struct {
+ Resource string
+ ClientID string
+}
+
+// Authorizer gets the authorizer from MSI.
+func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
+ msiEndpoint, err := adal.GetMSIEndpoint()
+ if err != nil {
+ return nil, err
+ }
+
+ var spToken *adal.ServicePrincipalToken
+ if mc.ClientID == "" {
+ spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
+ }
+ } else {
+ spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err)
+ }
+ }
+
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
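+
+// Illustrative usage (a sketch): on an Azure VM or App Service with a managed
+// identity enabled, no credentials need to be supplied:
+//
+//	a, err := auth.NewMSIConfig().Authorizer()
+//
+// For a user-assigned identity, set MSIConfig.ClientID before calling Authorizer.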
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod
new file mode 100644
index 0000000..43fa1b7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest/azure/auth
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest v0.9.2
+ github.com/Azure/go-autorest/autorest/adal v0.7.0
+ github.com/Azure/go-autorest/autorest/azure/cli v0.3.0
+ github.com/dimchansky/utfbom v1.1.0
+ golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum
new file mode 100644
index 0000000..c462a7d
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum
@@ -0,0 +1,38 @@
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
+github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo=
+github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI=
+github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk=
+golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go
new file mode 100644
index 0000000..2f09cd1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package auth
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 0000000..3a0a439
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,326 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // HeaderClientID is the Azure extension header to set a user-specified request ID.
+ HeaderClientID = "x-ms-client-request-id"
+
+ // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+ // should be included in the response.
+ HeaderReturnClientID = "x-ms-return-client-request-id"
+
+ // HeaderRequestID is the Azure extension header of the service generated request ID returned
+ // in the response.
+ HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
+type ServiceError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Target *string `json:"target"`
+ Details []map[string]interface{} `json:"details"`
+ InnerError map[string]interface{} `json:"innererror"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+}
+
+func (se ServiceError) Error() string {
+ result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+ if se.Target != nil {
+ result += fmt.Sprintf(" Target=%q", *se.Target)
+ }
+
+ if se.Details != nil {
+ d, err := json.Marshal(se.Details)
+ if err != nil {
+ result += fmt.Sprintf(" Details=%v", se.Details)
+ } else {
+ result += fmt.Sprintf(" Details=%v", string(d))
+ }
+ }
+
+ if se.InnerError != nil {
+ d, err := json.Marshal(se.InnerError)
+ if err != nil {
+ result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+ } else {
+ result += fmt.Sprintf(" InnerError=%v", string(d))
+ }
+ }
+
+ if se.AdditionalInfo != nil {
+ d, err := json.Marshal(se.AdditionalInfo)
+ if err != nil {
+ result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
+ } else {
+ result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
+ }
+ }
+
+ return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+ // per the OData v4 spec the details field must be an array of JSON objects.
+// unfortunately not all services adhere to the spec and just return a single
+ // object instead of an array with one object. so we have to perform some
+ // shenanigans to accommodate both cases.
+ // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+ type serviceError1 struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Target *string `json:"target"`
+ Details []map[string]interface{} `json:"details"`
+ InnerError map[string]interface{} `json:"innererror"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+ }
+
+ type serviceError2 struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Target *string `json:"target"`
+ Details map[string]interface{} `json:"details"`
+ InnerError map[string]interface{} `json:"innererror"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+ }
+
+ se1 := serviceError1{}
+ err := json.Unmarshal(b, &se1)
+ if err == nil {
+ se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
+ return nil
+ }
+
+ se2 := serviceError2{}
+ err = json.Unmarshal(b, &se2)
+ if err == nil {
+ se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
+ se.Details = append(se.Details, se2.Details)
+ return nil
+ }
+ return err
+}
+
+func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
+ se.Code = code
+ se.Message = message
+ se.Target = target
+ se.Details = details
+ se.InnerError = inner
+ se.AdditionalInfo = additional
+}
+
+// RequestError describes an error response returned by Azure service.
+type RequestError struct {
+ autorest.DetailedError
+
+ // The error returned by the Azure service.
+ ServiceError *ServiceError `json:"error"`
+
+ // The request ID (from the x-ms-request-id header) of the request.
+ RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+ return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+ e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+ _, ok := e.(*RequestError)
+ return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+ SubscriptionID string
+ ResourceGroup string
+ Provider string
+ ResourceType string
+ ResourceName string
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
+func ParseResourceID(resourceID string) (Resource, error) {
+
+ const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
+ resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
+ match := resourceIDPattern.FindStringSubmatch(resourceID)
+
+ if len(match) == 0 {
+ return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
+ }
+
+ v := strings.Split(match[5], "/")
+ resourceName := v[len(v)-1]
+
+ result := Resource{
+ SubscriptionID: match[1],
+ ResourceGroup: match[2],
+ Provider: match[3],
+ ResourceType: match[4],
+ ResourceName: resourceName,
+ }
+
+ return result, nil
+}
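+
+// Illustrative usage (a sketch; the resource ID is a made-up example):
+//
+//	r, err := ParseResourceID("/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm1")
+//	// r.Provider == "Microsoft.Compute", r.ResourceType == "virtualMachines", r.ResourceName == "vm1"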
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+ if v, ok := original.(*RequestError); ok {
+ return *v
+ }
+
+ statusCode := autorest.UndefinedStatusCode
+ if resp != nil {
+ statusCode = resp.StatusCode
+ }
+ return RequestError{
+ DetailedError: autorest.DetailedError{
+ Original: original,
+ PackageType: packageType,
+ Method: method,
+ StatusCode: statusCode,
+ Message: fmt.Sprintf(message, args...),
+ },
+ }
+}
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that the UUID accompanies the http.Response.
+func WithReturningClientID(uuid string) autorest.PrepareDecorator {
+ preparer := autorest.CreatePreparer(
+ WithClientID(uuid),
+ WithReturnClientID(true))
+
+ return func(p autorest.Preparer) autorest.Preparer {
+ return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+ return preparer.Prepare(r)
+ })
+ }
+}
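+
+// Illustrative usage (a sketch), reusing the UUID from the comment above:
+//
+//	req, err := autorest.Prepare(&http.Request{},
+//		WithReturningClientID("0F39878C-5F76-4DB8-A25D-61D2C193C3CA"))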
+
+// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
+func WithClientID(uuid string) autorest.PrepareDecorator {
+ return autorest.WithHeader(HeaderClientID, uuid)
+}
+
+// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-return-client-request-id whose boolean value indicates if the value of the
+// x-ms-client-request-id header should be included in the http.Response.
+func WithReturnClientID(b bool) autorest.PrepareDecorator {
+ return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
+}
+
+// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
+// http.Request sent to the service (and returned in the http.Response)
+func ExtractClientID(resp *http.Response) string {
+ return autorest.ExtractHeaderValue(HeaderClientID, resp)
+}
+
+// ExtractRequestID extracts the Azure server generated request identifier from the
+// x-ms-request-id header.
+func ExtractRequestID(resp *http.Response) string {
+ return autorest.ExtractHeaderValue(HeaderRequestID, resp)
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
+// azure.RequestError by reading the response body unless the response HTTP status code
+// is among the set passed.
+//
+// If there is a chance the service may return responses other than the Azure error
+// format and the response cannot be parsed into an error, a decoding error will
+// be returned containing the response body. In any case, the Responder will
+// return an error if the status code is not satisfied.
+//
+// If this Responder returns an error, the response body will be replaced with
+// an in-memory reader, which needs no further closing.
+func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
+ return func(r autorest.Responder) autorest.Responder {
+ return autorest.ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
+ var e RequestError
+ defer resp.Body.Close()
+
+ // Copy and replace the Body in case it does not contain an error object.
+ // This will leave the Body available to the caller.
+ b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
+ resp.Body = ioutil.NopCloser(&b)
+ if decodeErr != nil {
+ return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
+ }
+ if e.ServiceError == nil {
+ // Check if error is unwrapped ServiceError
+ if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil {
+ return err
+ }
+ }
+ if e.ServiceError.Message == "" {
+ // if we're here it means the returned error wasn't OData v4 compliant.
+ // try to unmarshal the body as raw JSON in hopes of getting something.
+ rawBody := map[string]interface{}{}
+ if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil {
+ return err
+ }
+ e.ServiceError = &ServiceError{
+ Code: "Unknown",
+ Message: "Unknown service error",
+ }
+ if len(rawBody) > 0 {
+ e.ServiceError.Details = []map[string]interface{}{rawBody}
+ }
+ }
+ e.Response = resp
+ e.RequestID = ExtractRequestID(resp)
+ if e.StatusCode == nil {
+ e.StatusCode = resp.StatusCode
+ }
+ err = &e
+ }
+ return err
+ })
+ }
+}
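+
+// Illustrative usage (a sketch): respond to a completed request, converting any
+// non-200 response into an azure.RequestError:
+//
+//	err := autorest.Respond(resp,
+//		WithErrorUnlessStatusCode(http.StatusOK),
+//		autorest.ByClosing())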
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod
new file mode 100644
index 0000000..03ad580
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest/azure/cli
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest v0.9.0
+ github.com/Azure/go-autorest/autorest/adal v0.6.0
+ github.com/Azure/go-autorest/autorest/date v0.2.0
+ github.com/dimchansky/utfbom v1.1.0
+ github.com/mitchellh/go-homedir v1.1.0
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum
new file mode 100644
index 0000000..7a8b1f2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum
@@ -0,0 +1,29 @@
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo=
+github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go
new file mode 100644
index 0000000..618bed3
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding a multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
new file mode 100644
index 0000000..a336b95
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
@@ -0,0 +1,79 @@
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/dimchansky/utfbom"
+ "github.com/mitchellh/go-homedir"
+)
+
+// Profile represents a Profile from the Azure CLI
+type Profile struct {
+ InstallationID string `json:"installationId"`
+ Subscriptions []Subscription `json:"subscriptions"`
+}
+
+// Subscription represents a Subscription from the Azure CLI
+type Subscription struct {
+ EnvironmentName string `json:"environmentName"`
+ ID string `json:"id"`
+ IsDefault bool `json:"isDefault"`
+ Name string `json:"name"`
+ State string `json:"state"`
+ TenantID string `json:"tenantId"`
+ User *User `json:"user"`
+}
+
+// User represents a User from the Azure CLI
+type User struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+}
+
+const azureProfileJSON = "azureProfile.json"
+
+// ProfilePath returns the path at which the Azure CLI stores its Profile
+func ProfilePath() (string, error) {
+ if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
+ return filepath.Join(cfgDir, azureProfileJSON), nil
+ }
+ return homedir.Expand("~/.azure/" + azureProfileJSON)
+}
+
+// LoadProfile restores a Profile object from a file located at 'path'.
+func LoadProfile(path string) (result Profile, err error) {
+ var contents []byte
+ contents, err = ioutil.ReadFile(path)
+ if err != nil {
+ err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+ return
+ }
+ reader := utfbom.SkipOnly(bytes.NewReader(contents))
+
+ dec := json.NewDecoder(reader)
+ if err = dec.Decode(&result); err != nil {
+ err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err)
+ return
+ }
+
+ return
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
new file mode 100644
index 0000000..810075b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
@@ -0,0 +1,170 @@
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strconv"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/adal"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/mitchellh/go-homedir"
+)
+
+// Token represents an AccessToken from the Azure CLI
+type Token struct {
+ AccessToken string `json:"accessToken"`
+ Authority string `json:"_authority"`
+ ClientID string `json:"_clientId"`
+ ExpiresOn string `json:"expiresOn"`
+ IdentityProvider string `json:"identityProvider"`
+ IsMRRT bool `json:"isMRRT"`
+ RefreshToken string `json:"refreshToken"`
+ Resource string `json:"resource"`
+ TokenType string `json:"tokenType"`
+ UserID string `json:"userId"`
+}
+
+// ToADALToken converts an Azure CLI `Token` to an `adal.Token`
+func (t Token) ToADALToken() (converted adal.Token, err error) {
+ tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
+ if err != nil {
+ err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
+ return
+ }
+
+ difference := tokenExpirationDate.Sub(date.UnixEpoch())
+
+ converted = adal.Token{
+ AccessToken: t.AccessToken,
+ Type: t.TokenType,
+ ExpiresIn: "3600",
+ ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))),
+ RefreshToken: t.RefreshToken,
+ Resource: t.Resource,
+ }
+ return
+}
+
+// AccessTokensPath returns the path at which the Azure CLI stores its access tokens
+// TODO(#199): add unit test.
+func AccessTokensPath() (string, error) {
+ // The Azure CLI allows the user to customize the access token path through an environment variable.
+ var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE")
+ var err error
+
+ // Fallback logic to default path on non-cloud-shell environment.
+ // TODO(#200): remove the dependency on hard-coding path.
+ if accessTokenPath == "" {
+ accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json")
+ }
+
+ return accessTokenPath, err
+}
+
+// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object
+func ParseExpirationDate(input string) (*time.Time, error) {
+ // CloudShell (and potentially the Azure CLI in future)
+ expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
+ if cloudShellErr != nil {
+ // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
+ const cliFormat = "2006-01-02 15:04:05.999999"
+ expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
+ if cliErr == nil {
+ return &expirationDate, nil
+ }
+
+ return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
+ }
+
+ return &expirationDate, nil
+}
+
+// LoadTokens restores a set of Token objects from a file located at 'path'.
+func LoadTokens(path string) ([]Token, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+ }
+ defer file.Close()
+
+ var tokens []Token
+
+ dec := json.NewDecoder(file)
+ if err = dec.Decode(&tokens); err != nil {
+ return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err)
+ }
+
+ return tokens, nil
+}
+
+// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios.
+func GetTokenFromCLI(resource string) (*Token, error) {
+ // This is the path that a developer can set to tell this class what the install path for Azure CLI is.
+ const azureCLIPath = "AzureCLIPath"
+
+ // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI.
+ azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles"))
+
+ // Default path for non-Windows.
+ const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin"
+
+ // Validate resource, since it gets sent as a command line argument to Azure CLI
+ const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed."
+ match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource)
+ if err != nil {
+ return nil, err
+ }
+ if !match {
+ return nil, fmt.Errorf(invalidResourceErrorTemplate, resource)
+ }
+
+ // Execute Azure CLI to get token
+ var cliCmd *exec.Cmd
+ if runtime.GOOS == "windows" {
+ cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir")))
+ cliCmd.Env = os.Environ()
+ cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows))
+ cliCmd.Args = append(cliCmd.Args, "/c", "az")
+ } else {
+ cliCmd = exec.Command("az")
+ cliCmd.Env = os.Environ()
+ cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath))
+ }
+ cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource)
+
+ var stderr bytes.Buffer
+ cliCmd.Stderr = &stderr
+
+ output, err := cliCmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String())
+ }
+
+ tokenResponse := Token{}
+ err = json.Unmarshal(output, &tokenResponse)
+ if err != nil {
+ return nil, err
+ }
+
+ return &tokenResponse, err
+}
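A hedged sketch of how `GetTokenFromCLI` and `ToADALToken` from the file above fit together; the resource URI shown is the public-cloud ARM audience and the `main` wrapper is illustrative, everything else is defined in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	// Shells out to `az account get-access-token` for an ARM token.
	// Adjust the resource URI per cloud (see environments.go below).
	tok, err := cli.GetTokenFromCLI("https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Convert to the adal.Token shape the rest of go-autorest consumes.
	adalTok, err := tok.ToADALToken()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expires on (seconds since epoch):", adalTok.ExpiresOn)
}
```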
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
new file mode 100644
index 0000000..6c20b81
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -0,0 +1,244 @@
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+const (
+ // EnvironmentFilepathName captures the name of the environment variable containing the path to the file
+ // to be used while populating the Azure Environment.
+ EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
+
+ // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud.
+ NotAvailable = "N/A"
+)
+
+var environments = map[string]Environment{
+ "AZURECHINACLOUD": ChinaCloud,
+ "AZUREGERMANCLOUD": GermanCloud,
+ "AZUREPUBLICCLOUD": PublicCloud,
+ "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
+}
+
+// ResourceIdentifier contains a set of Azure resource IDs.
+type ResourceIdentifier struct {
+ Graph string `json:"graph"`
+ KeyVault string `json:"keyVault"`
+ Datalake string `json:"datalake"`
+ Batch string `json:"batch"`
+ OperationalInsights string `json:"operationalInsights"`
+ Storage string `json:"storage"`
+}
+
+// Environment represents a set of endpoints for each of Azure's Clouds.
+type Environment struct {
+ Name string `json:"name"`
+ ManagementPortalURL string `json:"managementPortalURL"`
+ PublishSettingsURL string `json:"publishSettingsURL"`
+ ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
+ ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
+ ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
+ GalleryEndpoint string `json:"galleryEndpoint"`
+ KeyVaultEndpoint string `json:"keyVaultEndpoint"`
+ GraphEndpoint string `json:"graphEndpoint"`
+ ServiceBusEndpoint string `json:"serviceBusEndpoint"`
+ BatchManagementEndpoint string `json:"batchManagementEndpoint"`
+ StorageEndpointSuffix string `json:"storageEndpointSuffix"`
+ SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
+ TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
+ KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
+ ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
+ ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
+ ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
+ ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
+ CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
+ TokenAudience string `json:"tokenAudience"`
+ ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
+}
+
+var (
+ // PublicCloud is the default public Azure cloud environment
+ PublicCloud = Environment{
+ Name: "AzurePublicCloud",
+ ManagementPortalURL: "https://manage.windowsazure.com/",
+ PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index",
+ ServiceManagementEndpoint: "https://management.core.windows.net/",
+ ResourceManagerEndpoint: "https://management.azure.com/",
+ ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
+ GalleryEndpoint: "https://gallery.azure.com/",
+ KeyVaultEndpoint: "https://vault.azure.net/",
+ GraphEndpoint: "https://graph.windows.net/",
+ ServiceBusEndpoint: "https://servicebus.windows.net/",
+ BatchManagementEndpoint: "https://batch.core.windows.net/",
+ StorageEndpointSuffix: "core.windows.net",
+ SQLDatabaseDNSSuffix: "database.windows.net",
+ TrafficManagerDNSSuffix: "trafficmanager.net",
+ KeyVaultDNSSuffix: "vault.azure.net",
+ ServiceBusEndpointSuffix: "servicebus.windows.net",
+ ServiceManagementVMDNSSuffix: "cloudapp.net",
+ ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
+ ContainerRegistryDNSSuffix: "azurecr.io",
+ CosmosDBDNSSuffix: "documents.azure.com",
+ TokenAudience: "https://management.azure.com/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.windows.net/",
+ KeyVault: "https://vault.azure.net",
+ Datalake: "https://datalake.azure.net/",
+ Batch: "https://batch.core.windows.net/",
+ OperationalInsights: "https://api.loganalytics.io",
+ Storage: "https://storage.azure.com/",
+ },
+ }
+
+ // USGovernmentCloud is the cloud environment for the US Government
+ USGovernmentCloud = Environment{
+ Name: "AzureUSGovernmentCloud",
+ ManagementPortalURL: "https://manage.windowsazure.us/",
+ PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
+ ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
+ ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
+ ActiveDirectoryEndpoint: "https://login.microsoftonline.us/",
+ GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
+ KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
+ GraphEndpoint: "https://graph.windows.net/",
+ ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/",
+ BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/",
+ StorageEndpointSuffix: "core.usgovcloudapi.net",
+ SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
+ TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
+ KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
+ ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
+ ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
+ ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
+ ContainerRegistryDNSSuffix: "azurecr.us",
+ CosmosDBDNSSuffix: "documents.azure.us",
+ TokenAudience: "https://management.usgovcloudapi.net/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.windows.net/",
+ KeyVault: "https://vault.usgovcloudapi.net",
+ Datalake: NotAvailable,
+ Batch: "https://batch.core.usgovcloudapi.net/",
+ OperationalInsights: "https://api.loganalytics.us",
+ Storage: "https://storage.azure.com/",
+ },
+ }
+
+ // ChinaCloud is the cloud environment operated in China
+ ChinaCloud = Environment{
+ Name: "AzureChinaCloud",
+ ManagementPortalURL: "https://manage.chinacloudapi.com/",
+ PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index",
+ ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/",
+ ResourceManagerEndpoint: "https://management.chinacloudapi.cn/",
+ ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/",
+ GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
+ KeyVaultEndpoint: "https://vault.azure.cn/",
+ GraphEndpoint: "https://graph.chinacloudapi.cn/",
+ ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/",
+ BatchManagementEndpoint: "https://batch.chinacloudapi.cn/",
+ StorageEndpointSuffix: "core.chinacloudapi.cn",
+ SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
+ TrafficManagerDNSSuffix: "trafficmanager.cn",
+ KeyVaultDNSSuffix: "vault.azure.cn",
+ ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
+ ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
+ ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
+ ContainerRegistryDNSSuffix: "azurecr.cn",
+ CosmosDBDNSSuffix: "documents.azure.cn",
+ TokenAudience: "https://management.chinacloudapi.cn/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.chinacloudapi.cn/",
+ KeyVault: "https://vault.azure.cn",
+ Datalake: NotAvailable,
+ Batch: "https://batch.chinacloudapi.cn/",
+ OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
+ },
+ }
+
+ // GermanCloud is the cloud environment operated in Germany
+ GermanCloud = Environment{
+ Name: "AzureGermanCloud",
+ ManagementPortalURL: "http://portal.microsoftazure.de/",
+ PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index",
+ ServiceManagementEndpoint: "https://management.core.cloudapi.de/",
+ ResourceManagerEndpoint: "https://management.microsoftazure.de/",
+ ActiveDirectoryEndpoint: "https://login.microsoftonline.de/",
+ GalleryEndpoint: "https://gallery.cloudapi.de/",
+ KeyVaultEndpoint: "https://vault.microsoftazure.de/",
+ GraphEndpoint: "https://graph.cloudapi.de/",
+ ServiceBusEndpoint: "https://servicebus.cloudapi.de/",
+ BatchManagementEndpoint: "https://batch.cloudapi.de/",
+ StorageEndpointSuffix: "core.cloudapi.de",
+ SQLDatabaseDNSSuffix: "database.cloudapi.de",
+ TrafficManagerDNSSuffix: "azuretrafficmanager.de",
+ KeyVaultDNSSuffix: "vault.microsoftazure.de",
+ ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
+ ServiceManagementVMDNSSuffix: "azurecloudapp.de",
+ ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
+ ContainerRegistryDNSSuffix: NotAvailable,
+ CosmosDBDNSSuffix: "documents.microsoftazure.de",
+ TokenAudience: "https://management.microsoftazure.de/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.cloudapi.de/",
+ KeyVault: "https://vault.microsoftazure.de",
+ Datalake: NotAvailable,
+ Batch: "https://batch.cloudapi.de/",
+ OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
+ },
+ }
+)
+
+// EnvironmentFromName returns an Environment based on the common name specified.
+func EnvironmentFromName(name string) (Environment, error) {
+ // IMPORTANT
+ // As per @radhikagupta5:
+ // This is technical debt, fundamentally here because Kubernetes is not currently accepting
+ // contributions to the providers. Once that is an option, the provider should be updated to
+ // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
+ // from this method based on the name that is provided to us.
+ if strings.EqualFold(name, "AZURESTACKCLOUD") {
+ return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName))
+ }
+
+ name = strings.ToUpper(name)
+ env, ok := environments[name]
+ if !ok {
+ return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
+ }
+
+ return env, nil
+}
+
+// EnvironmentFromFile loads an Environment from a configuration file available on disk.
+// This function is particularly useful in the Hybrid Cloud model, where one must define their own
+// endpoints.
+func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
+ fileContents, err := ioutil.ReadFile(location)
+ if err != nil {
+ return
+ }
+
+ err = json.Unmarshal(fileContents, &unmarshaled)
+
+ return
+}
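For reference, a small sketch of `EnvironmentFromName` from the file above; the chosen cloud name and the printed fields are illustrative, and the expected output in the comments is read off the `USGovernmentCloud` literal in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Lookup is case-insensitive; "AzureStackCloud" would instead dispatch
	// to EnvironmentFromFile via AZURE_ENVIRONMENT_FILEPATH.
	env, err := azure.EnvironmentFromName("AzureUSGovernmentCloud")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env.ResourceManagerEndpoint) // https://management.usgovcloudapi.net/
	fmt.Println(env.TokenAudience)           // https://management.usgovcloudapi.net/
}
```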
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
new file mode 100644
index 0000000..507f9e9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
@@ -0,0 +1,245 @@
+package azure
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+type audience []string
+
+type authentication struct {
+ LoginEndpoint string `json:"loginEndpoint"`
+ Audiences audience `json:"audiences"`
+}
+
+type environmentMetadataInfo struct {
+ GalleryEndpoint string `json:"galleryEndpoint"`
+ GraphEndpoint string `json:"graphEndpoint"`
+ PortalEndpoint string `json:"portalEndpoint"`
+ Authentication authentication `json:"authentication"`
+}
+
+// EnvironmentProperty represents property names that clients can override
+type EnvironmentProperty string
+
+const (
+ // EnvironmentName ...
+ EnvironmentName EnvironmentProperty = "name"
+ // EnvironmentManagementPortalURL ...
+ EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
+ // EnvironmentPublishSettingsURL ...
+ EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
+ // EnvironmentServiceManagementEndpoint ...
+ EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
+ // EnvironmentResourceManagerEndpoint ...
+ EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
+ // EnvironmentActiveDirectoryEndpoint ...
+ EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
+ // EnvironmentGalleryEndpoint ...
+ EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
+ // EnvironmentKeyVaultEndpoint ...
+ EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
+ // EnvironmentGraphEndpoint ...
+ EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
+ // EnvironmentServiceBusEndpoint ...
+ EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
+ // EnvironmentBatchManagementEndpoint ...
+ EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
+ // EnvironmentStorageEndpointSuffix ...
+ EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
+ // EnvironmentSQLDatabaseDNSSuffix ...
+ EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
+ // EnvironmentTrafficManagerDNSSuffix ...
+ EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
+ // EnvironmentKeyVaultDNSSuffix ...
+ EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
+ // EnvironmentServiceBusEndpointSuffix ...
+ EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
+ // EnvironmentServiceManagementVMDNSSuffix ...
+ EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
+ // EnvironmentResourceManagerVMDNSSuffix ...
+ EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
+ // EnvironmentContainerRegistryDNSSuffix ...
+ EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
+ // EnvironmentTokenAudience ...
+ EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
+)
+
+// OverrideProperty represents property name and value that clients can override
+type OverrideProperty struct {
+ Key EnvironmentProperty
+ Value string
+}
+
+// EnvironmentFromURL loads an Environment from a URL
+// This function is particularly useful in the Hybrid Cloud model, where one may define their own
+// endpoints.
+func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) {
+ var metadataEnvProperties environmentMetadataInfo
+
+ if resourceManagerEndpoint == "" {
+ return environment, fmt.Errorf("Metadata resource manager endpoint is empty")
+ }
+
+ if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil {
+ return environment, err
+ }
+
+ // Give priority to user's override values
+ overrideProperties(&environment, properties)
+
+ if environment.Name == "" {
+ environment.Name = "HybridEnvironment"
+ }
+ stampDNSSuffix := environment.StorageEndpointSuffix
+ if stampDNSSuffix == "" {
+ stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/")
+ environment.StorageEndpointSuffix = stampDNSSuffix
+ }
+ if environment.KeyVaultDNSSuffix == "" {
+ environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix)
+ }
+ if environment.KeyVaultEndpoint == "" {
+ environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix)
+ }
+ if environment.TokenAudience == "" {
+ environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0]
+ }
+ if environment.ActiveDirectoryEndpoint == "" {
+ environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint
+ }
+ if environment.ResourceManagerEndpoint == "" {
+ environment.ResourceManagerEndpoint = resourceManagerEndpoint
+ }
+ if environment.GalleryEndpoint == "" {
+ environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint
+ }
+ if environment.GraphEndpoint == "" {
+ environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint
+ }
+
+ return environment, nil
+}
+
+func overrideProperties(environment *Environment, properties []OverrideProperty) {
+ for _, property := range properties {
+ switch property.Key {
+ case EnvironmentName:
+ {
+ environment.Name = property.Value
+ }
+ case EnvironmentManagementPortalURL:
+ {
+ environment.ManagementPortalURL = property.Value
+ }
+ case EnvironmentPublishSettingsURL:
+ {
+ environment.PublishSettingsURL = property.Value
+ }
+ case EnvironmentServiceManagementEndpoint:
+ {
+ environment.ServiceManagementEndpoint = property.Value
+ }
+ case EnvironmentResourceManagerEndpoint:
+ {
+ environment.ResourceManagerEndpoint = property.Value
+ }
+ case EnvironmentActiveDirectoryEndpoint:
+ {
+ environment.ActiveDirectoryEndpoint = property.Value
+ }
+ case EnvironmentGalleryEndpoint:
+ {
+ environment.GalleryEndpoint = property.Value
+ }
+ case EnvironmentKeyVaultEndpoint:
+ {
+ environment.KeyVaultEndpoint = property.Value
+ }
+ case EnvironmentGraphEndpoint:
+ {
+ environment.GraphEndpoint = property.Value
+ }
+ case EnvironmentServiceBusEndpoint:
+ {
+ environment.ServiceBusEndpoint = property.Value
+ }
+ case EnvironmentBatchManagementEndpoint:
+ {
+ environment.BatchManagementEndpoint = property.Value
+ }
+ case EnvironmentStorageEndpointSuffix:
+ {
+ environment.StorageEndpointSuffix = property.Value
+ }
+ case EnvironmentSQLDatabaseDNSSuffix:
+ {
+ environment.SQLDatabaseDNSSuffix = property.Value
+ }
+ case EnvironmentTrafficManagerDNSSuffix:
+ {
+ environment.TrafficManagerDNSSuffix = property.Value
+ }
+ case EnvironmentKeyVaultDNSSuffix:
+ {
+ environment.KeyVaultDNSSuffix = property.Value
+ }
+ case EnvironmentServiceBusEndpointSuffix:
+ {
+ environment.ServiceBusEndpointSuffix = property.Value
+ }
+ case EnvironmentServiceManagementVMDNSSuffix:
+ {
+ environment.ServiceManagementVMDNSSuffix = property.Value
+ }
+ case EnvironmentResourceManagerVMDNSSuffix:
+ {
+ environment.ResourceManagerVMDNSSuffix = property.Value
+ }
+ case EnvironmentContainerRegistryDNSSuffix:
+ {
+ environment.ContainerRegistryDNSSuffix = property.Value
+ }
+ case EnvironmentTokenAudience:
+ {
+ environment.TokenAudience = property.Value
+ }
+ }
+ }
+}
+
+func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) {
+ client := autorest.NewClientWithUserAgent("")
+ managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0")
+ req, _ := http.NewRequest("GET", managementEndpoint, nil)
+ response, err := client.Do(req)
+ if err != nil {
+ return environment, err
+ }
+ defer response.Body.Close()
+ jsonResponse, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return environment, err
+ }
+ err = json.Unmarshal(jsonResponse, &environment)
+ return environment, err
+}
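A hedged sketch of `EnvironmentFromURL` with an override, as the Hybrid Cloud comment above describes; the ARM endpoint is a placeholder for an Azure Stack stamp and the environment name is invented, while the function and `OverrideProperty` type are from this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// EnvironmentFromURL fetches <endpoint>/metadata/endpoints?api-version=1.0
	// and fills in any field not supplied as an override.
	env, err := azure.EnvironmentFromURL(
		"https://management.local.azurestack.external/", // placeholder stamp endpoint
		azure.OverrideProperty{Key: azure.EnvironmentName, Value: "MyAzureStack"},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env.Name, env.KeyVaultDNSSuffix)
}
```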
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
new file mode 100644
index 0000000..86ce9f2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
@@ -0,0 +1,200 @@
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package azure
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
+// It also handles request retries.
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+ return func(s autorest.Sender) autorest.Sender {
+ return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+ rr := autorest.NewRetriableRequest(r)
+ for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+ err = rr.Prepare()
+ if err != nil {
+ return resp, err
+ }
+
+ resp, err = autorest.SendWithSender(s, rr.Request(),
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return resp, err
+ }
+
+ if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
+ return resp, err
+ }
+ var re RequestError
+ err = autorest.Respond(
+ resp,
+ autorest.ByUnmarshallingJSON(&re),
+ )
+ if err != nil {
+ return resp, err
+ }
+ err = re
+
+ if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+ regErr := register(client, r, re)
+ if regErr != nil {
+ return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
+ }
+ }
+ }
+ return resp, err
+ })
+ }
+}
+
+func getProvider(re RequestError) (string, error) {
+ if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+ return re.ServiceError.Details[0]["target"].(string), nil
+ }
+ return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+ subID := getSubscription(originalReq.URL.Path)
+ if subID == "" {
+ return errors.New("missing parameter subscriptionID to register resource provider")
+ }
+ providerName, err := getProvider(re)
+ if err != nil {
+ return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+ }
+ newURL := url.URL{
+ Scheme: originalReq.URL.Scheme,
+ Host: originalReq.URL.Host,
+ }
+
+ // taken from the resources SDK
+ // with almost identical code, these sections are easier to maintain
+ // It is also not a good idea to import the SDK here
+ // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+ pathParameters := map[string]interface{}{
+ "resourceProviderNamespace": autorest.Encode("path", providerName),
+ "subscriptionId": autorest.Encode("path", subID),
+ }
+
+ const APIVersion = "2016-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(newURL.String()),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ )
+
+ req, err := preparer.Prepare(&http.Request{})
+ if err != nil {
+ return err
+ }
+ req = req.WithContext(originalReq.Context())
+
+ resp, err := autorest.SendWithSender(client, req,
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return err
+ }
+
+ type Provider struct {
+ RegistrationState *string `json:"registrationState,omitempty"`
+ }
+ var provider Provider
+
+ err = autorest.Respond(
+ resp,
+ WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&provider),
+ autorest.ByClosing(),
+ )
+ if err != nil {
+ return err
+ }
+
+ // poll for registered provisioning state
+ registrationStartTime := time.Now()
+ for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
+ // taken from the resources SDK
+ // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(newURL.String()),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ )
+ req, err = preparer.Prepare(&http.Request{})
+ if err != nil {
+ return err
+ }
+ req = req.WithContext(originalReq.Context())
+
+ resp, err := autorest.SendWithSender(client, req,
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return err
+ }
+
+ err = autorest.Respond(
+ resp,
+ WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&provider),
+ autorest.ByClosing(),
+ )
+ if err != nil {
+ return err
+ }
+
+ if provider.RegistrationState != nil &&
+ *provider.RegistrationState == "Registered" {
+ break
+ }
+
+ delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
+ if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
+ return originalReq.Context().Err()
+ }
+ }
+ if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
+ return errors.New("polling for resource provider registration has exceeded the polling duration")
+ }
+ return err
+}
+
+func getSubscription(path string) string {
+ parts := strings.Split(path, "/")
+ for i, v := range parts {
+ if v == "subscriptions" && (i+1) < len(parts) {
+ return parts[i+1]
+ }
+ }
+ return ""
+}
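A sketch of how a caller might thread `DoRetryWithRegistration` into a send, mirroring what generated SDK clients do; the `send` helper is hypothetical, while `SendWithSender` and the decorator are defined in this diff:

```go
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// send is a hypothetical helper: a 409 MissingSubscriptionRegistration
// response triggers resource provider registration, after which the
// original request is retried.
func send(c autorest.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(c, req, azure.DoRetryWithRegistration(c))
}
```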
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
new file mode 100644
index 0000000..1c6a061
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -0,0 +1,300 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/logger"
+)
+
+const (
+ // DefaultPollingDelay is a reasonable delay between polling requests.
+ DefaultPollingDelay = 60 * time.Second
+
+ // DefaultPollingDuration is a reasonable total polling duration.
+ DefaultPollingDuration = 15 * time.Minute
+
+ // DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+ DefaultRetryAttempts = 3
+
+ // DefaultRetryDuration is the duration to wait between retries.
+ DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+ // StatusCodesForRetry is a defined group of status codes for which the client will retry
+ StatusCodesForRetry = []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+)
+
+const (
+ requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+ responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+ *http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+ if r.Response == nil {
+ return false
+ }
+ return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided,
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+ return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+ Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ var body, b bytes.Buffer
+
+ defer r.Body.Close()
+
+ r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+ if err := r.Write(&b); err != nil {
+ return nil, fmt.Errorf("Failed to write response: %v", err)
+ }
+
+ li.Logger.Printf(requestFormat, b.String())
+
+ r.Body = ioutil.NopCloser(&body)
+ return p.Prepare(r)
+ })
+ }
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ var body, b bytes.Buffer
+ defer resp.Body.Close()
+ resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+ if err := resp.Write(&b); err != nil {
+ return fmt.Errorf("Failed to write response: %v", err)
+ }
+
+ li.Logger.Printf(responseFormat, b.String())
+
+ resp.Body = ioutil.NopCloser(&body)
+ return r.Respond(resp)
+ })
+ }
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+ Authorizer Authorizer
+ Sender Sender
+ RequestInspector PrepareDecorator
+ ResponseInspector RespondDecorator
+
+ // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
+ PollingDelay time.Duration
+
+ // PollingDuration sets the maximum polling time after which an error is returned.
+ // Setting this to zero will use the provided context to control the duration.
+ PollingDuration time.Duration
+
+ // RetryAttempts sets the default number of retry attempts for client.
+ RetryAttempts int
+
+ // RetryDuration sets the delay duration for retries.
+ RetryDuration time.Duration
+
+ // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+ // through the Do method.
+ UserAgent string
+
+ Jar http.CookieJar
+
+ // Set to true to skip attempted registration of resource providers (false by default).
+ SkipResourceProviderRegistration bool
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+ return newClient(ua, tls.RenegotiateNever)
+}
+
+// ClientOptions contains various Client configuration options.
+type ClientOptions struct {
+ // UserAgent is an optional user-agent string to append to the default user agent.
+ UserAgent string
+
+ // Renegotiation is an optional setting to control client-side TLS renegotiation.
+ Renegotiation tls.RenegotiationSupport
+}
+
+// NewClientWithOptions returns an instance of a Client with the specified values.
+func NewClientWithOptions(options ClientOptions) Client {
+ return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+ c := Client{
+ PollingDelay: DefaultPollingDelay,
+ PollingDuration: DefaultPollingDuration,
+ RetryAttempts: DefaultRetryAttempts,
+ RetryDuration: DefaultRetryDuration,
+ UserAgent: UserAgent(),
+ }
+ c.Sender = c.sender(renegotiation)
+ c.AddToUserAgent(ua)
+ return c
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+ if extension != "" {
+ c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+ return nil
+ }
+ return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+ if r.UserAgent() == "" {
+ r, _ = Prepare(r,
+ WithUserAgent(c.UserAgent))
+ }
+ // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+ r, err := Prepare(r,
+ c.WithAuthorization(),
+ c.WithInspection())
+ if err != nil {
+ var resp *http.Response
+ if detErr, ok := err.(DetailedError); ok {
+ // if the authorization failed (e.g. invalid credentials) there will
+ // be a response associated with the error, be sure to return it.
+ resp = detErr.Response
+ }
+ return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+ }
+ logger.Instance.WriteRequest(r, logger.Filter{
+ Header: func(k string, v []string) (bool, []string) {
+ // remove the auth token from the log
+ if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+ v = []string{"**REDACTED**"}
+ }
+ return true, v
+ },
+ })
+ resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+ logger.Instance.WriteResponse(resp, logger.Filter{})
+ Respond(resp, c.ByInspecting())
+ return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+ if c.Sender == nil {
+ return sender(renegotiation)
+ }
+ return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+ return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+ if c.Authorizer == nil {
+ return NullAuthorizer{}
+ }
+ return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+ if c.RequestInspector == nil {
+ return WithNothing()
+ }
+ return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+ if c.ResponseInspector == nil {
+ return ByIgnoring()
+ }
+ return c.ResponseInspector
+}
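A minimal sketch of constructing and using the `Client` above; the user-agent suffix and target URL are placeholders, and the defaults noted in the comment are the constants from this diff:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Defaults: 3 retries 30s apart, 60s polling delay, 15m polling cap.
	c := autorest.NewClientWithUserAgent("myapp/0.1") // suffix is illustrative

	req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	// Do applies the (Null)Authorizer, logs the exchange with the
	// Authorization header redacted, then sends via the active Sender.
	resp, err := c.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```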
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 0000000..c457106
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of a
+time.Time; both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ fullDate = "2006-01-02"
+ fullDateJSON = `"2006-01-02"`
+ dateFormat = "%04d-%02d-%02d"
+ jsonFormat = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+ time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+ return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+ d, err := time.Parse(format, date)
+ return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+ return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+ return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+ return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+ d.Time, err = time.Parse(fullDateJSON, string(data))
+ return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+ return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalText(data []byte) (err error) {
+ d.Time, err = time.Parse(fullDate, string(data))
+ return err
+}
+
+// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
+func (d Date) String() string {
+ return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
+}
+
+// ToTime returns a Date as a time.Time
+func (d Date) ToTime() time.Time {
+ return d.Time
+}
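For reference, a minimal usage sketch (not part of the vendored file; the date value is illustrative) showing how Date round-trips through encoding/json:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        // ParseDate accepts the RFC3339 full-date layout (2006-01-02).
        d, err := date.ParseDate("2019-11-27")
        if err != nil {
            panic(err)
        }

        // MarshalJSON emits the quoted "YYYY-MM-DD" form.
        b, _ := json.Marshal(d)
        fmt.Println(string(b)) // "2019-11-27"

        // UnmarshalJSON reverses the encoding.
        var d2 date.Date
        if err := json.Unmarshal(b, &d2); err != nil {
            panic(err)
        }
        fmt.Println(d2.ToTime().Equal(d.ToTime())) // true
    }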
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
new file mode 100644
index 0000000..3adc480
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/date
+
+go 1.12
+
+require github.com/Azure/go-autorest/autorest v0.9.0
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum
new file mode 100644
index 0000000..9e2ee7a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum
@@ -0,0 +1,16 @@
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
new file mode 100644
index 0000000..55adf93
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go
new file mode 100644
index 0000000..b453fad
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go
@@ -0,0 +1,103 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "regexp"
+ "time"
+)
+
+// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
+const (
+ azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"`
+ azureUtcFormat = "2006-01-02T15:04:05.999999999"
+ rfc3339JSON = `"` + time.RFC3339Nano + `"`
+ rfc3339 = time.RFC3339Nano
+ tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$`
+)
+
+// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
+// 2006-01-02T15:04:05Z).
+type Time struct {
+ time.Time
+}
+
+// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
+// 2006-01-02T15:04:05Z).
+func (t Time) MarshalBinary() ([]byte, error) {
+ return t.Time.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
+// (i.e., 2006-01-02T15:04:05Z).
+func (t *Time) UnmarshalBinary(data []byte) error {
+ return t.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
+// 2006-01-02T15:04:05Z).
+func (t Time) MarshalJSON() (json []byte, err error) {
+ return t.Time.MarshalJSON()
+}
+
+// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
+// (i.e., 2006-01-02T15:04:05Z).
+func (t *Time) UnmarshalJSON(data []byte) (err error) {
+ timeFormat := azureUtcFormatJSON
+ match, err := regexp.Match(tzOffsetRegex, data)
+ if err != nil {
+ return err
+ } else if match {
+ timeFormat = rfc3339JSON
+ }
+ t.Time, err = ParseTime(timeFormat, string(data))
+ return err
+}
+
+// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
+// 2006-01-02T15:04:05Z).
+func (t Time) MarshalText() (text []byte, err error) {
+ return t.Time.MarshalText()
+}
+
+// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
+// (i.e., 2006-01-02T15:04:05Z).
+func (t *Time) UnmarshalText(data []byte) (err error) {
+ timeFormat := azureUtcFormat
+ match, err := regexp.Match(tzOffsetRegex, data)
+ if err != nil {
+ return err
+ } else if match {
+ timeFormat = rfc3339
+ }
+ t.Time, err = ParseTime(timeFormat, string(data))
+ return err
+}
+
+// String returns the Time formatted as an RFC3339 date-time string (i.e.,
+// 2006-01-02T15:04:05Z).
+func (t Time) String() string {
+ // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
+ b, err := t.MarshalText()
+ if err != nil {
+ return ""
+ }
+ return string(b)
+}
+
+// ToTime returns a Time as a time.Time
+func (t Time) ToTime() time.Time {
+ return t.Time
+}
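A short sketch (illustrative, not vendored code) of the dual-format behavior above: Time accepts both Azure's zone-less UTC timestamps and fully RFC3339-compliant values.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        var t date.Time

        // Azure-style timestamp with no 'Z' suffix: tzOffsetRegex does not
        // match, so the azureUtcFormat layout is used.
        if err := json.Unmarshal([]byte(`"2019-11-27T10:30:00.5"`), &t); err != nil {
            panic(err)
        }
        fmt.Println(t.String())

        // RFC3339 value with an explicit offset: the regex matches, so the
        // time.RFC3339Nano layout is used instead.
        if err := json.Unmarshal([]byte(`"2019-11-27T10:30:00.5+01:00"`), &t); err != nil {
            panic(err)
        }
        fmt.Println(t.ToTime().UTC())
    }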
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
new file mode 100644
index 0000000..48fb39b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
@@ -0,0 +1,100 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "errors"
+ "time"
+)
+
+const (
+ rfc1123JSON = `"` + time.RFC1123 + `"`
+ rfc1123 = time.RFC1123
+)
+
+// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+type TimeRFC1123 struct {
+ time.Time
+}
+
+// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) {
+ t.Time, err = ParseTime(rfc1123JSON, string(data))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) MarshalJSON() ([]byte, error) {
+ if y := t.Year(); y < 0 || y >= 10000 {
+ return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
+ }
+ b := []byte(t.Format(rfc1123JSON))
+ return b, nil
+}
+
+// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) MarshalText() ([]byte, error) {
+ if y := t.Year(); y < 0 || y >= 10000 {
+ return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
+ }
+
+ b := []byte(t.Format(rfc1123))
+ return b, nil
+}
+
+// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
+ t.Time, err = ParseTime(rfc1123, string(data))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
+ return t.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
+ return t.UnmarshalText(data)
+}
+
+// ToTime returns a Time as a time.Time
+func (t TimeRFC1123) ToTime() time.Time {
+ return t.Time
+}
+
+// String returns the Time formatted as an RFC1123 date-time string (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) String() string {
+ // Note: time.Time.String does not return an RFC1123 compliant string; TimeRFC1123.MarshalText does.
+ b, err := t.MarshalText()
+ if err != nil {
+ return ""
+ }
+ return string(b)
+}
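Likewise, a small illustrative sketch (not vendored code) of TimeRFC1123, the layout Azure uses for header-style timestamps:

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        var t date.TimeRFC1123

        // RFC1123 layout: Mon, 02 Jan 2006 15:04:05 MST.
        if err := t.UnmarshalText([]byte("Wed, 27 Nov 2019 10:30:00 GMT")); err != nil {
            panic(err)
        }

        // MarshalText re-renders the same layout.
        b, err := t.MarshalText()
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // Wed, 27 Nov 2019 10:30:00 GMT
    }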
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
new file mode 100644
index 0000000..7073959
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
@@ -0,0 +1,123 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+ return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+ return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+ return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+ return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+ return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+ buffer := &bytes.Buffer{}
+ enc := json.NewEncoder(buffer)
+ err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+ if err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(text))
+
+ var secondsSinceEpoch float64
+ if err := dec.Decode(&secondsSinceEpoch); err != nil {
+ return err
+ }
+
+ *t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+ return nil
+}
+
+// MarshalText preserves the UnixTime as an RFC3339 timestamp (it defers to time.Time.MarshalText).
+func (t UnixTime) MarshalText() ([]byte, error) {
+ cast := time.Time(t)
+ return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from an RFC3339 timestamp (it defers to time.Time.UnmarshalText).
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+ var unmarshaled time.Time
+
+ if err := unmarshaled.UnmarshalText(raw); err != nil {
+ return err
+ }
+
+ *t = UnixTime(unmarshaled)
+ return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+ buf := &bytes.Buffer{}
+
+ payload := int64(t.Duration())
+
+ if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+ var nanosecondsSinceEpoch int64
+
+ if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+ return err
+ }
+ *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+ return nil
+}
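An illustrative sketch (not part of the vendored file; the epoch value is 2019-11-27T00:00:00Z) of UnixTime's JSON behavior, i.e. seconds since the epoch as a JSON number:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        t := date.NewUnixTimeFromSeconds(1574812800)

        // MarshalJSON emits a JSON number; fractional seconds are preserved.
        b, err := json.Marshal(t)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // e.g. 1.5748128e+09

        var u date.UnixTime
        if err := json.Unmarshal(b, &u); err != nil {
            panic(err)
        }
        fmt.Println(time.Time(u).UTC()) // 2019-11-27 00:00:00 +0000 UTC
    }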
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 0000000..12addf0
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "strings"
+ "time"
+)
+
+// ParseTime parses the passed time string using the specified format, upper-casing the input first.
+func ParseTime(format string, t string) (d time.Time, err error) {
+ return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 0000000..f724f33
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,98 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "net/http"
+)
+
+const (
+ // UndefinedStatusCode is used when HTTP status code is not available for an error.
+ UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+ Original error
+
+ // PackageType is the package type of the object emitting the error. For types, the value
+ // matches that produced by the '%T' format specifier of the fmt package. For other elements,
+ // such as functions, it is just the package name (e.g., "autorest").
+ PackageType string
+
+ // Method is the name of the method raising the error.
+ Method string
+
+ // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+ StatusCode interface{}
+
+ // Message is the error message.
+ Message string
+
+ // ServiceError is the response body of the failed API call, in bytes.
+ ServiceError []byte
+
+ // Response is the response object that was returned during failure if applicable.
+ Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+ return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+ return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+ if v, ok := original.(DetailedError); ok {
+ return v
+ }
+
+ statusCode := UndefinedStatusCode
+ if resp != nil {
+ statusCode = resp.StatusCode
+ }
+
+ return DetailedError{
+ Original: original,
+ PackageType: packageType,
+ Method: method,
+ StatusCode: statusCode,
+ Message: fmt.Sprintf(message, args...),
+ Response: resp,
+ }
+}
+
+// Error returns a formatted error string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and original error (if any)).
+func (e DetailedError) Error() string {
+ if e.Original == nil {
+ return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
+ }
+ return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
+}
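For illustration (the package, method, and message below are invented), DetailedError wraps a failed call together with its HTTP status code:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        // A failed response as it might come back from a service call.
        resp := &http.Response{StatusCode: http.StatusNotFound, Status: "404 Not Found"}

        // NewErrorWithResponse captures the status code from the response;
        // the package/method/message arguments are caller-supplied context.
        err := autorest.NewErrorWithResponse("widgets", "Get", resp, "widget %q not found", "w1")
        fmt.Println(err.Error())
        // widgets#Get: widget "w1" not found: StatusCode=404
    }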
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
new file mode 100644
index 0000000..ab2ae66
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest/adal v0.5.0
+ github.com/Azure/go-autorest/autorest/mocks v0.2.0
+ github.com/Azure/go-autorest/logger v0.1.0
+ github.com/Azure/go-autorest/tracing v0.5.0
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
new file mode 100644
index 0000000..729b99c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -0,0 +1,18 @@
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
new file mode 100644
index 0000000..6e8ed64
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -0,0 +1,550 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ mimeTypeJSON = "application/json"
+ mimeTypeOctetStream = "application/octet-stream"
+ mimeTypeFormPost = "application/x-www-form-urlencoded"
+
+ headerAuthorization = "Authorization"
+ headerAuxAuthorization = "x-ms-authorization-auxiliary"
+ headerContentType = "Content-Type"
+ headerUserAgent = "User-Agent"
+)
+
+// used as a key type in context.WithValue()
+type ctxPrepareDecorators struct{}
+
+// WithPrepareDecorators adds the specified PrepareDecorators to the provided context.
+// If no PrepareDecorators are provided the context is unchanged.
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
+ if len(prepareDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
+}
+
+// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+ inCtx := ctx.Value(ctxPrepareDecorators{})
+ if pd, ok := inCtx.([]PrepareDecorator); ok {
+ return pd
+ }
+ return defaultPrepareDecorators
+}
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must not share or hold per-invocation state, since Preparers may be shared and re-used.
+type Preparer interface {
+ Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+ return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+ return DecoratePreparer(
+ Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+ decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+ for _, decorate := range decorators {
+ p = decorate(p)
+ }
+ return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+ if r == nil {
+ return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+ }
+ return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ return p.Prepare(r)
+ })
+ }
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
+func WithHeader(header string, value string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set(http.CanonicalHeaderKey(header), value)
+ }
+ return r, err
+ })
+ }
+}
+
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
+func WithHeaders(headers map[string]interface{}) PrepareDecorator {
+ h := ensureValueStrings(headers)
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+
+ for name, value := range h {
+ r.Header.Set(http.CanonicalHeaderKey(name), value)
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the supplied token.
+func WithBearerAuthorization(token string) PrepareDecorator {
+ return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
+}
+
+// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
+// is the passed contentType.
+func AsContentType(contentType string) PrepareDecorator {
+ return WithHeader(headerContentType, contentType)
+}
+
+// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
+// passed string.
+func WithUserAgent(ua string) PrepareDecorator {
+ return WithHeader(headerUserAgent, ua)
+}
+
+// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/x-www-form-urlencoded".
+func AsFormURLEncoded() PrepareDecorator {
+ return AsContentType(mimeTypeFormPost)
+}
+
+// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/json".
+func AsJSON() PrepareDecorator {
+ return AsContentType(mimeTypeJSON)
+}
+
+// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
+func AsOctetStream() PrepareDecorator {
+ return AsContentType(mimeTypeOctetStream)
+}
+
+// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
+// decorator does not validate that the passed method string is a known HTTP method.
+func WithMethod(method string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r.Method = method
+ return p.Prepare(r)
+ })
+ }
+}
+
+// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
+func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
+
+// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
+func AsGet() PrepareDecorator { return WithMethod("GET") }
+
+// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
+func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
+// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ var u *url.URL
+ if u, err = url.Parse(baseURL); err != nil {
+ return r, err
+ }
+ if u.Scheme == "" {
+ err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+ }
+ if err == nil {
+ r.URL = u
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithBytes returns a PrepareDecorator that passes the supplied byte slice
+// directly to the request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if input == nil {
+ return r, fmt.Errorf("Input Bytes was nil")
+ }
+
+ r.ContentLength = int64(len(*input))
+ r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+ parameters := ensureValueStrings(urlParameters)
+ for key, value := range parameters {
+ baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+ }
+ return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that "URL encodes" (e.g., bar=baz&foo=quux) the supplied url.Values into the
+// http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ s := v.Encode()
+
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+ r.ContentLength = int64(len(s))
+ r.Body = ioutil.NopCloser(strings.NewReader(s))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the supplied form parameters
+// into the http.Request body as multipart/form-data.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ var body bytes.Buffer
+ writer := multipart.NewWriter(&body)
+ for key, value := range formDataParameters {
+ if rc, ok := value.(io.ReadCloser); ok {
+ var fd io.Writer
+ if fd, err = writer.CreateFormFile(key, key); err != nil {
+ return r, err
+ }
+ if _, err = io.Copy(fd, rc); err != nil {
+ return r, err
+ }
+ } else {
+ if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+ return r, err
+ }
+ }
+ }
+ if err = writer.Close(); err != nil {
+ return r, err
+ }
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+ r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+ r.ContentLength = int64(body.Len())
+ return r, err
+ }
+ return r, err
+ })
+ }
+}
+
+// WithFile returns a PrepareDecorator that sends the contents of the supplied io.ReadCloser in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return r, err
+ }
+ r.Body = ioutil.NopCloser(bytes.NewReader(b))
+ r.ContentLength = int64(len(b))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
+func WithFloat32(v float32) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
+func WithString(v string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ r.ContentLength = int64(len(v))
+ r.Body = ioutil.NopCloser(strings.NewReader(v))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
+func WithJSON(v interface{}) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ b, err := json.Marshal(v)
+ if err == nil {
+ r.ContentLength = int64(len(b))
+ r.Body = ioutil.NopCloser(bytes.NewReader(b))
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
+// request and sets the Content-Length header.
+func WithXML(v interface{}) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ b, err := xml.Marshal(v)
+ if err == nil {
+ // we have to tack on an XML header
+ withHeader := xml.Header + string(b)
+ bytesWithHeader := []byte(withHeader)
+
+ r.ContentLength = int64(len(bytesWithHeader))
+ r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
+// is absolute (that is, it begins with a "/"), it replaces the existing path.
+func WithPath(path string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.URL == nil {
+ return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+ }
+ if r.URL, err = parseURL(r.URL, path); err != nil {
+ return r, err
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+ parameters := escapeValueStrings(ensureValueStrings(pathParameters))
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.URL == nil {
+ return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
+ }
+ for key, value := range parameters {
+ path = strings.Replace(path, "{"+key+"}", value, -1)
+ }
+ if r.URL, err = parseURL(r.URL, path); err != nil {
+ return r, err
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+ parameters := ensureValueStrings(pathParameters)
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.URL == nil {
+ return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+ }
+ for key, value := range parameters {
+ path = strings.Replace(path, "{"+key+"}", value, -1)
+ }
+
+ if r.URL, err = parseURL(r.URL, path); err != nil {
+ return r, err
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+ p := strings.TrimRight(u.String(), "/")
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+ return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+ parameters := MapToValues(queryParameters)
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.URL == nil {
+ return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+ }
+ v := r.URL.Query()
+ for key, value := range parameters {
+ for i := range value {
+ d, err := url.QueryUnescape(value[i])
+ if err != nil {
+ return r, err
+ }
+ value[i] = d
+ }
+ v[key] = value
+ }
+ r.URL.RawQuery = v.Encode()
+ }
+ return r, err
+ })
+ }
+}
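A compact sketch of the decorator pipeline above (the URL, path, and parameters are invented for illustration): Prepare threads an http.Request through each PrepareDecorator in turn.

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        // Each decorator contributes one aspect of the request: the method,
        // the base URL, the expanded path, and the query string.
        req, err := autorest.Prepare(&http.Request{},
            autorest.AsGet(),
            autorest.WithBaseURL("https://example.com"),
            autorest.WithPathParameters("/widgets/{id}", map[string]interface{}{"id": "42"}),
            autorest.WithQueryParameters(map[string]interface{}{"api-version": "2019-06-01"}),
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(req.Method, req.URL)
        // GET https://example.com/widgets/42?api-version=2019-06-01
    }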
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 0000000..349e196
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold
+// state, since Responders may be shared and re-used.
+type Responder interface {
+ Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+ return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-used: It depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+ return DecorateResponder(
+ Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+ decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+ for _, decorate := range decorators {
+ r = decorate(r)
+ }
+ return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+ if r == nil {
+ return nil
+ }
+ return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ return r.Respond(resp)
+ })
+ }
+}
+
+// ByCopying returns a RespondDecorator that copies the contents of the http.Response Body into the
+// passed bytes.Buffer as the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && resp != nil && resp.Body != nil {
+ resp.Body = TeeReadCloser(resp.Body, b)
+ }
+ return err
+ })
+ }
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && resp != nil && resp.Body != nil {
+ if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ return fmt.Errorf("Error discarding the response body: %v", err)
+ }
+ }
+ return err
+ })
+ }
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
+func ByClosing() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if resp != nil && resp.Body != nil {
+ if err := resp.Body.Close(); err != nil {
+ return fmt.Errorf("Error closing the response body: %v", err)
+ }
+ }
+ return err
+ })
+ }
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response body if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err != nil && resp != nil && resp.Body != nil {
+ if err := resp.Body.Close(); err != nil {
+ return fmt.Errorf("Error closing the response body: %v", err)
+ }
+ }
+ return err
+ })
+ }
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil {
+ bytes, errInner := ioutil.ReadAll(resp.Body)
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+ } else {
+ *v = bytes
+ }
+ }
+ return err
+ })
+ }
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil {
+ b, errInner := ioutil.ReadAll(resp.Body)
+ // Some responses might include a BOM, remove for successful unmarshalling
+ b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+ } else if len(strings.Trim(string(b), " ")) > 0 {
+ errInner = json.Unmarshal(b, v)
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+ }
+ }
+ }
+ return err
+ })
+ }
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil {
+ b, errInner := ioutil.ReadAll(resp.Body)
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+ } else {
+ errInner = xml.Unmarshal(b, v)
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+ }
+ }
+ }
+ return err
+ })
+ }
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer,
+// attached to the returned error as ServiceError, and restored to the response so it remains readable.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && !ResponseHasStatusCode(resp, codes...) {
+ derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+ resp.Request.Method,
+ resp.Request.URL,
+ resp.Status)
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ b, _ := ioutil.ReadAll(resp.Body)
+ derr.ServiceError = b
+ resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ }
+ err = derr
+ }
+ return err
+ })
+ }
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+ return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+ if resp != nil && resp.Header != nil {
+ return resp.Header[http.CanonicalHeaderKey(header)]
+ }
+ return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+ h := ExtractHeader(header, resp)
+ if len(h) > 0 {
+ return h[0]
+ }
+ return ""
+}
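And a matching sketch for the response side (the URL and payload shape are invented): Respond applies each RespondDecorator in turn, with ByClosing typically last so the body is always released.

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        resp, err := http.Get("https://example.com/widgets/42")
        if err != nil {
            panic(err)
        }

        // Fail on any non-200 status, decode the JSON body, then close it.
        var widget map[string]interface{}
        err = autorest.Respond(resp,
            autorest.WithErrorUnlessStatusCode(http.StatusOK),
            autorest.ByUnmarshallingJSON(&widget),
            autorest.ByClosing(),
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(widget)
    }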
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 0000000..fa11dbe
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+ return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+ return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+ // fall back to making a copy (only do this once)
+ b := []byte{}
+ if rr.req.ContentLength > 0 {
+ b = make([]byte, rr.req.ContentLength)
+ _, err = io.ReadFull(rr.req.Body, b)
+ if err != nil {
+ return err
+ }
+ } else {
+ b, err = ioutil.ReadAll(rr.req.Body)
+ if err != nil {
+ return err
+ }
+ }
+ rr.br = bytes.NewReader(b)
+ rr.req.Body = ioutil.NopCloser(rr.br)
+ return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 0000000..7143cc6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+ req *http.Request
+ br *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+ // preserve the request body; this is to support retry logic as
+ // the underlying transport will always close the request body
+ if rr.req.Body != nil {
+ if rr.br != nil {
+ _, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+ rr.req.Body = ioutil.NopCloser(rr.br)
+ }
+ if err != nil {
+ return err
+ }
+ if rr.br == nil {
+ // fall back to making a copy (only do this once)
+ err = rr.prepareFromByteReader()
+ }
+ }
+ return err
+}
+
+func removeRequestBody(req *http.Request) {
+ req.Body = nil
+ req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 0000000..ae15c6b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+ req *http.Request
+ rc io.ReadCloser
+ br *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+ // preserve the request body; this is to support retry logic as
+ // the underlying transport will always close the request body
+ if rr.req.Body != nil {
+ if rr.rc != nil {
+ rr.req.Body = rr.rc
+ } else if rr.br != nil {
+ _, err = rr.br.Seek(0, io.SeekStart)
+ rr.req.Body = ioutil.NopCloser(rr.br)
+ }
+ if err != nil {
+ return err
+ }
+ if rr.req.GetBody != nil {
+ // this will allow us to preserve the body without having to
+ // make a copy. note we need to do this on each iteration
+ rr.rc, err = rr.req.GetBody()
+ if err != nil {
+ return err
+ }
+ } else if rr.br == nil {
+ // fall back to making a copy (only do this once)
+ err = rr.prepareFromByteReader()
+ }
+ }
+ return err
+}
+
+func removeRequestBody(req *http.Request) {
+ req.Body = nil
+ req.GetBody = nil
+ req.ContentLength = 0
+}
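
Both build variants expose the same contract: call Prepare before every send so the request body can be replayed. A minimal sketch of driving the wrapper by hand, assuming a placeholder endpoint and payload:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	payload := []byte(`{"name":"example"}`) // placeholder body
	req, err := http.NewRequest(http.MethodPut, "https://example.com", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	rr := autorest.NewRetriableRequest(req)
	for attempt := 0; attempt < 3; attempt++ {
		// Prepare rewinds (or re-materializes) the body so the transport can
		// consume and close it safely on every iteration.
		if err = rr.Prepare(); err != nil {
			panic(err)
		}
		var resp *http.Response
		if resp, err = http.DefaultClient.Do(rr.Request()); err == nil {
			resp.Body.Close()
			fmt.Println("status:", resp.Status)
			return
		}
		time.Sleep(time.Second) // crude fixed backoff, for illustration only
	}
	fmt.Println("giving up:", err)
}
```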
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 0000000..5e595d7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,407 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "log"
+ "math"
+ "net/http"
+ "net/http/cookiejar"
+ "strconv"
+ "time"
+
+ "github.com/Azure/go-autorest/tracing"
+)
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+ if len(sendDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+ inCtx := ctx.Value(ctxSendDecorators{})
+ if sd, ok := inCtx.([]SendDecorator); ok {
+ return sd
+ }
+ return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+ return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+ return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+ for _, decorate := range decorators {
+ s = decorate(s)
+ }
+ return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+ return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the provided Sender before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+ return DecorateSender(s, decorators...).Do(r)
+}
+
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+ // Use behaviour compatible with DefaultTransport, but require a minimum TLS version.
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ transport := &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ Renegotiation: renegotiation,
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ return &http.Client{Jar: j, Transport: roundTripper}
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by canceling the http.Request's context.
+// If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ if !DelayForBackoff(d, 0, r.Context().Done()) {
+ return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+ }
+ return s.Do(r)
+ })
+ }
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return s.Do(r)
+ })
+ }
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := s.Do(r)
+ if err != nil {
+ Respond(resp, ByDiscardingBody(), ByClosing())
+ }
+ return resp, err
+ })
+ }
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := s.Do(r)
+ if err == nil && ResponseHasStatusCode(resp, codes...) {
+ err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
+ resp.Request.Method,
+ resp.Request.URL,
+ resp.Status)
+ }
+ return resp, err
+ })
+ }
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := s.Do(r)
+ if err == nil && !ResponseHasStatusCode(resp, codes...) {
+ err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
+ resp.Request.Method,
+ resp.Request.URL,
+ resp.Status)
+ }
+ return resp, err
+ })
+ }
+}
+
+// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
+// passed status codes. It expects the http.Response to contain a Location header providing the
+// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
+// the supplied duration. It will delay between requests for the duration specified in the
+// Retry-After header or, if the header is absent, the passed delay. Polling may be canceled by
+// canceling the context on the http.Request.
+func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+ resp, err = s.Do(r)
+
+ if err == nil && ResponseHasStatusCode(resp, codes...) {
+ r, err = NewPollingRequestWithContext(r.Context(), resp)
+
+ for err == nil && ResponseHasStatusCode(resp, codes...) {
+ Respond(resp,
+ ByDiscardingBody(),
+ ByClosing())
+ resp, err = SendWithSender(s, r,
+ AfterDelay(GetRetryAfter(resp, delay)))
+ }
+ }
+
+ return resp, err
+ })
+ }
+}
+
+// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by canceling the context on
+// the http.Request.
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+ rr := NewRetriableRequest(r)
+ for attempt := 0; attempt < attempts; attempt++ {
+ err = rr.Prepare()
+ if err != nil {
+ return resp, err
+ }
+ resp, err = s.Do(rr.Request())
+ if err == nil {
+ return resp, err
+ }
+ if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+ return nil, r.Context().Err()
+ }
+ }
+ return resp, err
+ })
+ }
+}
+
+// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by canceling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
+ })
+ }
+}
+
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by canceling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
+ })
+ }
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+ rr := NewRetriableRequest(r)
+ // Increment to add the first call (attempts denotes number of retries)
+ for attempt := 0; attempt < attempts+1; {
+ err = rr.Prepare()
+ if err != nil {
+ return
+ }
+ resp, err = s.Do(rr.Request())
+ // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+ // resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+ if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+ return resp, err
+ }
+ delayed := DelayWithRetryAfter(resp, r.Context().Done())
+ if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
+ return resp, r.Context().Err()
+ }
+ // when count429 == false don't count a 429 against the number
+ // of attempts so that we continue to retry until it succeeds
+ if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+ attempt++
+ }
+ }
+ return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is canceled the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+ if resp == nil {
+ return false
+ }
+ var dur time.Duration
+ ra := resp.Header.Get("Retry-After")
+ if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+ dur = time.Duration(retryAfter) * time.Second
+ } else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+ dur = t.Sub(time.Now())
+ }
+ if dur > 0 {
+ select {
+ case <-time.After(dur):
+ return true
+ case <-cancel:
+ return false
+ }
+ }
+ return false
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
+// optional channel on the http.Request.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+ rr := NewRetriableRequest(r)
+ end := time.Now().Add(d)
+ for attempt := 0; time.Now().Before(end); attempt++ {
+ err = rr.Prepare()
+ if err != nil {
+ return resp, err
+ }
+ resp, err = s.Do(rr.Request())
+ if err == nil {
+ return resp, err
+ }
+ if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+ return nil, r.Context().Err()
+ }
+ }
+ return resp, err
+ })
+ }
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ logger.Printf("Sending %s %s", r.Method, r.URL)
+ resp, err := s.Do(r)
+ if err != nil {
+ logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+ } else {
+ logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+ }
+ return resp, err
+ })
+ }
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// the passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can
+// be set to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
+// returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+ return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// the passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can
+// be set to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+ d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+ if cap > 0 && d > cap {
+ d = cap
+ }
+ select {
+ case <-time.After(d):
+ return true
+ case <-cancel:
+ return false
+ }
+}
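
The decorators above are designed to stack. A minimal sketch of a logging, retrying sender built with DecorateSender (the attempt count, backoff, cap, and status codes are illustrative choices, not defaults):

```go
package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	logger := log.New(os.Stderr, "autorest: ", log.LstdFlags)

	// Wrap a plain http.Client with before/after logging and a capped
	// exponential retry on throttling and transient server errors.
	s := autorest.DecorateSender(&http.Client{},
		autorest.WithLogging(logger),
		autorest.DoRetryForStatusCodesWithCap(3, 2*time.Second, 30*time.Second,
			http.StatusTooManyRequests,
			http.StatusServiceUnavailable))

	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil) // placeholder URL
	if err != nil {
		logger.Fatal(err)
	}
	resp, err := s.Do(req)
	if err != nil {
		logger.Fatal(err)
	}
	resp.Body.Close()
	logger.Println("final status:", resp.Status)
}
```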
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
new file mode 100644
index 0000000..86694bd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
@@ -0,0 +1,152 @@
+/*
+Package to provides helpers to ease working with pointer values of marshalled structures.
+*/
+package to
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// String returns a string value for the passed string pointer. It returns the empty string if the
+// pointer is nil.
+func String(s *string) string {
+ if s != nil {
+ return *s
+ }
+ return ""
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+ return &s
+}
+
+// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil
+// slice if the pointer is nil.
+func StringSlice(s *[]string) []string {
+ if s != nil {
+ return *s
+ }
+ return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+ return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+ ms := make(map[string]string, len(msp))
+ for k, sp := range msp {
+ if sp != nil {
+ ms[k] = *sp
+ } else {
+ ms[k] = ""
+ }
+ }
+ return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+ msp := make(map[string]*string, len(ms))
+ for k, s := range ms {
+ msp[k] = StringPtr(s)
+ }
+ return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+ if b != nil {
+ return *b
+ }
+ return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+ return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+ if i != nil {
+ return *i
+ }
+ return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+ return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+ if i != nil {
+ return *i
+ }
+ return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+ return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+ if i != nil {
+ return *i
+ }
+ return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+ return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer is nil.
+func Float32(i *float32) float32 {
+ if i != nil {
+ return *i
+ }
+ return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+ return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer is nil.
+func Float64(i *float64) float64 {
+ if i != nil {
+ return *i
+ }
+ return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+ return &i
+}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
+func ByteSlicePtr(b []byte) *[]byte {
+ return &b
+}
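
These helpers exist because generated Azure models use pointer fields to distinguish unset values from zero values. A small sketch of converting in both directions, using a hypothetical model type:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
)

// VM is a hypothetical SDK-style model; pointer fields let an
// unset value be told apart from a zero value.
type VM struct {
	Name  *string
	Count *int32
}

func main() {
	vm := VM{Name: to.StringPtr("worker-1"), Count: to.Int32Ptr(3)}

	// The value helpers are nil-safe: an unset field yields its zero value.
	var unset *string
	fmt.Println(to.String(vm.Name)) // worker-1
	fmt.Println(to.Int32(vm.Count)) // 3
	fmt.Println(to.String(unset))   // "" (empty string)
}
```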
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
new file mode 100644
index 0000000..48fd8c6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/to
+
+go 1.12
+
+require github.com/Azure/go-autorest/autorest v0.9.0
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
new file mode 100644
index 0000000..d7ee6b4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
@@ -0,0 +1,17 @@
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
new file mode 100644
index 0000000..8e82921
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package to
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding a multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 0000000..08cf11c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,228 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+// EncodedAs is a series of constants specifying various data encodings
+type EncodedAs string
+
+const (
+ // EncodedAsJSON states that data is encoded as JSON
+ EncodedAsJSON EncodedAs = "JSON"
+
+ // EncodedAsXML states that data is encoded as XML
+ EncodedAsXML EncodedAs = "XML"
+)
+
+// Decoder defines the decoding method json.Decoder and xml.Decoder share
+type Decoder interface {
+ Decode(v interface{}) error
+}
+
+// NewDecoder creates a new decoder appropriate to the passed encoding.
+// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
+// encoded data.
+func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
+ if encodedAs == EncodedAsJSON {
+ return json.NewDecoder(r)
+ } else if encodedAs == EncodedAsXML {
+ return xml.NewDecoder(r)
+ }
+ return nil
+}
+
+// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
+// is especially useful if there is a chance the data will fail to decode.
+// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
+// is the decoding destination.
+func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
+ b := bytes.Buffer{}
+ return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
+}
+
+// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
+// Further, when it is closed, it ensures that rc is closed as well.
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
+ return &teeReadCloser{rc, io.TeeReader(rc, w)}
+}
+
+type teeReadCloser struct {
+ rc io.ReadCloser
+ r io.Reader
+}
+
+func (t *teeReadCloser) Read(p []byte) (int, error) {
+ return t.r.Read(p)
+}
+
+func (t *teeReadCloser) Close() error {
+ return t.rc.Close()
+}
+
+func containsInt(ints []int, n int) bool {
+ for _, i := range ints {
+ if i == n {
+ return true
+ }
+ }
+ return false
+}
+
+func escapeValueStrings(m map[string]string) map[string]string {
+ for key, value := range m {
+ m[key] = url.QueryEscape(value)
+ }
+ return m
+}
+
+func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
+ mapOfStrings := make(map[string]string)
+ for key, value := range mapOfInterface {
+ mapOfStrings[key] = ensureValueString(value)
+ }
+ return mapOfStrings
+}
+
+func ensureValueString(value interface{}) string {
+ if value == nil {
+ return ""
+ }
+ switch v := value.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+// MapToValues method converts map[string]interface{} to url.Values.
+func MapToValues(m map[string]interface{}) url.Values {
+ v := url.Values{}
+ for key, value := range m {
+ x := reflect.ValueOf(value)
+ if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+ for i := 0; i < x.Len(); i++ {
+ v.Add(key, ensureValueString(x.Index(i)))
+ }
+ } else {
+ v.Add(key, ensureValueString(value))
+ }
+ }
+ return v
+}
+
+// AsStringSlice converts the passed interface{} to a []string. It expects the
+// parameter to be a slice or array whose elements have an underlying type of
+// string.
+func AsStringSlice(s interface{}) ([]string, error) {
+ v := reflect.ValueOf(s)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.")
+ }
+ stringSlice := make([]string, 0, v.Len())
+
+ for i := 0; i < v.Len(); i++ {
+ stringSlice = append(stringSlice, v.Index(i).String())
+ }
+ return stringSlice, nil
+}
+
+// String method converts interface v to string. If interface is a list, it
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+ if len(sep) == 0 {
+ return ensureValueString(v)
+ }
+ stringSlice, ok := v.([]string)
+ if !ok {
+ var err error
+ stringSlice, err = AsStringSlice(v)
+ if err != nil {
+ panic(fmt.Sprintf("autorest: Couldn't convert value to a string: %s", err))
+ }
+ }
+ return ensureValueString(strings.Join(stringSlice, sep[0]))
+}
+
+// Encode method encodes url path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+ s := String(v, sep...)
+ switch strings.ToLower(location) {
+ case "path":
+ return pathEscape(s)
+ case "query":
+ return queryEscape(s)
+ default:
+ return s
+ }
+}
+
+func pathEscape(s string) string {
+ return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+ return url.QueryEscape(s)
+}
+
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+ req.Method = "GET"
+ req.Body = nil
+ req.ContentLength = 0
+ req.Header.Del("Content-Length")
+ return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool {
+ if _, ok := err.(adal.TokenRefreshError); ok {
+ return true
+ }
+ if de, ok := err.(DetailedError); ok {
+ return IsTokenRefreshError(de.Original)
+ }
+ return false
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
+// if it's not. If the error doesn't implement the net.Error interface the return value is true.
+func IsTemporaryNetworkError(err error) bool {
+ if netErr, ok := err.(net.Error); !ok || netErr.Temporary() {
+ return true
+ }
+ return false
+}
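
A quick sketch of the exported URL helpers MapToValues and Encode (the parameter names and values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// MapToValues flattens a parameter map, expanding slices into repeated keys.
	v := autorest.MapToValues(map[string]interface{}{
		"api-version": "2019-06-01",
		"tags":        []string{"a", "b"},
	})
	fmt.Println(v.Encode()) // api-version=2019-06-01&tags=a&tags=b

	// Encode escapes for the named location; in paths, spaces become %20.
	fmt.Println(autorest.Encode("path", "resource group")) // resource%20group
}
```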
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
new file mode 100644
index 0000000..fed156d
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
@@ -0,0 +1,48 @@
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+)
+
+// Error is the type that's returned when the validation of an API's argument constraints fails.
+type Error struct {
+ // PackageType is the package type of the object emitting the error. For types, the value
+ // matches that produced by the '%T' format specifier of the fmt package. For other elements,
+ // such as functions, it is just the package name (e.g., "autorest").
+ PackageType string
+
+ // Method is the name of the method raising the error.
+ Method string
+
+ // Message is the error message.
+ Message string
+}
+
+// Error returns a string containing the details of the validation failure.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
+}
+
+// NewError creates a new Error object with the specified parameters.
+// message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) Error {
+ return Error{
+ PackageType: packageType,
+ Method: method,
+ Message: fmt.Sprintf(message, args...),
+ }
+}
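For orientation, here is a minimal sketch of how this error type is typically consumed; the client name, method name, and format arguments are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/validation"
)

func main() {
	// NewError treats the message as a format string for the trailing args.
	err := validation.NewError("storage.AccountsClient", "Create",
		"name length must be between %d and %d", 3, 24)

	// Error() renders "PackageType#Method: Invalid input: Message".
	fmt.Println(err)
	// storage.AccountsClient#Create: Invalid input: name length must be between 3 and 24
}
```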
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
new file mode 100644
index 0000000..b3f9b6a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
@@ -0,0 +1,8 @@
+module github.com/Azure/go-autorest/autorest/validation
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest v0.9.0
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
new file mode 100644
index 0000000..6b9010a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
@@ -0,0 +1,24 @@
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
new file mode 100644
index 0000000..2b26685
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
new file mode 100644
index 0000000..65899b6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -0,0 +1,400 @@
+/*
+Package validation provides methods for validating parameter values using reflection.
+*/
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+)
+
+// Constraint stores the constraint name, the target field name,
+// the rule, and any chained validations.
+type Constraint struct {
+
+ // Target field name for validation.
+ Target string
+
+ // Constraint name e.g. MinLength, MaxLength, Pattern, etc.
+ Name string
+
+ // Rule for constraint e.g. greater than 10, less than 5 etc.
+ Rule interface{}
+
+ // Chain Validations for struct type
+ Chain []Constraint
+}
+
+// Validation stores parameter-wise validation.
+type Validation struct {
+ TargetValue interface{}
+ Constraints []Constraint
+}
+
+// Constraint list
+const (
+ Empty = "Empty"
+ Null = "Null"
+ ReadOnly = "ReadOnly"
+ Pattern = "Pattern"
+ MaxLength = "MaxLength"
+ MinLength = "MinLength"
+ MaxItems = "MaxItems"
+ MinItems = "MinItems"
+ MultipleOf = "MultipleOf"
+ UniqueItems = "UniqueItems"
+ InclusiveMaximum = "InclusiveMaximum"
+ ExclusiveMaximum = "ExclusiveMaximum"
+ ExclusiveMinimum = "ExclusiveMinimum"
+ InclusiveMinimum = "InclusiveMinimum"
+)
+
+// Validate validates the constraints on the parameters
+// passed in the validation array.
+func Validate(m []Validation) error {
+ for _, item := range m {
+ v := reflect.ValueOf(item.TargetValue)
+ for _, constraint := range item.Constraints {
+ var err error
+ switch v.Kind() {
+ case reflect.Ptr:
+ err = validatePtr(v, constraint)
+ case reflect.String:
+ err = validateString(v, constraint)
+ case reflect.Struct:
+ err = validateStruct(v, constraint)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ err = validateInt(v, constraint)
+ case reflect.Float32, reflect.Float64:
+ err = validateFloat(v, constraint)
+ case reflect.Array, reflect.Slice, reflect.Map:
+ err = validateArrayMap(v, constraint)
+ default:
+ err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func validateStruct(x reflect.Value, v Constraint, name ...string) error {
+ // Get the field name from the target name, which is in the format a.b.c
+ s := strings.Split(v.Target, ".")
+ f := x.FieldByName(s[len(s)-1])
+ if isZero(f) {
+ return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target))
+ }
+
+ return Validate([]Validation{
+ {
+ TargetValue: getInterfaceValue(f),
+ Constraints: []Constraint{v},
+ },
+ })
+}
+
+func validatePtr(x reflect.Value, v Constraint) error {
+ if v.Name == ReadOnly {
+ if !x.IsNil() {
+ return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
+ }
+ return nil
+ }
+ if x.IsNil() {
+ return checkNil(x, v)
+ }
+ if v.Chain != nil {
+ return Validate([]Validation{
+ {
+ TargetValue: getInterfaceValue(x.Elem()),
+ Constraints: v.Chain,
+ },
+ })
+ }
+ return nil
+}
+
+func validateInt(x reflect.Value, v Constraint) error {
+ i := x.Int()
+ r, ok := toInt64(v.Rule)
+ if !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ switch v.Name {
+ case MultipleOf:
+ if i%r != 0 {
+ return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
+ }
+ case ExclusiveMinimum:
+ if i <= r {
+ return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
+ }
+ case ExclusiveMaximum:
+ if i >= r {
+ return createError(x, v, fmt.Sprintf("value must be less than %v", r))
+ }
+ case InclusiveMinimum:
+ if i < r {
+ return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
+ }
+ case InclusiveMaximum:
+ if i > r {
+ return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name))
+ }
+ return nil
+}
+
+func validateFloat(x reflect.Value, v Constraint) error {
+ f := x.Float()
+ r, ok := v.Rule.(float64)
+ if !ok {
+ return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ switch v.Name {
+ case ExclusiveMinimum:
+ if f <= r {
+ return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
+ }
+ case ExclusiveMaximum:
+ if f >= r {
+ return createError(x, v, fmt.Sprintf("value must be less than %v", r))
+ }
+ case InclusiveMinimum:
+ if f < r {
+ return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
+ }
+ case InclusiveMaximum:
+ if f > r {
+ return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name))
+ }
+ return nil
+}
+
+func validateString(x reflect.Value, v Constraint) error {
+ s := x.String()
+ switch v.Name {
+ case Empty:
+ if len(s) == 0 {
+ return checkEmpty(x, v)
+ }
+ case Pattern:
+ reg, err := regexp.Compile(v.Rule.(string))
+ if err != nil {
+ return createError(x, v, err.Error())
+ }
+ if !reg.MatchString(s) {
+ return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule))
+ }
+ case MaxLength:
+ if _, ok := v.Rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ if len(s) > v.Rule.(int) {
+ return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule))
+ }
+ case MinLength:
+ if _, ok := v.Rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ if len(s) < v.Rule.(int) {
+ return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule))
+ }
+ case ReadOnly:
+ if len(s) > 0 {
+ return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name))
+ }
+
+ if v.Chain != nil {
+ return Validate([]Validation{
+ {
+ TargetValue: getInterfaceValue(x),
+ Constraints: v.Chain,
+ },
+ })
+ }
+ return nil
+}
+
+func validateArrayMap(x reflect.Value, v Constraint) error {
+ switch v.Name {
+ case Null:
+ if x.IsNil() {
+ return checkNil(x, v)
+ }
+ case Empty:
+ if x.IsNil() || x.Len() == 0 {
+ return checkEmpty(x, v)
+ }
+ case MaxItems:
+ if _, ok := v.Rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ if x.Len() > v.Rule.(int) {
+ return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len()))
+ }
+ case MinItems:
+ if _, ok := v.Rule.(int); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ if x.Len() < v.Rule.(int) {
+ return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len()))
+ }
+ case UniqueItems:
+ if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+ if !checkForUniqueInArray(x) {
+ return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
+ }
+ } else if x.Kind() == reflect.Map {
+ if !checkForUniqueInMap(x) {
+ return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
+ }
+ } else {
+ return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind()))
+ }
+ case ReadOnly:
+ if x.Len() != 0 {
+ return createError(x, v, "readonly parameter; must send as nil or empty in request")
+ }
+ case Pattern:
+ reg, err := regexp.Compile(v.Rule.(string))
+ if err != nil {
+ return createError(x, v, err.Error())
+ }
+ keys := x.MapKeys()
+ for _, k := range keys {
+ if !reg.MatchString(k.String()) {
+ return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule))
+ }
+ }
+ default:
+ return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name))
+ }
+
+ if v.Chain != nil {
+ return Validate([]Validation{
+ {
+ TargetValue: getInterfaceValue(x),
+ Constraints: v.Chain,
+ },
+ })
+ }
+ return nil
+}
+
+func checkNil(x reflect.Value, v Constraint) error {
+ if _, ok := v.Rule.(bool); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
+ }
+ if v.Rule.(bool) {
+ return createError(x, v, "value can not be null; required parameter")
+ }
+ return nil
+}
+
+func checkEmpty(x reflect.Value, v Constraint) error {
+ if _, ok := v.Rule.(bool); !ok {
+ return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
+ }
+
+ if v.Rule.(bool) {
+ return createError(x, v, "value can not be null or empty; required parameter")
+ }
+ return nil
+}
+
+func checkForUniqueInArray(x reflect.Value) bool {
+ if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
+ return false
+ }
+ arrOfInterface := make([]interface{}, x.Len())
+
+ for i := 0; i < x.Len(); i++ {
+ arrOfInterface[i] = x.Index(i).Interface()
+ }
+
+ m := make(map[interface{}]bool)
+ for _, val := range arrOfInterface {
+ if m[val] {
+ return false
+ }
+ m[val] = true
+ }
+ return true
+}
+
+func checkForUniqueInMap(x reflect.Value) bool {
+ if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
+ return false
+ }
+ mapOfInterface := make(map[interface{}]interface{}, x.Len())
+
+ keys := x.MapKeys()
+ for _, k := range keys {
+ mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
+ }
+
+ m := make(map[interface{}]bool)
+ for _, val := range mapOfInterface {
+ if m[val] {
+ return false
+ }
+ m[val] = true
+ }
+ return true
+}
+
+func getInterfaceValue(x reflect.Value) interface{} {
+ if x.Kind() == reflect.Invalid {
+ return nil
+ }
+ return x.Interface()
+}
+
+func isZero(x interface{}) bool {
+ return x == reflect.Zero(reflect.TypeOf(x)).Interface()
+}
+
+func createError(x reflect.Value, v Constraint, err string) error {
+ return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s",
+ v.Target, v.Name, getInterfaceValue(x), err)
+}
+
+func toInt64(v interface{}) (int64, bool) {
+ if i64, ok := v.(int64); ok {
+ return i64, true
+ }
+ // older generators emit max constants as int, so if int64 fails fall back to int
+ if i32, ok := v.(int); ok {
+ return int64(i32), true
+ }
+ return 0, false
+}
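To illustrate how generated SDK clients drive this package, a small sketch follows; the parameter name and rules are hypothetical (loosely modeled on storage account naming):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/validation"
)

func main() {
	accountName := "ab" // violates the MinLength rule below

	err := validation.Validate([]validation.Validation{
		{
			TargetValue: accountName,
			Constraints: []validation.Constraint{
				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
				{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
				{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+$`, Chain: nil},
			},
		},
	})
	if err != nil {
		// Validation stops at the first failed constraint,
		// so only one error is reported per call.
		fmt.Println(err)
	}
}
```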
diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go
new file mode 100644
index 0000000..7a71089
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/version.go
@@ -0,0 +1,41 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "runtime"
+)
+
+const number = "v13.0.2"
+
+var (
+ userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
+ runtime.Version(),
+ runtime.GOARCH,
+ runtime.GOOS,
+ number,
+ )
+)
+
+// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
+func UserAgent() string {
+ return userAgent
+}
+
+// Version returns the semantic version (see http://semver.org).
+func Version() string {
+ return number
+}
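A trivial usage sketch; the printed values depend on the local toolchain:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Something like: Go/go1.12.13 (amd64-linux) go-autorest/v13.0.2
	fmt.Println(autorest.UserAgent())
	fmt.Println(autorest.Version()) // v13.0.2
}
```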
diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod
new file mode 100644
index 0000000..f22ed56
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/logger
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go
new file mode 100644
index 0000000..da09f39
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/logger.go
@@ -0,0 +1,328 @@
+package logger
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+// LevelType tells a logger the minimum level to log. When code reports a log entry,
+// the LogLevel indicates the level of the log entry. The logger only records entries
+// whose level is at least the level it was told to log. See the Log* constants.
+// For example, if a logger is configured with LogError, then LogError, LogPanic,
+// and LogFatal entries will be logged; lower level entries are ignored.
+type LevelType uint32
+
+const (
+ // LogNone tells a logger not to log any entries passed to it.
+ LogNone LevelType = iota
+
+ // LogFatal tells a logger to log all LogFatal entries passed to it.
+ LogFatal
+
+ // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
+ LogPanic
+
+ // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
+ LogError
+
+ // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+ LogWarning
+
+ // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+ LogInfo
+
+ // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+ LogDebug
+)
+
+const (
+ logNone = "NONE"
+ logFatal = "FATAL"
+ logPanic = "PANIC"
+ logError = "ERROR"
+ logWarning = "WARNING"
+ logInfo = "INFO"
+ logDebug = "DEBUG"
+ logUnknown = "UNKNOWN"
+)
+
+// ParseLevel converts the specified string into the corresponding LevelType.
+func ParseLevel(s string) (lt LevelType, err error) {
+ switch strings.ToUpper(s) {
+ case logFatal:
+ lt = LogFatal
+ case logPanic:
+ lt = LogPanic
+ case logError:
+ lt = LogError
+ case logWarning:
+ lt = LogWarning
+ case logInfo:
+ lt = LogInfo
+ case logDebug:
+ lt = LogDebug
+ default:
+ err = fmt.Errorf("bad log level '%s'", s)
+ }
+ return
+}
+
+// String implements the stringer interface for LevelType.
+func (lt LevelType) String() string {
+ switch lt {
+ case LogNone:
+ return logNone
+ case LogFatal:
+ return logFatal
+ case LogPanic:
+ return logPanic
+ case LogError:
+ return logError
+ case LogWarning:
+ return logWarning
+ case LogInfo:
+ return logInfo
+ case LogDebug:
+ return logDebug
+ default:
+ return logUnknown
+ }
+}
+
+// Filter defines functions for filtering HTTP request/response content.
+type Filter struct {
+ // URL returns a potentially modified string representation of a request URL.
+ URL func(u *url.URL) string
+
+ // Header returns a potentially modified set of values for the specified key.
+ // To completely exclude the header key/values return false.
+ Header func(key string, val []string) (bool, []string)
+
+ // Body returns a potentially modified request/response body.
+ Body func(b []byte) []byte
+}
+
+func (f Filter) processURL(u *url.URL) string {
+ if f.URL == nil {
+ return u.String()
+ }
+ return f.URL(u)
+}
+
+func (f Filter) processHeader(k string, val []string) (bool, []string) {
+ if f.Header == nil {
+ return true, val
+ }
+ return f.Header(k, val)
+}
+
+func (f Filter) processBody(b []byte) []byte {
+ if f.Body == nil {
+ return b
+ }
+ return f.Body(b)
+}
+
+// Writer defines methods for writing to a logging facility.
+type Writer interface {
+ // Writeln writes the specified message with the standard log entry header and new-line character.
+ Writeln(level LevelType, message string)
+
+ // Writef writes the specified format specifier with the standard log entry header and no new-line character.
+ Writef(level LevelType, format string, a ...interface{})
+
+ // WriteRequest writes the specified HTTP request to the logger if the log level is greater than
+ // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
+ // Custom filters can be specified to exclude URL, header, and/or body content from the log.
+ // By default no request content is excluded.
+ WriteRequest(req *http.Request, filter Filter)
+
+ // WriteResponse writes the specified HTTP response to the logger if the log level is greater than
+ // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
+ // Custom filters can be specified to exclude URL, header, and/or body content from the log.
+ // By default no response content is excluded.
+ WriteResponse(resp *http.Response, filter Filter)
+}
+
+// Instance is the default log writer initialized during package init.
+// This can be replaced with a custom implementation as required.
+var Instance Writer
+
+// default log level
+var logLevel = LogNone
+
+// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL.
+// If no value was specified the default value is LogNone.
+// Custom loggers can call this to retrieve the configured log level.
+func Level() LevelType {
+ return logLevel
+}
+
+func init() {
+ // separated for testing purposes
+ initDefaultLogger()
+}
+
+func initDefaultLogger() {
+ // init with nilLogger so callers don't have to do a nil check on Default
+ Instance = nilLogger{}
+ llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
+ if llStr == "" {
+ return
+ }
+ var err error
+ logLevel, err = ParseLevel(llStr)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
+ return
+ }
+ if logLevel == LogNone {
+ return
+ }
+ // default to stderr
+ dest := os.Stderr
+ lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
+ if strings.EqualFold(lfStr, "stdout") {
+ dest = os.Stdout
+ } else if lfStr != "" {
+ lf, err := os.Create(lfStr)
+ if err == nil {
+ dest = lf
+ } else {
+ fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
+ }
+ }
+ Instance = fileLogger{
+ logLevel: logLevel,
+ mu: &sync.Mutex{},
+ logFile: dest,
+ }
+}
+
+// the nil logger does nothing
+type nilLogger struct{}
+
+func (nilLogger) Writeln(LevelType, string) {}
+
+func (nilLogger) Writef(LevelType, string, ...interface{}) {}
+
+func (nilLogger) WriteRequest(*http.Request, Filter) {}
+
+func (nilLogger) WriteResponse(*http.Response, Filter) {}
+
+// fileLogger writes to an os.File rather than wrapping a log.Logger so the stream can be flushed after every write.
+type fileLogger struct {
+ logLevel LevelType
+ mu *sync.Mutex // for synchronizing writes to logFile
+ logFile *os.File
+}
+
+func (fl fileLogger) Writeln(level LevelType, message string) {
+ fl.Writef(level, "%s\n", message)
+}
+
+func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) {
+ if fl.logLevel >= level {
+ fl.mu.Lock()
+ defer fl.mu.Unlock()
+ fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...))
+ fl.logFile.Sync()
+ }
+}
+
+func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
+ if req == nil || fl.logLevel < LogInfo {
+ return
+ }
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL))
+ // dump headers
+ for k, v := range req.Header {
+ if ok, mv := filter.processHeader(k, v); ok {
+ fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
+ }
+ }
+ if fl.shouldLogBody(req.Header, req.Body) {
+ // dump body
+ body, err := ioutil.ReadAll(req.Body)
+ if err == nil {
+ fmt.Fprintln(b, string(filter.processBody(body)))
+ if nc, ok := req.Body.(io.Seeker); ok {
+ // rewind to the beginning
+ nc.Seek(0, io.SeekStart)
+ } else {
+ // recreate the body
+ req.Body = ioutil.NopCloser(bytes.NewReader(body))
+ }
+ } else {
+ fmt.Fprintf(b, "failed to read body: %v\n", err)
+ }
+ }
+ fl.mu.Lock()
+ defer fl.mu.Unlock()
+ fmt.Fprint(fl.logFile, b.String())
+ fl.logFile.Sync()
+}
+
+func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
+ if resp == nil || fl.logLevel < LogInfo {
+ return
+ }
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL))
+ // dump headers
+ for k, v := range resp.Header {
+ if ok, mv := filter.processHeader(k, v); ok {
+ fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
+ }
+ }
+ if fl.shouldLogBody(resp.Header, resp.Body) {
+ // dump body
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err == nil {
+ fmt.Fprintln(b, string(filter.processBody(body)))
+ resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+ } else {
+ fmt.Fprintf(b, "failed to read body: %v\n", err)
+ }
+ }
+ fl.mu.Lock()
+ defer fl.mu.Unlock()
+ fmt.Fprint(fl.logFile, b.String())
+ fl.logFile.Sync()
+}
+
+// returns true if the provided body should be included in the log
+func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
+ ct := header.Get("Content-Type")
+ return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
+}
+
+// creates standard header for log entries, it contains a timestamp and the log level
+func entryHeader(level LevelType) string {
+ // this format provides a fixed number of digits so the size of the timestamp is constant
+ return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String())
+}
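Because the default logger is configured from environment variables at package init, a process typically exports `AZURE_GO_SDK_LOG_LEVEL` (and optionally `AZURE_GO_SDK_LOG_FILE`) before it starts. The sketch below shows direct use of the package; the redaction filter is illustrative:

```go
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	// With AZURE_GO_SDK_LOG_LEVEL=INFO exported before the process starts,
	// Instance writes to stderr (or to AZURE_GO_SDK_LOG_FILE if set);
	// otherwise it is the nil logger and these calls are no-ops.
	logger.Instance.Writeln(logger.LogInfo, "starting request")

	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)

	// A Filter can redact sensitive headers before they are logged.
	redact := logger.Filter{
		Header: func(key string, val []string) (bool, []string) {
			if key == "Authorization" {
				return true, []string{"**REDACTED**"}
			}
			return true, val
		},
	}
	logger.Instance.WriteRequest(req, redact)
}
```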
diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod
new file mode 100644
index 0000000..25c34c1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/tracing
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
new file mode 100644
index 0000000..0e7a6e9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
@@ -0,0 +1,67 @@
+package tracing
+
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "context"
+ "net/http"
+)
+
+// Tracer represents an HTTP tracing facility.
+type Tracer interface {
+ NewTransport(base *http.Transport) http.RoundTripper
+ StartSpan(ctx context.Context, name string) context.Context
+ EndSpan(ctx context.Context, httpStatusCode int, err error)
+}
+
+var (
+ tracer Tracer
+)
+
+// Register will register the provided Tracer. Pass nil to unregister a Tracer.
+func Register(t Tracer) {
+ tracer = t
+}
+
+// IsEnabled returns true if a Tracer has been registered.
+func IsEnabled() bool {
+ return tracer != nil
+}
+
+// NewTransport creates a new instrumenting http.RoundTripper for the
+// registered Tracer. If no Tracer has been registered it returns nil.
+func NewTransport(base *http.Transport) http.RoundTripper {
+ if tracer != nil {
+ return tracer.NewTransport(base)
+ }
+ return nil
+}
+
+// StartSpan starts a trace span with the specified name, associating it with the
+// provided context. Has no effect if a Tracer has not been registered.
+func StartSpan(ctx context.Context, name string) context.Context {
+ if tracer != nil {
+ return tracer.StartSpan(ctx, name)
+ }
+ return ctx
+}
+
+// EndSpan ends a previously started span stored in the context.
+// Has no effect if a Tracer has not been registered.
+func EndSpan(ctx context.Context, httpStatusCode int, err error) {
+ if tracer != nil {
+ tracer.EndSpan(ctx, httpStatusCode, err)
+ }
+}
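A no-op `Tracer` is enough to see the registration flow; a real implementation would bridge these hooks to a backend such as OpenCensus. This sketch is illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// printTracer is a toy Tracer that logs span boundaries.
type printTracer struct{}

func (printTracer) NewTransport(base *http.Transport) http.RoundTripper {
	return base // no instrumentation in this sketch
}

func (printTracer) StartSpan(ctx context.Context, name string) context.Context {
	fmt.Println("start span:", name)
	return ctx
}

func (printTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {
	fmt.Println("end span, status:", httpStatusCode)
}

func main() {
	tracing.Register(printTracer{})

	ctx := tracing.StartSpan(context.Background(), "example.Operation")
	// ... perform the traced operation ...
	tracing.EndSpan(ctx, http.StatusOK, nil)
}
```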
diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore
new file mode 100644
index 0000000..80bed65
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
new file mode 100644
index 0000000..1027f56
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+script:
+ - go vet ./...
+ - go test -v ./...
+
+go:
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - tip
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 0000000..df83a9c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 0000000..7fc1f79
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except that you must type-assert the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+ if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+ fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+ }
+```
+
+is now directly mapped to...
+
+```go
+ if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+ claims := token.Claims.(jwt.MapClaims)
+ fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+ }
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+ type MyCustomClaims struct {
+ User string
+ *StandardClaims
+ }
+
+ if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+ claims := token.Claims.(*MyCustomClaims)
+ fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+ }
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+ if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
+ fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+ }
+```
+
+is directly mapped to:
+
+```go
+ if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+ claims := token.Claims.(jwt.MapClaims)
+ fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+ }
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+ func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+ // Don't forget to validate the alg is what you expect:
+ if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+ return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+ }
+
+ // Look up key
+ key, err := lookupPublicKey(token.Header["kid"])
+ if err != nil {
+ return nil, err
+ }
+
+ // Unpack key from PEM encoded PKCS8
+ return jwt.ParseRSAPublicKeyFromPEM(key)
+ }
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 0000000..d358d88
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,100 @@
+# jwt-go
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+**NEW VERSION COMING:** There have been a lot of improvements suggested since version 3.0.0 was released in 2016. I'm now working on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in `crypto/elliptic`. The recommendation is to upgrade to at least Go 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the information necessary for verifying the last part, the signature. For example, it records which signing method was used and, optionally, which key (`kid`) should be used for verification.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
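+To make the three-part structure concrete, here's a minimal sketch (standard library only, assuming `tokenString` holds a well-formed JWT) that splits a token and decodes its first two parts. Note that JWTs use the *unpadded* base64url alphabet, which `base64.RawURLEncoding` handles directly:
+
+```go
+ parts := strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ log.Fatal("not a JWT: expected 3 dot-separated segments")
+ }
+
+ headerJSON, _ := base64.RawURLEncoding.DecodeString(parts[0])
+ claimsJSON, _ := base64.RawURLEncoding.DecodeString(parts[1])
+ fmt.Printf("header: %s\nclaims: %s\n", headerJSON, claimsJSON)
+```
+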
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`; a minimal skeleton is sketched below.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
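+As a rough, hypothetical skeleton of the interface's shape (do not ship this: its `Verify` accepts everything), a custom method might look like:
+
+```go
+ type SigningMethodDummy struct{}
+
+ func (m *SigningMethodDummy) Alg() string { return "DUMMY" }
+
+ func (m *SigningMethodDummy) Sign(signingString string, key interface{}) (string, error) {
+ return "", nil // a real method returns the encoded signature here
+ }
+
+ func (m *SigningMethodDummy) Verify(signingString, signature string, key interface{}) error {
+ return nil // a real method returns an error on signature mismatch
+ }
+
+ func init() {
+ jwt.RegisterSigningMethod("DUMMY", func() jwt.SigningMethod {
+ return &SigningMethodDummy{}
+ })
+ }
+```
+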
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519), dated May 2015, with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
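+For example, deliberately accepting an unsecured token requires returning that constant from your key lookup function, so it cannot happen by accident:
+
+```go
+ token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+ return jwt.UnsafeAllowNoneSignatureType, nil // explicit opt-in to alg=none
+ })
+```
+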
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package import path: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing with respect to semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in the possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest kind of signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or are even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
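+To make that concrete, here's a minimal sketch of both flavors using this library (assuming `privateKey` is an `*rsa.PrivateKey` you loaded elsewhere, e.g. with `ParseRSAPrivateKeyFromPEM`):
+
+```go
+ // Symmetric: the same []byte secret both signs and validates.
+ hmacToken := jwt.New(jwt.SigningMethodHS256)
+ signed, err := hmacToken.SignedString([]byte("my-shared-secret"))
+
+ // Asymmetric: sign with the private key; verifiers only need the public key.
+ rsaToken := jwt.New(jwt.SigningMethodRS256)
+ signed, err = rsaToken.SignedString(privateKey)
+```
+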
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones (a key-loading sketch follows the list):
+
+* The [HMAC signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
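+As sketched below (the file name is hypothetical), the PEM helpers bridge the gap between key files on disk and the typed keys these methods expect:
+
+```go
+ keyBytes, err := ioutil.ReadFile("jwt.rsa.pub") // hypothetical path to a PEM file
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ publicKey, err := jwt.ParseRSAPublicKeyFromPEM(keyBytes)
+ if err != nil {
+ log.Fatal(err)
+ }
+```
+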
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, because JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
new file mode 100644
index 0000000..6370298
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
@@ -0,0 +1,118 @@
+## `jwt-go` Version History
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
\ No newline at end of file
diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go
new file mode 100644
index 0000000..f0228f0
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/claims.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// For a type to be a Claims object, it must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+ Valid() error
+}
+
+// Structured version of Claims Section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type StandardClaims struct {
+ Audience string `json:"aud,omitempty"`
+ ExpiresAt int64 `json:"exp,omitempty"`
+ Id string `json:"jti,omitempty"`
+ IssuedAt int64 `json:"iat,omitempty"`
+ Issuer string `json:"iss,omitempty"`
+ NotBefore int64 `json:"nbf,omitempty"`
+ Subject string `json:"sub,omitempty"`
+}
+
+// Validates the time-based claims "exp", "iat", and "nbf".
+// There is no accounting for clock skew.
+// Note that if any of these claims are absent from the token, the
+// token is still considered valid.
+func (c StandardClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+ vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = fmt.Errorf("Token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = fmt.Errorf("token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud(c.Audience, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ return verifyIat(c.IssuedAt, cmp, req)
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud string, cmp string, required bool) bool {
+ if aud == "" {
+ return !required
+ }
+ return subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+ if exp == 0 {
+ return !required
+ }
+ return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+ if iat == 0 {
+ return !required
+ }
+ return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+ if iss == "" {
+ return !required
+ }
+ return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+ if nbf == 0 {
+ return !required
+ }
+ return now >= nbf
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go
new file mode 100644
index 0000000..a86dc1a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 0000000..f977381
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
+ return nil
+ }
+ return ErrECDSAVerification
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return "", ErrInvalidKey
+ }
+
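+ // keyBytes is the curve size rounded up to a whole number of bytes (e.g. 66 for P-521).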
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return EncodeSegment(out), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
new file mode 100644
index 0000000..d19624b
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
@@ -0,0 +1,67 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+)
+
+// Parse PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go
new file mode 100644
index 0000000..1c93024
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/errors.go
@@ -0,0 +1,59 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// Helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Validation error is an error type
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ } else {
+ return "token is invalid"
+ }
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
new file mode 100644
index 0000000..addbe5d
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -0,0 +1,95 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// Implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Decode signature, for comparison
+ sig, err := DecodeSegment(signature)
+ if err != nil {
+ return err
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return EncodeSegment(hasher.Sum(nil)), nil
+ }
+
+ return "", ErrInvalidKeyType
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
new file mode 100644
index 0000000..291213c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+ "encoding/json"
+ "errors"
+ // "fmt"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+ aud, _ := m["aud"].(string)
+ return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ switch exp := m["exp"].(type) {
+ case float64:
+ return verifyExp(int64(exp), cmp, req)
+ case json.Number:
+ v, _ := exp.Int64()
+ return verifyExp(v, cmp, req)
+ }
+ return !req
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ switch iat := m["iat"].(type) {
+ case float64:
+ return verifyIat(int64(iat), cmp, req)
+ case json.Number:
+ v, _ := iat.Int64()
+ return verifyIat(v, cmp, req)
+ }
+ return !req
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+ iss, _ := m["iss"].(string)
+ return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ switch nbf := m["nbf"].(type) {
+ case float64:
+ return verifyNbf(int64(nbf), cmp, req)
+ case json.Number:
+ v, _ := nbf.Int64()
+ return verifyNbf(v, cmp, req)
+ }
+ return !req
+}
+
+// Validates the time-based claims "exp", "iat", and "nbf".
+// There is no accounting for clock skew.
+// Note that if any of these claims are absent from the token, the
+// token is still considered valid.
+func (m MapClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ if !m.VerifyExpiresAt(now, false) {
+ vErr.Inner = errors.New("Token is expired")
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !m.VerifyIssuedAt(now, false) {
+ vErr.Inner = errors.New("Token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !m.VerifyNotBefore(now, false) {
+ vErr.Inner = errors.New("Token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go
new file mode 100644
index 0000000..f04d189
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/none.go
@@ -0,0 +1,52 @@
+package jwt
+
+// Implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if signature != "" {
+ return NewValidationError(
+ "'none' signing method with non-empty signature",
+ ValidationErrorSignatureInvalid,
+ )
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return "", nil
+ }
+ return "", NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go
new file mode 100644
index 0000000..d6901d9
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/parser.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type Parser struct {
+ ValidMethods []string // If populated, only these methods will be considered valid
+ UseJSONNumber bool // Use JSON Number format in JSON decoder
+ SkipClaimsValidation bool // Skip claims validation during token parsing
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.ValidMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.ValidMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+ }
+ }
+
+ // Lookup key
+ var key interface{}
+ if keyFunc == nil {
+ // keyFunc was not provided, short-circuiting validation
+ return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+ }
+ if key, err = keyFunc(token); err != nil {
+ // keyFunc returned an error
+ if ve, ok := err.(*ValidationError); ok {
+ return token, ve
+ }
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+ }
+
+ vErr := &ValidationError{}
+
+ // Validate Claims
+ if !p.SkipClaimsValidation {
+ if err := token.Claims.Valid(); err != nil {
+
+ // If Claims.Valid returned an error, check whether it is a *ValidationError.
+ // If it is any other error type, wrap it in a ValidationError with the generic ClaimsInvalid flag set
+ if e, ok := err.(*ValidationError); !ok {
+ vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+ } else {
+ vErr = e
+ }
+ }
+ }
+
+ // Perform validation
+ token.Signature = parts[2]
+ if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ vErr.Inner = err
+ vErr.Errors |= ValidationErrorSignatureInvalid
+ }
+
+ if vErr.valid() {
+ token.Valid = true
+ return token, nil
+ }
+
+ return token, vErr
+}
+
+// WARNING: Don't use this method unless you know what you're doing
+//
+// This method parses the token but doesn't validate the signature. It's only
+// ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from
+// it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ parts = strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+ }
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.UseJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+ }
+ } else {
+ return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ }
+
+ return token, parts, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go
new file mode 100644
index 0000000..e4caf1c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// Implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return "", ErrInvalidKey
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
new file mode 100644
index 0000000..10ee9db
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
@@ -0,0 +1,126 @@
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// Implements the RSA-PSS family of signing methods.
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ Hash: crypto.SHA256,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ Hash: crypto.SHA384,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ Hash: crypto.SHA512,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return ErrInvalidKey
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
new file mode 100644
index 0000000..a5ababf
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
+ ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
+)
+
+// Parse PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
new file mode 100644
index 0000000..ed1f212
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
@@ -0,0 +1,35 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// Implement SigningMethod to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+ Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// Register the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// Get a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go
new file mode 100644
index 0000000..d637e08
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/token.go
@@ -0,0 +1,108 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+// TimeFunc provides the current time when parsing a token to validate the "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Parse methods use this callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// A JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+ Raw string // The raw token. Populated when you Parse a token
+ Method SigningMethod // The signing method used or to be used
+ Header map[string]interface{} // The first segment of the token
+ Claims Claims // The second segment of the token
+ Signature string // The third segment of the token. Populated when you Parse a token
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// Create a new Token. Takes a signing method
+func New(method SigningMethod) *Token {
+ return NewWithClaims(method, MapClaims{})
+}
+
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// Get the complete, signed token
+func (t *Token) SignedString(key interface{}) (string, error) {
+ var sig, sstr string
+ var err error
+ if sstr, err = t.SigningString(); err != nil {
+ return "", err
+ }
+ if sig, err = t.Method.Sign(sstr, key); err != nil {
+ return "", err
+ }
+ return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// Generate the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+ var err error
+ parts := make([]string, 2)
+ for i := range parts {
+ var jsonValue []byte
+ if i == 0 {
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
+ }
+ } else {
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ }
+
+ parts[i] = EncodeSegment(jsonValue)
+ }
+ return strings.Join(parts, "."), nil
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// Encode JWT specific base64url encoding with padding stripped
+func EncodeSegment(seg []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
+}
+
+// Decode JWT specific base64url encoding with padding stripped
+func DecodeSegment(seg string) ([]byte, error) {
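+ // JWTs strip base64 padding; restore it so URLEncoding can decode the segment.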
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+
+ return base64.URLEncoding.DecodeString(seg)
+}
diff --git a/vendor/github.com/dimchansky/utfbom/.gitignore b/vendor/github.com/dimchansky/utfbom/.gitignore
new file mode 100644
index 0000000..d7ec5ce
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/.gitignore
@@ -0,0 +1,37 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+*.o
+*.a
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.prof
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Gogland
+.idea/
\ No newline at end of file
diff --git a/vendor/github.com/dimchansky/utfbom/.travis.yml b/vendor/github.com/dimchansky/utfbom/.travis.yml
new file mode 100644
index 0000000..3512c85
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+ - '1.10'
+ - '1.11'
+
+# sudo=false makes the build run using a container
+sudo: false
+
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+ - go get golang.org/x/tools/cmd/goimports
+ - go get github.com/golang/lint/golint
+script:
+ - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. The following files have problems:\n $unformatted" && false)
+ - golint ./... # This won't break the build, just show warnings
+ - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md
new file mode 100644
index 0000000..8ece280
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/README.md
@@ -0,0 +1,66 @@
+# utfbom [GoDoc](https://godoc.org/github.com/dimchansky/utfbom) [License](https://opensource.org/licenses/Apache-2.0) [Build Status](https://travis-ci.org/dimchansky/utfbom) [Go Report Card](https://goreportcard.com/report/github.com/dimchansky/utfbom) [Coverage Status](https://coveralls.io/github/dimchansky/utfbom?branch=master)
+
+The utfbom package detects the BOM (Unicode Byte Order Mark) in a stream and removes it as necessary. It can also report the encoding indicated by the BOM.
+
+## Installation
+
+ go get -u github.com/dimchansky/utfbom
+
+## Example
+
+```go
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/dimchansky/utfbom"
+)
+
+func main() {
+ trySkip([]byte("\xEF\xBB\xBFhello"))
+ trySkip([]byte("hello"))
+}
+
+func trySkip(byteData []byte) {
+ fmt.Println("Input:", byteData)
+
+ // just skip BOM
+ output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData)))
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ fmt.Println("ReadAll with BOM skipping", output)
+
+ // skip BOM and detect encoding
+ sr, enc := utfbom.Skip(bytes.NewReader(byteData))
+ fmt.Printf("Detected encoding: %s\n", enc)
+ output, err = ioutil.ReadAll(sr)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ fmt.Println("ReadAll with BOM detection and skipping", output)
+ fmt.Println()
+}
+```
+
+Output:
+
+```
+$ go run main.go
+Input: [239 187 191 104 101 108 108 111]
+ReadAll with BOM skipping [104 101 108 108 111]
+Detected encoding: UTF8
+ReadAll with BOM detection and skipping [104 101 108 108 111]
+
+Input: [104 101 108 108 111]
+ReadAll with BOM skipping [104 101 108 108 111]
+Detected encoding: Unknown
+ReadAll with BOM detection and skipping [104 101 108 108 111]
+```
+
+
diff --git a/vendor/github.com/dimchansky/utfbom/go.mod b/vendor/github.com/dimchansky/utfbom/go.mod
new file mode 100644
index 0000000..4b9ecc6
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/go.mod
@@ -0,0 +1 @@
+module github.com/dimchansky/utfbom
\ No newline at end of file
diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go
new file mode 100644
index 0000000..77a303e
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/utfbom.go
@@ -0,0 +1,192 @@
+// Package utfbom implements detection and removal of the BOM (Unicode Byte Order Mark).
+// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader
+// interface but provides automatic BOM checking and removal.
+package utfbom
+
+import (
+ "errors"
+ "io"
+)
+
+// Encoding identifies the detected UTF encoding.
+type Encoding int
+
+// Constants to identify detected UTF encodings.
+const (
+ // Unknown encoding, returned when no BOM was detected
+ Unknown Encoding = iota
+
+ // UTF8, BOM bytes: EF BB BF
+ UTF8
+
+ // UTF-16, big-endian, BOM bytes: FE FF
+ UTF16BigEndian
+
+ // UTF-16, little-endian, BOM bytes: FF FE
+ UTF16LittleEndian
+
+ // UTF-32, big-endian, BOM bytes: 00 00 FE FF
+ UTF32BigEndian
+
+ // UTF-32, little-endian, BOM bytes: FF FE 00 00
+ UTF32LittleEndian
+)
+
+// String returns a user-friendly string representation of the encoding. It satisfies the fmt.Stringer interface.
+func (e Encoding) String() string {
+ switch e {
+ case UTF8:
+ return "UTF8"
+ case UTF16BigEndian:
+ return "UTF16BigEndian"
+ case UTF16LittleEndian:
+ return "UTF16LittleEndian"
+ case UTF32BigEndian:
+ return "UTF32BigEndian"
+ case UTF32LittleEndian:
+ return "UTF32LittleEndian"
+ default:
+ return "Unknown"
+ }
+}
+
+const maxConsecutiveEmptyReads = 100
+
+// Skip creates a Reader that automatically detects the BOM (Unicode Byte Order Mark) and removes it as necessary.
+// It also returns the encoding detected by the BOM.
+// If the detected encoding is not needed, you can call the SkipOnly function instead.
+func Skip(rd io.Reader) (*Reader, Encoding) {
+ // Is it already a Reader?
+ b, ok := rd.(*Reader)
+ if ok {
+ return b, Unknown
+ }
+
+ enc, left, err := detectUtf(rd)
+ return &Reader{
+ rd: rd,
+ buf: left,
+ err: err,
+ }, enc
+}
+
+// SkipOnly creates a Reader that automatically detects the BOM (Unicode Byte Order Mark) and removes it as necessary.
+func SkipOnly(rd io.Reader) *Reader {
+ r, _ := Skip(rd)
+ return r
+}
+
+// Reader implements automatic BOM (Unicode Byte Order Mark) checking and
+// removing as necessary for an io.Reader object.
+type Reader struct {
+ rd io.Reader // reader provided by the client
+ buf []byte // buffered data
+ err error // last error
+}
+
+// Read implements the io.Reader interface.
+// The bytes are taken from the underlying Reader, with any leading BOM already checked for and removed as necessary.
+func (r *Reader) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ if r.buf == nil {
+ if r.err != nil {
+ return 0, r.readErr()
+ }
+
+ return r.rd.Read(p)
+ }
+
+ // copy as much as we can
+ n = copy(p, r.buf)
+ r.buf = nilIfEmpty(r.buf[n:])
+ return n, nil
+}
+
+func (r *Reader) readErr() error {
+ err := r.err
+ r.err = nil
+ return err
+}
+
+var errNegativeRead = errors.New("utfbom: reader returned negative count from Read")
+
+func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) {
+ buf, err = readBOM(rd)
+
+ if len(buf) >= 4 {
+ if isUTF32BigEndianBOM4(buf) {
+ return UTF32BigEndian, nilIfEmpty(buf[4:]), err
+ }
+ if isUTF32LittleEndianBOM4(buf) {
+ return UTF32LittleEndian, nilIfEmpty(buf[4:]), err
+ }
+ }
+
+ if len(buf) > 2 && isUTF8BOM3(buf) {
+ return UTF8, nilIfEmpty(buf[3:]), err
+ }
+
+ if (err != nil && err != io.EOF) || (len(buf) < 2) {
+ return Unknown, nilIfEmpty(buf), err
+ }
+
+ if isUTF16BigEndianBOM2(buf) {
+ return UTF16BigEndian, nilIfEmpty(buf[2:]), err
+ }
+ if isUTF16LittleEndianBOM2(buf) {
+ return UTF16LittleEndian, nilIfEmpty(buf[2:]), err
+ }
+
+ return Unknown, nilIfEmpty(buf), err
+}
+
+func readBOM(rd io.Reader) (buf []byte, err error) {
+ const maxBOMSize = 4
+ var bom [maxBOMSize]byte // used to read BOM
+
+ // read as many bytes as possible
+ for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] {
+ if n, err = rd.Read(bom[len(buf):]); n < 0 {
+ panic(errNegativeRead)
+ }
+ if n > 0 {
+ nEmpty = 0
+ } else {
+ nEmpty++
+ if nEmpty >= maxConsecutiveEmptyReads {
+ err = io.ErrNoProgress
+ }
+ }
+ }
+ return
+}
+
+func isUTF32BigEndianBOM4(buf []byte) bool {
+ return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF
+}
+
+func isUTF32LittleEndianBOM4(buf []byte) bool {
+ return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00
+}
+
+func isUTF8BOM3(buf []byte) bool {
+ return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF
+}
+
+func isUTF16BigEndianBOM2(buf []byte) bool {
+ return buf[0] == 0xFE && buf[1] == 0xFF
+}
+
+func isUTF16LittleEndianBOM2(buf []byte) bool {
+ return buf[0] == 0xFF && buf[1] == 0xFE
+}
+
+func nilIfEmpty(buf []byte) (res []byte) {
+ if len(buf) > 0 {
+ res = buf
+ }
+ return
+}
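One detail of `detectUtf` above worth calling out: the four-byte UTF-32 BOMs are tested before the two-byte UTF-16 ones because the UTF-16LE BOM (`FF FE`) is a prefix of the UTF-32LE BOM (`FF FE 00 00`). A small sketch of the resulting behavior through the exported API:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/dimchansky/utfbom"
)

func main() {
	// Four-byte UTF-32LE BOM: must not be misreported as UTF-16LE,
	// whose two-byte BOM is a prefix of it.
	_, enc := utfbom.Skip(bytes.NewReader([]byte("\xFF\xFE\x00\x00")))
	fmt.Println(enc) // UTF32LittleEndian

	// Two-byte UTF-16LE BOM on its own.
	_, enc = utfbom.Skip(bytes.NewReader([]byte("\xFF\xFE")))
	fmt.Println(enc) // UTF16LittleEndian
}
```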
diff --git a/vendor/github.com/mattn/go-ieproxy/.gitignore b/vendor/github.com/mattn/go-ieproxy/.gitignore
new file mode 100644
index 0000000..bc8a670
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/.gitignore
@@ -0,0 +1 @@
+.idea/*
\ No newline at end of file
diff --git a/vendor/github.com/mattn/go-ieproxy/GetProxyFunc.go b/vendor/github.com/mattn/go-ieproxy/GetProxyFunc.go
new file mode 100644
index 0000000..b2ff914
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/GetProxyFunc.go
@@ -0,0 +1,11 @@
+package ieproxy
+
+import (
+ "net/http"
+ "net/url"
+)
+
+// GetProxyFunc is a forwarder for the OS-specific proxyMiddleman_*.go implementations.
+func GetProxyFunc() func(*http.Request) (*url.URL, error) {
+ return proxyMiddleman()
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/LICENSE b/vendor/github.com/mattn/go-ieproxy/LICENSE
new file mode 100644
index 0000000..7b7c0f8
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2014 mattn
+Copyright (c) 2017 oliverpool
+Copyright (c) 2019 Adele Reed
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-ieproxy/README.md b/vendor/github.com/mattn/go-ieproxy/README.md
new file mode 100644
index 0000000..fbc801a
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/README.md
@@ -0,0 +1,49 @@
+# ieproxy
+
+Go package to detect the proxy settings on Windows platform.
+
+The settings are first read via the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), falling back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) in the event the DLL call fails.
+
+For more information, take a look at the [documentation](https://godoc.org/github.com/mattn/go-ieproxy).
+
+## Methods
+
+You can either obtain a `net/http` compatible proxy function using `ieproxy.GetProxyFunc()`, set environment variables using `ieproxy.OverrideEnvWithStaticProxy()` (though no automatic configuration is available this way), or obtain the proxy settings via `ieproxy.GetConf()`.
+
+| Method | Supported configuration options |
+|----------------------------------------|-----------------------------------------------|
+| `ieproxy.GetProxyFunc()` | Static, Specified script, and fully automatic |
+| `ieproxy.OverrideEnvWithStaticProxy()` | Static |
+| `ieproxy.GetConf()` | Depends on how you use it |
+
+## Examples
+
+### Using GetProxyFunc():
+
+```go
+func init() {
+ http.DefaultTransport.(*http.Transport).Proxy = ieproxy.GetProxyFunc()
+}
+```
+
+GetProxyFunc acts as a middleman between `net/http` and `mattn/go-ieproxy` to select the correct proxy configuration based on the details supplied in the config.
+
+### Using OverrideEnvWithStaticProxy():
+
+```go
+func init() {
+ ieproxy.OverrideEnvWithStaticProxy()
+ http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment
+}
+```
+
+OverrideEnvWithStaticProxy overrides the relevant environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`) with the **static, manually configured** proxy details typically found in the registry.
+
+### Using GetConf():
+
+```go
+func main() {
+ conf := ieproxy.GetConf()
+ // Handle proxies however you want.
+}
+```
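The `GetConf()` example in the README above is deliberately bare. A slightly fuller sketch of inspecting the returned `ProxyConf` (field names are taken from `ieproxy.go` later in this patch; the printed strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mattn/go-ieproxy"
)

func main() {
	conf := ieproxy.GetConf()

	if conf.Static.Active {
		// Per-scheme proxies; the "" key is the fallback proxy.
		fmt.Println("http proxy:", conf.Static.Protocols["http"])
		fmt.Println("bypass list:", conf.Static.NoProxy)
	}
	if conf.Automatic.Active {
		// An empty PreConfiguredURL with Active set means full auto-detection.
		fmt.Println("PAC URL:", conf.Automatic.PreConfiguredURL)
	}
}
```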
diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy.go b/vendor/github.com/mattn/go-ieproxy/ieproxy.go
new file mode 100644
index 0000000..51fe18e
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/ieproxy.go
@@ -0,0 +1,51 @@
+// Package ieproxy is a utility to retrieve the proxy parameters (especially those of Internet Explorer on Windows).
+//
+// On Windows, it gathers the parameters from the registry, while on other platforms it uses environment variables.
+package ieproxy
+
+import "os"
+
+// ProxyConf gathers the proxy configuration.
+type ProxyConf struct {
+ Static StaticProxyConf // static configuration
+ Automatic ProxyScriptConf // script configuration
+}
+
+// StaticProxyConf contains the configuration for a static proxy.
+type StaticProxyConf struct {
+ // Is the proxy active?
+ Active bool
+ // Proxy address for each scheme (http, https)
+ // "" (empty string) is the fallback proxy
+ Protocols map[string]string
+ // Addresses not to be accessed via the proxy (comma-separated, Linux-style)
+ NoProxy string
+}
+
+// ProxyScriptConf contains the configuration for an automatic (PAC) proxy.
+type ProxyScriptConf struct {
+ // Is the proxy active?
+ Active bool
+ // PreConfiguredURL of the .pac file.
+ // If this is empty and Active is true, auto-configuration should be assumed.
+ PreConfiguredURL string
+}
+
+// GetConf retrieves the proxy configuration from the Windows registry.
+func GetConf() ProxyConf {
+ return getConf()
+}
+
+// OverrideEnvWithStaticProxy writes new values to the
+// `http_proxy`, `https_proxy` and `no_proxy` environment variables.
+// The values are taken from the Windows registry (it should be called from an `init()` function; see the example).
+func OverrideEnvWithStaticProxy() {
+ overrideEnvWithStaticProxy(GetConf(), os.Setenv)
+}
+
+// FindProxyForURL computes the proxy for a given URL according to the PAC file.
+func (psc *ProxyScriptConf) FindProxyForURL(URL string) string {
+ return psc.findProxyForURL(URL)
+}
+
+type envSetter func(string, string) error
diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go
new file mode 100644
index 0000000..dc2bccf
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ieproxy
+
+func getConf() ProxyConf {
+ return ProxyConf{}
+}
+
+func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) {
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go
new file mode 100644
index 0000000..a3d4c11
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go
@@ -0,0 +1,164 @@
+package ieproxy
+
+import (
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+type regeditValues struct {
+ ProxyServer string
+ ProxyOverride string
+ ProxyEnable uint64
+ AutoConfigURL string
+}
+
+var once sync.Once
+var windowsProxyConf ProxyConf
+
+// getConf retrieves the proxy configuration from the Windows registry.
+func getConf() ProxyConf {
+ once.Do(writeConf)
+ return windowsProxyConf
+}
+
+func writeConf() {
+ var (
+ cfg *tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG
+ err error
+ )
+
+ if cfg, err = getUserConfigFromWindowsSyscall(); err != nil {
+ regedit, _ := readRegedit() // If the syscall fails, backup to manual detection.
+ windowsProxyConf = parseRegedit(regedit)
+ return
+ }
+
+ defer globalFreeWrapper(cfg.lpszProxy)
+ defer globalFreeWrapper(cfg.lpszProxyBypass)
+ defer globalFreeWrapper(cfg.lpszAutoConfigUrl)
+
+ windowsProxyConf = ProxyConf{
+ Static: StaticProxyConf{
+ Active: cfg.lpszProxy != nil,
+ },
+ Automatic: ProxyScriptConf{
+ Active: cfg.lpszAutoConfigUrl != nil || cfg.fAutoDetect,
+ },
+ }
+
+ if windowsProxyConf.Static.Active {
+ protocol := make(map[string]string)
+ for _, s := range strings.Split(StringFromUTF16Ptr(cfg.lpszProxy), ";") {
+ s = strings.TrimSpace(s)
+ if s == "" {
+ continue
+ }
+ pair := strings.SplitN(s, "=", 2)
+ if len(pair) > 1 {
+ protocol[pair[0]] = pair[1]
+ } else {
+ protocol[""] = pair[0]
+ }
+ }
+
+ windowsProxyConf.Static.Protocols = protocol
+ if cfg.lpszProxyBypass != nil {
+ windowsProxyConf.Static.NoProxy = strings.Replace(StringFromUTF16Ptr(cfg.lpszProxyBypass), ";", ",", -1)
+ }
+ }
+
+ if windowsProxyConf.Automatic.Active {
+ windowsProxyConf.Automatic.PreConfiguredURL = StringFromUTF16Ptr(cfg.lpszAutoConfigUrl)
+ }
+}
+
+func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) {
+ handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0)
+ if handle == 0 {
+ return &tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG{}, err
+ }
+ defer winHttpCloseHandle.Call(handle)
+
+ config := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG)
+
+ ret, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(config)))
+ if ret > 0 {
+ err = nil
+ }
+
+ return config, err
+}
+
+// overrideEnvWithStaticProxy writes new values to the
+// http_proxy, https_proxy and no_proxy environment variables.
+// The values are taken from the Windows registry (it should be called from an init() function).
+func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) {
+ if conf.Static.Active {
+ for _, scheme := range []string{"http", "https"} {
+ url := mapFallback(scheme, "", conf.Static.Protocols)
+ setenv(scheme+"_proxy", url)
+ }
+ if conf.Static.NoProxy != "" {
+ setenv("no_proxy", conf.Static.NoProxy)
+ }
+ }
+}
+
+func parseRegedit(regedit regeditValues) ProxyConf {
+ protocol := make(map[string]string)
+ for _, s := range strings.Split(regedit.ProxyServer, ";") {
+ if s == "" {
+ continue
+ }
+ pair := strings.SplitN(s, "=", 2)
+ if len(pair) > 1 {
+ protocol[pair[0]] = pair[1]
+ } else {
+ protocol[""] = pair[0]
+ }
+ }
+
+ return ProxyConf{
+ Static: StaticProxyConf{
+ Active: regedit.ProxyEnable > 0,
+ Protocols: protocol,
+ NoProxy: strings.Replace(regedit.ProxyOverride, ";", ",", -1), // to match linux style
+ },
+ Automatic: ProxyScriptConf{
+ Active: regedit.AutoConfigURL != "",
+ PreConfiguredURL: regedit.AutoConfigURL,
+ },
+ }
+}
+
+func readRegedit() (values regeditValues, err error) {
+ k, err := registry.OpenKey(registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
+ if err != nil {
+ return
+ }
+ defer k.Close()
+
+ values.ProxyServer, _, err = k.GetStringValue("ProxyServer")
+ if err != nil && err != registry.ErrNotExist {
+ return
+ }
+ values.ProxyOverride, _, err = k.GetStringValue("ProxyOverride")
+ if err != nil && err != registry.ErrNotExist {
+ return
+ }
+
+ values.ProxyEnable, _, err = k.GetIntegerValue("ProxyEnable")
+ if err != nil && err != registry.ErrNotExist {
+ return
+ }
+
+ values.AutoConfigURL, _, err = k.GetStringValue("AutoConfigURL")
+ if err != nil && err != registry.ErrNotExist {
+ return
+ }
+ err = nil
+ return
+}
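As context for `parseRegedit` above: the registry's `ProxyServer` value is either a single `host:port` used for every scheme, or a semicolon-separated list of `scheme=host` pairs. Since `parseRegedit` is unexported, the sketch below simply mirrors its splitting logic to show the two shapes (the values are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// parse mirrors the ProxyServer handling in parseRegedit: a bare value
// becomes the "" fallback entry, scheme=host pairs become per-scheme entries.
func parse(proxyServer string) map[string]string {
	protocol := make(map[string]string)
	for _, s := range strings.Split(proxyServer, ";") {
		if s == "" {
			continue
		}
		pair := strings.SplitN(s, "=", 2)
		if len(pair) > 1 {
			protocol[pair[0]] = pair[1]
		} else {
			protocol[""] = pair[0]
		}
	}
	return protocol
}

func main() {
	fmt.Println(parse("proxy.example.com:8080"))
	// map[:proxy.example.com:8080]
	fmt.Println(parse("http=proxy:80;https=sproxy:443"))
	// map[http:proxy:80 https:sproxy:443]
}
```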
diff --git a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go
new file mode 100644
index 0000000..cfb4349
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go
@@ -0,0 +1,15 @@
+package ieproxy
+
+import (
+ "golang.org/x/sys/windows"
+ "unsafe"
+)
+
+var kernel32 = windows.NewLazySystemDLL("kernel32.dll")
+var globalFree = kernel32.NewProc("GlobalFree")
+
+func globalFreeWrapper(ptr *uint16) {
+ if ptr != nil {
+ _, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr)))
+ }
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/pac_unix.go b/vendor/github.com/mattn/go-ieproxy/pac_unix.go
new file mode 100644
index 0000000..d44ec3c
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/pac_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package ieproxy
+
+func (psc *ProxyScriptConf) findProxyForURL(URL string) string {
+ return ""
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/pac_windows.go b/vendor/github.com/mattn/go-ieproxy/pac_windows.go
new file mode 100644
index 0000000..6a2ee67
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/pac_windows.go
@@ -0,0 +1,72 @@
+package ieproxy
+
+import (
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func (psc *ProxyScriptConf) findProxyForURL(URL string) string {
+ if !psc.Active {
+ return ""
+ }
+ proxy, _ := getProxyForURL(psc.PreConfiguredURL, URL)
+ i := strings.Index(proxy, ";")
+ if i >= 0 {
+ return proxy[:i]
+ }
+ return proxy
+}
+
+func getProxyForURL(pacfileURL, URL string) (string, error) {
+ pacfileURLPtr, err := syscall.UTF16PtrFromString(pacfileURL)
+ if err != nil {
+ return "", err
+ }
+ URLPtr, err := syscall.UTF16PtrFromString(URL)
+ if err != nil {
+ return "", err
+ }
+
+ handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0)
+ if handle == 0 {
+ return "", err
+ }
+ defer winHttpCloseHandle.Call(handle)
+
+ dwFlags := fWINHTTP_AUTOPROXY_CONFIG_URL
+ dwAutoDetectFlags := autoDetectFlag(0)
+ pfURLptr := pacfileURLPtr
+
+ if pacfileURL == "" {
+ dwFlags = fWINHTTP_AUTOPROXY_AUTO_DETECT
+ dwAutoDetectFlags = fWINHTTP_AUTO_DETECT_TYPE_DNS_A | fWINHTTP_AUTO_DETECT_TYPE_DHCP
+ pfURLptr = nil
+ }
+
+ options := tWINHTTP_AUTOPROXY_OPTIONS{
+ dwFlags: dwFlags, // adding cache might cause issues: https://github.com/mattn/go-ieproxy/issues/6
+ dwAutoDetectFlags: dwAutoDetectFlags,
+ lpszAutoConfigUrl: pfURLptr,
+ lpvReserved: nil,
+ dwReserved: 0,
+ fAutoLogonIfChallenged: true, // may not be optimal https://msdn.microsoft.com/en-us/library/windows/desktop/aa383153(v=vs.85).aspx
+ } // lpszProxyBypass isn't used as this only executes in cases where there (may) be a pac file (autodetect can fail), where lpszProxyBypass couldn't be returned.
+ // in the case that autodetect fails and no pre-specified pacfile is present, no proxy is returned.
+
+ info := new(tWINHTTP_PROXY_INFO)
+
+ ret, _, err := winHttpGetProxyForURL.Call(
+ handle,
+ uintptr(unsafe.Pointer(URLPtr)),
+ uintptr(unsafe.Pointer(&options)),
+ uintptr(unsafe.Pointer(info)),
+ )
+ if ret > 0 {
+ err = nil
+ }
+
+ defer globalFreeWrapper(info.lpszProxyBypass)
+ defer globalFreeWrapper(info.lpszProxy)
+ return StringFromUTF16Ptr(info.lpszProxy), err
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_unix.go b/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_unix.go
new file mode 100644
index 0000000..7ddbe2e
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_unix.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package ieproxy
+
+import (
+ "net/http"
+ "net/url"
+)
+
+func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) {
+ // Fallthrough to ProxyFromEnvironment on all other OSes.
+ return http.ProxyFromEnvironment
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_windows.go b/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_windows.go
new file mode 100644
index 0000000..355d432
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_windows.go
@@ -0,0 +1,51 @@
+package ieproxy
+
+import (
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/http/httpproxy"
+)
+
+func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) {
+ // Get the proxy configuration
+ conf := GetConf()
+ envcfg := httpproxy.FromEnvironment()
+
+ if envcfg.HTTPProxy != "" || envcfg.HTTPSProxy != "" {
+ // If the user manually specifies environment variables, prefer those over the Windows config.
+ return http.ProxyFromEnvironment
+ } else if conf.Automatic.Active {
+ // If automatic proxy obtaining is specified
+ return func(req *http.Request) (i *url.URL, e error) {
+ host := conf.Automatic.FindProxyForURL(req.URL.String())
+ if host == "" {
+ return nil, nil
+ }
+ return &url.URL{Host: host}, nil
+ }
+ } else if conf.Static.Active {
+ // If static proxy obtaining is specified
+ prox := httpproxy.Config{
+ HTTPSProxy: mapFallback("https", "", conf.Static.Protocols),
+ HTTPProxy: mapFallback("http", "", conf.Static.Protocols),
+ NoProxy: conf.Static.NoProxy,
+ }
+
+ return func(req *http.Request) (i *url.URL, e error) {
+ return prox.ProxyFunc()(req.URL)
+ }
+ } else {
+ // Final fallthrough case; use the environment variables.
+ return http.ProxyFromEnvironment
+ }
+}
+
+// mapFallback returns m[oKey], or m[fbKey] if oKey is not present in the map.
+func mapFallback(oKey, fbKey string, m map[string]string) string {
+ if v, ok := m[oKey]; ok {
+ return v
+ }
+ return m[fbKey]
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/utils.go b/vendor/github.com/mattn/go-ieproxy/utils.go
new file mode 100644
index 0000000..353b231
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/utils.go
@@ -0,0 +1,23 @@
+package ieproxy
+
+import (
+ "unicode/utf16"
+ "unsafe"
+)
+
+// StringFromUTF16Ptr converts a *uint16 C string to a Go string.
+func StringFromUTF16Ptr(s *uint16) string {
+ if s == nil {
+ return ""
+ }
+
+ p := (*[1<<30 - 1]uint16)(unsafe.Pointer(s))
+
+ // find the string length
+ sz := 0
+ for p[sz] != 0 {
+ sz++
+ }
+
+ return string(utf16.Decode(p[:sz:sz]))
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go
new file mode 100644
index 0000000..560940d
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go
@@ -0,0 +1,50 @@
+package ieproxy
+
+import "golang.org/x/sys/windows"
+
+var winHttp = windows.NewLazySystemDLL("winhttp.dll")
+var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl")
+var winHttpOpen = winHttp.NewProc("WinHttpOpen")
+var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle")
+var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser")
+
+type tWINHTTP_AUTOPROXY_OPTIONS struct {
+ dwFlags autoProxyFlag
+ dwAutoDetectFlags autoDetectFlag
+ lpszAutoConfigUrl *uint16
+ lpvReserved *uint16
+ dwReserved uint32
+ fAutoLogonIfChallenged bool
+}
+type autoProxyFlag uint32
+
+const (
+ fWINHTTP_AUTOPROXY_AUTO_DETECT = autoProxyFlag(0x00000001)
+ fWINHTTP_AUTOPROXY_CONFIG_URL = autoProxyFlag(0x00000002)
+ fWINHTTP_AUTOPROXY_NO_CACHE_CLIENT = autoProxyFlag(0x00080000)
+ fWINHTTP_AUTOPROXY_NO_CACHE_SVC = autoProxyFlag(0x00100000)
+ fWINHTTP_AUTOPROXY_NO_DIRECTACCESS = autoProxyFlag(0x00040000)
+ fWINHTTP_AUTOPROXY_RUN_INPROCESS = autoProxyFlag(0x00010000)
+ fWINHTTP_AUTOPROXY_RUN_OUTPROCESS_ONLY = autoProxyFlag(0x00020000)
+ fWINHTTP_AUTOPROXY_SORT_RESULTS = autoProxyFlag(0x00400000)
+)
+
+type autoDetectFlag uint32
+
+const (
+ fWINHTTP_AUTO_DETECT_TYPE_DHCP = autoDetectFlag(0x00000001)
+ fWINHTTP_AUTO_DETECT_TYPE_DNS_A = autoDetectFlag(0x00000002)
+)
+
+type tWINHTTP_PROXY_INFO struct {
+ dwAccessType uint32
+ lpszProxy *uint16
+ lpszProxyBypass *uint16
+}
+
+type tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct {
+ fAutoDetect bool
+ lpszAutoConfigUrl *uint16
+ lpszProxy *uint16
+ lpszProxyBypass *uint16
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
new file mode 100644
index 0000000..233b8b6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "errors"
+ "unicode/utf16"
+)
+
+// bmpString returns s encoded in UCS-2 with a zero terminator.
+func bmpString(s string) ([]byte, error) {
+ // References:
+ // https://tools.ietf.org/html/rfc7292#appendix-B.1
+ // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+ // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes
+ // EncodeRune returns 0xfffd if the rune does not need special encoding
+ // - the above RFC provides the info that BMPStrings are NULL terminated.
+
+ ret := make([]byte, 0, 2*len(s)+2)
+
+ for _, r := range s {
+ if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+ return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+ }
+ ret = append(ret, byte(r/256), byte(r%256))
+ }
+
+ return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+ if len(bmpString)%2 != 0 {
+ return "", errors.New("pkcs12: odd-length BMP string")
+ }
+
+ // strip terminator if present
+ if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+ bmpString = bmpString[:l-2]
+ }
+
+ s := make([]uint16, 0, len(bmpString)/2)
+ for len(bmpString) > 0 {
+ s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+ bmpString = bmpString[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+}
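A worked example of the encoding implemented by `bmpString` above: each BMP rune becomes two big-endian bytes, followed by a two-byte NULL terminator. The sketch mirrors the loop (minus the surrogate check), since `bmpString` itself is unexported:

```go
package main

import "fmt"

func main() {
	s := "ab"
	// Big-endian UCS-2: high byte first, then low byte, per rune.
	ret := make([]byte, 0, 2*len(s)+2)
	for _, r := range s {
		ret = append(ret, byte(r/256), byte(r%256))
	}
	ret = append(ret, 0, 0)  // NULL terminator required by BMPString
	fmt.Printf("% x\n", ret) // 00 61 00 62 00 00
}
```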
diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go
new file mode 100644
index 0000000..484ca51
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+
+ "golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+ oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+ oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+ // create returns a cipher.Block given a key.
+ create(key []byte) (cipher.Block, error)
+ // deriveKey returns a key derived from the given password and salt.
+ deriveKey(salt, password []byte, iterations int) []byte
+ // deriveKey returns an IV derived from the given password and salt.
+ deriveIV(salt, password []byte, iterations int) []byte
+}
+
+type shaWithTripleDESCBC struct{}
+
+func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
+ return des.NewTripleDESCipher(key)
+}
+
+func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
+}
+
+func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type shaWith40BitRC2CBC struct{}
+
+func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
+ return rc2.New(key, len(key)*8)
+}
+
+func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
+}
+
+func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type pbeParams struct {
+ Salt []byte
+ Iterations int
+}
+
+func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
+ var cipherType pbeCipher
+
+ switch {
+ case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
+ cipherType = shaWithTripleDESCBC{}
+ case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
+ cipherType = shaWith40BitRC2CBC{}
+ default:
+ return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
+ }
+
+ var params pbeParams
+ if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
+ return nil, 0, err
+ }
+
+ key := cipherType.deriveKey(params.Salt, password, params.Iterations)
+ iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
+
+ block, err := cipherType.create(key)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
+}
+
+func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
+ cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
+ if err != nil {
+ return nil, err
+ }
+
+ encrypted := info.Data()
+ if len(encrypted) == 0 {
+ return nil, errors.New("pkcs12: empty encrypted data")
+ }
+ if len(encrypted)%blockSize != 0 {
+ return nil, errors.New("pkcs12: input is not a multiple of the block size")
+ }
+ decrypted = make([]byte, len(encrypted))
+ cbc.CryptBlocks(decrypted, encrypted)
+
+ psLen := int(decrypted[len(decrypted)-1])
+ if psLen == 0 || psLen > blockSize {
+ return nil, ErrDecryption
+ }
+
+ if len(decrypted) < psLen {
+ return nil, ErrDecryption
+ }
+ ps := decrypted[len(decrypted)-psLen:]
+ decrypted = decrypted[:len(decrypted)-psLen]
+ if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) {
+ return nil, ErrDecryption
+ }
+
+ return
+}
+
+// decryptable abstracts an object that contains ciphertext.
+type decryptable interface {
+ Algorithm() pkix.AlgorithmIdentifier
+ Data() []byte
+}
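For reference, the padding that `pbDecrypt` strips is PKCS#7-style: a 5-byte plaintext in an 8-byte block is followed by three bytes of `0x03`. A small sketch of the same check the function performs on the decrypted buffer:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Example decrypted block: 5 plaintext bytes plus three 0x03 padding bytes.
	decrypted := []byte("hello\x03\x03\x03")
	psLen := int(decrypted[len(decrypted)-1])
	ps := decrypted[len(decrypted)-psLen:]
	ok := bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen))
	fmt.Println(ok, string(decrypted[:len(decrypted)-psLen])) // true hello
}
```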
diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go
new file mode 100644
index 0000000..7377ce6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/errors.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import "errors"
+
+var (
+ // ErrDecryption represents a failure to decrypt the input.
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")
+
+ // ErrIncorrectPassword is returned when an incorrect password is detected.
+ // Usually, P12/PFX data is signed to be able to verify the password.
+ ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
+)
+
+// NotImplementedError indicates that the input is not currently supported.
+type NotImplementedError string
+
+func (e NotImplementedError) Error() string {
+ return "pkcs12: " + string(e)
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
new file mode 100644
index 0000000..7499e3f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
@@ -0,0 +1,271 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rc2 implements the RC2 cipher
+/*
+https://www.ietf.org/rfc/rfc2268.txt
+http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf
+
+This code is licensed under the MIT license.
+*/
+package rc2
+
+import (
+ "crypto/cipher"
+ "encoding/binary"
+)
+
+// BlockSize is the RC2 block size in bytes.
+const BlockSize = 8
+
+type rc2Cipher struct {
+ k [64]uint16
+}
+
+// New returns a new RC2 cipher with the given key and effective key length t1 in bits.
+func New(key []byte, t1 int) (cipher.Block, error) {
+ // TODO(dgryski): error checking for key length
+ return &rc2Cipher{
+ k: expandKey(key, t1),
+ }, nil
+}
+
+func (*rc2Cipher) BlockSize() int { return BlockSize }
+
+var piTable = [256]byte{
+ 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
+ 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2,
+ 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
+ 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82,
+ 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc,
+ 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
+ 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03,
+ 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7,
+ 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
+ 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec,
+ 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39,
+ 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
+ 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9,
+ 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9,
+ 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
+ 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
+}
+
+func expandKey(key []byte, t1 int) [64]uint16 {
+
+ l := make([]byte, 128)
+ copy(l, key)
+
+ var t = len(key)
+ var t8 = (t1 + 7) / 8
+ var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))
+
+ for i := len(key); i < 128; i++ {
+ l[i] = piTable[l[i-1]+l[uint8(i-t)]]
+ }
+
+ l[128-t8] = piTable[l[128-t8]&tm]
+
+ for i := 127 - t8; i >= 0; i-- {
+ l[i] = piTable[l[i+1]^l[i+t8]]
+ }
+
+ var k [64]uint16
+
+ for i := range k {
+ k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
+ }
+
+ return k
+}
+
+func rotl16(x uint16, b uint) uint16 {
+ return (x >> (16 - b)) | (x << b)
+}
+
+func (c *rc2Cipher) Encrypt(dst, src []byte) {
+
+ r0 := binary.LittleEndian.Uint16(src[0:])
+ r1 := binary.LittleEndian.Uint16(src[2:])
+ r2 := binary.LittleEndian.Uint16(src[4:])
+ r3 := binary.LittleEndian.Uint16(src[6:])
+
+ var j int
+
+ for j <= 16 {
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+
+ }
+
+ r0 = r0 + c.k[r3&63]
+ r1 = r1 + c.k[r0&63]
+ r2 = r2 + c.k[r1&63]
+ r3 = r3 + c.k[r2&63]
+
+ for j <= 40 {
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+
+ }
+
+ r0 = r0 + c.k[r3&63]
+ r1 = r1 + c.k[r0&63]
+ r2 = r2 + c.k[r1&63]
+ r3 = r3 + c.k[r2&63]
+
+ for j <= 60 {
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+ }
+
+ binary.LittleEndian.PutUint16(dst[0:], r0)
+ binary.LittleEndian.PutUint16(dst[2:], r1)
+ binary.LittleEndian.PutUint16(dst[4:], r2)
+ binary.LittleEndian.PutUint16(dst[6:], r3)
+}
+
+func (c *rc2Cipher) Decrypt(dst, src []byte) {
+
+ r0 := binary.LittleEndian.Uint16(src[0:])
+ r1 := binary.LittleEndian.Uint16(src[2:])
+ r2 := binary.LittleEndian.Uint16(src[4:])
+ r3 := binary.LittleEndian.Uint16(src[6:])
+
+ j := 63
+
+ for j >= 44 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+ }
+
+ r3 = r3 - c.k[r2&63]
+ r2 = r2 - c.k[r1&63]
+ r1 = r1 - c.k[r0&63]
+ r0 = r0 - c.k[r3&63]
+
+ for j >= 20 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+
+ }
+
+ r3 = r3 - c.k[r2&63]
+ r2 = r2 - c.k[r1&63]
+ r1 = r1 - c.k[r0&63]
+ r0 = r0 - c.k[r3&63]
+
+ for j >= 0 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+
+ }
+
+ binary.LittleEndian.PutUint16(dst[0:], r0)
+ binary.LittleEndian.PutUint16(dst[2:], r1)
+ binary.LittleEndian.PutUint16(dst[4:], r2)
+ binary.LittleEndian.PutUint16(dst[6:], r3)
+}
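Because this package lives under `internal/`, it is only importable from within `golang.org/x/crypto/pkcs12`, but for review purposes the returned `cipher.Block` behaves like any other block cipher. A sketch (the key bytes are arbitrary; the import path will not resolve outside x/crypto):

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/pkcs12/internal/rc2"
)

func main() {
	key := []byte{0x88, 0xbc, 0xa9, 0x0e, 0x90} // arbitrary 40-bit key
	c, err := rc2.New(key, 40)                  // t1: effective key length in bits
	if err != nil {
		panic(err)
	}
	src := make([]byte, rc2.BlockSize)
	dst := make([]byte, rc2.BlockSize)
	c.Encrypt(dst, src)
	out := make([]byte, rc2.BlockSize)
	c.Decrypt(out, dst)
	fmt.Println(bytes.Equal(out, src)) // true: decryption round-trips
}
```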
diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go
new file mode 100644
index 0000000..5f38aa7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/mac.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+)
+
+type macData struct {
+ Mac digestInfo
+ MacSalt []byte
+ Iterations int `asn1:"optional,default:1"`
+}
+
+// from PKCS#7:
+type digestInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ Digest []byte
+}
+
+var (
+ oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
+)
+
+func verifyMac(macData *macData, message, password []byte) error {
+ if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
+ return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
+ }
+
+ key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)
+
+ mac := hmac.New(sha1.New, key)
+ mac.Write(message)
+ expectedMAC := mac.Sum(nil)
+
+ if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
+ return ErrIncorrectPassword
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go
new file mode 100644
index 0000000..5c419d4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go
@@ -0,0 +1,170 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "math/big"
+)
+
+var (
+ one = big.NewInt(1)
+)
+
+// sha1Sum returns the SHA-1 hash of in.
+func sha1Sum(in []byte) []byte {
+ sum := sha1.Sum(in)
+ return sum[:]
+}
+
+// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
+// repeats of pattern.
+func fillWithRepeats(pattern []byte, v int) []byte {
+ if len(pattern) == 0 {
+ return nil
+ }
+ outputLen := v * ((len(pattern) + v - 1) / v)
+ return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
+}
+
+func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
+ // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments
+
+ // Let H be a hash function built around a compression function f:
+
+ // Z_2^u x Z_2^v -> Z_2^u
+
+ // (that is, H has a chaining variable and output of length u bits, and
+ // the message input to the compression function of H is v bits). The
+ // values for u and v are as follows:
+
+ // HASH FUNCTION VALUE u VALUE v
+ // MD2, MD5 128 512
+ // SHA-1 160 512
+ // SHA-224 224 512
+ // SHA-256 256 512
+ // SHA-384 384 1024
+ // SHA-512 512 1024
+ // SHA-512/224 224 1024
+ // SHA-512/256 256 1024
+
+ // Furthermore, let r be the iteration count.
+
+ // We assume here that u and v are both multiples of 8, as are the
+ // lengths of the password and salt strings (which we denote by p and s,
+ // respectively) and the number n of pseudorandom bits required. In
+ // addition, u and v are of course non-zero.
+
+ // For information on security considerations for MD5 [19], see [25] and
+ // [1], and on those for MD2, see [18].
+
+ // The following procedure can be used to produce pseudorandom bits for
+ // a particular "purpose" that is identified by a byte called "ID".
+ // This standard specifies 3 different values for the ID byte:
+
+ // 1. If ID=1, then the pseudorandom bits being produced are to be used
+ // as key material for performing encryption or decryption.
+
+ // 2. If ID=2, then the pseudorandom bits being produced are to be used
+ // as an IV (Initial Value) for encryption or decryption.
+
+ // 3. If ID=3, then the pseudorandom bits being produced are to be used
+ // as an integrity key for MACing.
+
+ // 1. Construct a string, D (the "diversifier"), by concatenating v/8
+ // copies of ID.
+ var D []byte
+ for i := 0; i < v; i++ {
+ D = append(D, ID)
+ }
+
+ // 2. Concatenate copies of the salt together to create a string S of
+ // length v(ceiling(s/v)) bits (the final copy of the salt may be
+ // truncated to create S). Note that if the salt is the empty
+ // string, then so is S.
+
+ S := fillWithRepeats(salt, v)
+
+ // 3. Concatenate copies of the password together to create a string P
+ // of length v(ceiling(p/v)) bits (the final copy of the password
+ // may be truncated to create P). Note that if the password is the
+ // empty string, then so is P.
+
+ P := fillWithRepeats(password, v)
+
+ // 4. Set I=S||P to be the concatenation of S and P.
+ I := append(S, P...)
+
+ // 5. Set c=ceiling(n/u).
+ c := (size + u - 1) / u
+
+ // 6. For i=1, 2, ..., c, do the following:
+ A := make([]byte, c*20)
+ var IjBuf []byte
+ for i := 0; i < c; i++ {
+ // A. Set Ai=H^r(D||I). (i.e., the r-th hash of D||I,
+ // H(H(H(... H(D||I))))
+ Ai := hash(append(D, I...))
+ for j := 1; j < r; j++ {
+ Ai = hash(Ai)
+ }
+ copy(A[i*20:], Ai[:])
+
+ if i < c-1 { // skip on last iteration
+ // B. Concatenate copies of Ai to create a string B of length v
+ // bits (the final copy of Ai may be truncated to create B).
+ var B []byte
+ for len(B) < v {
+ B = append(B, Ai[:]...)
+ }
+ B = B[:v]
+
+ // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
+ // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
+ // setting I_j=(I_j+B+1) mod 2^v for each j.
+ {
+ Bbi := new(big.Int).SetBytes(B)
+ Ij := new(big.Int)
+
+ for j := 0; j < len(I)/v; j++ {
+ Ij.SetBytes(I[j*v : (j+1)*v])
+ Ij.Add(Ij, Bbi)
+ Ij.Add(Ij, one)
+ Ijb := Ij.Bytes()
+ // We expect Ijb to be exactly v bytes,
+ // if it is longer or shorter we must
+ // adjust it accordingly.
+ if len(Ijb) > v {
+ Ijb = Ijb[len(Ijb)-v:]
+ }
+ if len(Ijb) < v {
+ if IjBuf == nil {
+ IjBuf = make([]byte, v)
+ }
+ bytesShort := v - len(Ijb)
+ for i := 0; i < bytesShort; i++ {
+ IjBuf[i] = 0
+ }
+ copy(IjBuf[bytesShort:], Ijb)
+ Ijb = IjBuf
+ }
+ copy(I[j*v:(j+1)*v], Ijb)
+ }
+ }
+ }
+ }
+ // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
+ // bit string, A.
+
+ // 8. Use the first n bits of A as the output of this entire process.
+ return A[:size]
+
+ // If the above process is being used to generate a DES key, the process
+ // should be used to create 64 random bits, and the key's parity bits
+ // should be set after the 64 bits have been produced. Similar concerns
+ // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
+ // similar keys with parity bits "built into them".
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
new file mode 100644
index 0000000..3e2ce69
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
@@ -0,0 +1,350 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkcs12 implements some of PKCS#12.
+//
+// This implementation is distilled from https://tools.ietf.org/html/rfc7292
+// and referenced documents. It is intended for decoding P12/PFX-stored
+// certificates and keys for use with the crypto/tls package.
+//
+// This package is frozen. If it's missing functionality you need, consider
+// an alternative like software.sslmate.com/src/go-pkcs12.
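+//
+// A typical use decodes a PFX blob into a key and certificate for
+// crypto/tls (a sketch; pfxData and password are caller-supplied):
+//
+// privateKey, cert, err := pkcs12.Decode(pfxData, password)
+// if err != nil {
+// return err
+// }
+// tlsCert := tls.Certificate{
+// Certificate: [][]byte{cert.Raw},
+// PrivateKey: privateKey,
+// }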
+package pkcs12
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
+ oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
+
+ oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
+ oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
+ oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
+)
+
+type pfxPdu struct {
+ Version int
+ AuthSafe contentInfo
+ MacData macData `asn1:"optional"`
+}
+
+type contentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+type encryptedData struct {
+ Version int
+ EncryptedContentInfo encryptedContentInfo
+}
+
+type encryptedContentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent []byte `asn1:"tag:0,optional"`
+}
+
+func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
+ return i.ContentEncryptionAlgorithm
+}
+
+func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
+
+type safeBag struct {
+ Id asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"tag:0,explicit"`
+ Attributes []pkcs12Attribute `asn1:"set,optional"`
+}
+
+type pkcs12Attribute struct {
+ Id asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"set"`
+}
+
+type encryptedPrivateKeyInfo struct {
+ AlgorithmIdentifier pkix.AlgorithmIdentifier
+ EncryptedData []byte
+}
+
+func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
+ return i.AlgorithmIdentifier
+}
+
+func (i encryptedPrivateKeyInfo) Data() []byte {
+ return i.EncryptedData
+}
+
+// PEM block types
+const (
+ certificateType = "CERTIFICATE"
+ privateKeyType = "PRIVATE KEY"
+)
+
+// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
+// trailing data after unmarshaling.
+func unmarshal(in []byte, out interface{}) error {
+ trailing, err := asn1.Unmarshal(in, out)
+ if err != nil {
+ return err
+ }
+ if len(trailing) != 0 {
+ return errors.New("pkcs12: trailing data found")
+ }
+ return nil
+}
+
+// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
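+//
+// For example (a sketch; pfxData and password are caller-supplied), the
+// returned blocks can be serialized back into a single PEM file:
+//
+// blocks, err := pkcs12.ToPEM(pfxData, password)
+// if err != nil {
+// return err
+// }
+// var pemData []byte
+// for _, b := range blocks {
+// pemData = append(pemData, pem.EncodeToMemory(b)...)
+// }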
+func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
+ encodedPassword, err := bmpString(password)
+ if err != nil {
+ return nil, ErrIncorrectPassword
+ }
+
+ bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+ if err != nil {
+ return nil, err
+ }
+
+ blocks := make([]*pem.Block, 0, len(bags))
+ for _, bag := range bags {
+ block, err := convertBag(&bag, encodedPassword)
+ if err != nil {
+ return nil, err
+ }
+ blocks = append(blocks, block)
+ }
+
+ return blocks, nil
+}
+
+func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
+ block := &pem.Block{
+ Headers: make(map[string]string),
+ }
+
+ for _, attribute := range bag.Attributes {
+ k, v, err := convertAttribute(&attribute)
+ if err != nil {
+ return nil, err
+ }
+ block.Headers[k] = v
+ }
+
+ switch {
+ case bag.Id.Equal(oidCertBag):
+ block.Type = certificateType
+ certsData, err := decodeCertBag(bag.Value.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ block.Bytes = certsData
+ case bag.Id.Equal(oidPKCS8ShroudedKeyBag):
+ block.Type = privateKeyType
+
+ key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
+ if err != nil {
+ return nil, err
+ }
+
+ switch key := key.(type) {
+ case *rsa.PrivateKey:
+ block.Bytes = x509.MarshalPKCS1PrivateKey(key)
+ case *ecdsa.PrivateKey:
+ block.Bytes, err = x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
+ }
+ default:
+ return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
+ }
+ return block, nil
+}
+
+func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
+ isString := false
+
+ switch {
+ case attribute.Id.Equal(oidFriendlyName):
+ key = "friendlyName"
+ isString = true
+ case attribute.Id.Equal(oidLocalKeyID):
+ key = "localKeyId"
+ case attribute.Id.Equal(oidMicrosoftCSPName):
+ // This key is chosen to match OpenSSL.
+ key = "Microsoft CSP Name"
+ isString = true
+ default:
+ return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
+ }
+
+ if isString {
+ if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
+ return "", "", err
+ }
+ if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
+ return "", "", err
+ }
+ } else {
+ var id []byte
+ if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
+ return "", "", err
+ }
+ value = hex.EncodeToString(id)
+ }
+
+ return key, value, nil
+}
+
+// Decode extracts a certificate and private key from pfxData. This function
+// assumes that there is only one certificate and only one private key in the
+// pfxData; if there are more, use ToPEM instead.
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
+ encodedPassword, err := bmpString(password)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(bags) != 2 {
+ err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
+ return
+ }
+
+ for _, bag := range bags {
+ switch {
+ case bag.Id.Equal(oidCertBag):
+ if certificate != nil {
+ err = errors.New("pkcs12: expected exactly one certificate bag")
+ return nil, nil, err
+ }
+
+ certsData, err := decodeCertBag(bag.Value.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ certs, err := x509.ParseCertificates(certsData)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(certs) != 1 {
+ err = errors.New("pkcs12: expected exactly one certificate in the certBag")
+ return nil, nil, err
+ }
+ certificate = certs[0]
+
+ case bag.Id.Equal(oidPKCS8ShroudedKeyBag):
+ if privateKey != nil {
+ err = errors.New("pkcs12: expected exactly one key bag")
+ return nil, nil, err
+ }
+
+ if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ if certificate == nil {
+ return nil, nil, errors.New("pkcs12: certificate missing")
+ }
+ if privateKey == nil {
+ return nil, nil, errors.New("pkcs12: private key missing")
+ }
+
+ return
+}
+
+func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
+ pfx := new(pfxPdu)
+ if err := unmarshal(p12Data, pfx); err != nil {
+ return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
+ }
+
+ if pfx.Version != 3 {
+ return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
+ }
+
+ if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
+ return nil, nil, NotImplementedError("only password-protected PFX is implemented")
+ }
+
+ // unmarshal the explicit bytes in the content for type 'data'
+ if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
+ return nil, nil, err
+ }
+
+ if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
+ return nil, nil, errors.New("pkcs12: no MAC in data")
+ }
+
+ if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
+ if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
+ // Some implementations use an empty byte array for
+ // the empty-string password; try one more time with
+ // an empty password.
+ password = nil
+ err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ var authenticatedSafe []contentInfo
+ if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
+ return nil, nil, err
+ }
+
+ if len(authenticatedSafe) != 2 {
+ return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
+ }
+
+ for _, ci := range authenticatedSafe {
+ var data []byte
+
+ switch {
+ case ci.ContentType.Equal(oidDataContentType):
+ if err := unmarshal(ci.Content.Bytes, &data); err != nil {
+ return nil, nil, err
+ }
+ case ci.ContentType.Equal(oidEncryptedDataContentType):
+ var encryptedData encryptedData
+ if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
+ return nil, nil, err
+ }
+ if encryptedData.Version != 0 {
+ return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
+ }
+ if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
+ return nil, nil, err
+ }
+ default:
+ return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
+ }
+
+ var safeContents []safeBag
+ if err := unmarshal(data, &safeContents); err != nil {
+ return nil, nil, err
+ }
+ bags = append(bags, safeContents...)
+ }
+
+ return bags, password, nil
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go
new file mode 100644
index 0000000..def1f7b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+)
+
+var (
+ // see https://tools.ietf.org/html/rfc7292#appendix-D
+ oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
+ oidPKCS8ShroudedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
+ oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
+)
+
+type certBag struct {
+ Id asn1.ObjectIdentifier
+ Data []byte `asn1:"tag:0,explicit"`
+}
+
+func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
+ pkinfo := new(encryptedPrivateKeyInfo)
+ if err = unmarshal(asn1Data, pkinfo); err != nil {
+ return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
+ }
+
+ pkData, err := pbDecrypt(pkinfo, password)
+ if err != nil {
+ return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
+ }
+
+ ret := new(asn1.RawValue)
+ if err = unmarshal(pkData, ret); err != nil {
+ return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
+ }
+
+ if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
+ return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
+ }
+
+ return privateKey, nil
+}
+
+func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
+ bag := new(certBag)
+ if err := unmarshal(asn1Data, bag); err != nil {
+ return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
+ }
+ if !bag.Id.Equal(oidCertTypeX509Certificate) {
+ return nil, NotImplementedError("only X509 certificates are supported")
+ }
+ return bag.Data, nil
+}
diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go
new file mode 100644
index 0000000..163645b
--- /dev/null
+++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go
@@ -0,0 +1,370 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httpproxy provides support for HTTP proxy determination
+// based on environment variables, as provided by net/http's
+// ProxyFromEnvironment function.
+//
+// The API is not subject to the Go 1 compatibility promise and may change at
+// any time.
+package httpproxy
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/idna"
+)
+
+// Config holds configuration for HTTP proxy settings. See
+// FromEnvironment for details.
+type Config struct {
+ // HTTPProxy represents the value of the HTTP_PROXY or
+ // http_proxy environment variable. It will be used as the proxy
+ // URL for HTTP requests and HTTPS requests unless overridden by
+ // HTTPSProxy or NoProxy.
+ HTTPProxy string
+
+ // HTTPSProxy represents the HTTPS_PROXY or https_proxy
+ // environment variable. It will be used as the proxy URL for
+ // HTTPS requests unless overridden by NoProxy.
+ HTTPSProxy string
+
+ // NoProxy represents the NO_PROXY or no_proxy environment
+ // variable. It specifies a string that contains comma-separated values
+ // specifying hosts that should be excluded from proxying. Each value is
+ // represented by an IP address prefix (1.2.3.4), an IP address prefix in
+ // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*).
+ // An IP address prefix and domain name can also include a literal port
+ // number (1.2.3.4:80).
+ // A domain name matches that name and all subdomains. A domain name with
+ // a leading "." matches subdomains only. For example "foo.com" matches
+ // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com".
+ // A single asterisk (*) indicates that no proxying should be done.
+ // A best effort is made to parse the string and errors are
+ // ignored.
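+ // For example (illustrative):
+ // "10.0.0.0/8,.internal.example.com,localhost:8080".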
+ NoProxy string
+
+ // CGI holds whether the current process is running
+ // as a CGI handler (FromEnvironment infers this from the
+ // presence of a REQUEST_METHOD environment variable).
+ // When this is set, ProxyForURL will return an error
+ // when HTTPProxy applies, because a client could be
+ // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy.
+ CGI bool
+}
+
+// config holds the parsed configuration for HTTP proxy settings.
+type config struct {
+ // Config represents the original configuration as defined above.
+ Config
+
+ // httpsProxy is the parsed URL of the HTTPSProxy if defined.
+ httpsProxy *url.URL
+
+ // httpProxy is the parsed URL of the HTTPProxy if defined.
+ httpProxy *url.URL
+
+ // ipMatchers represent all values in the NoProxy that are IP address
+ // prefixes or an IP address in CIDR notation.
+ ipMatchers []matcher
+
+ // domainMatchers represent all values in the NoProxy that are a domain
+ // name or hostname & domain name
+ domainMatchers []matcher
+}
+
+// FromEnvironment returns a Config instance populated from the
+// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the
+// lowercase versions thereof). HTTPS_PROXY takes precedence over
+// HTTP_PROXY for https requests.
+//
+// The environment values may be either a complete URL or a
+// "host[:port]", in which case the "http" scheme is assumed. An error
+// is returned if the value is a different form.
+func FromEnvironment() *Config {
+ return &Config{
+ HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"),
+ HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"),
+ NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
+ CGI: os.Getenv("REQUEST_METHOD") != "",
+ }
+}
+
+func getEnvAny(names ...string) string {
+ for _, n := range names {
+ if val := os.Getenv(n); val != "" {
+ return val
+ }
+ }
+ return ""
+}
+
+// ProxyFunc returns a function that determines the proxy URL to use for
+// a given request URL. Changing the contents of cfg will not affect
+// proxy functions created earlier.
+//
+// A nil URL and nil error are returned if no proxy is defined in the
+// environment, or a proxy should not be used for the given request, as
+// defined by NO_PROXY.
+//
+// As a special case, if req.URL.Host is "localhost" (with or without a
+// port number), then a nil URL and nil error will be returned.
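+//
+// For example (a sketch; the request URL is illustrative):
+//
+// proxy := httpproxy.FromEnvironment().ProxyFunc()
+// u, err := proxy(&url.URL{Scheme: "https", Host: "example.com:443"})
+// // u is nil if no proxy applies; otherwise it is the proxy URL to use.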
+func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) {
+ // Preprocess the Config settings for more efficient evaluation.
+ cfg1 := &config{
+ Config: *cfg,
+ }
+ cfg1.init()
+ return cfg1.proxyForURL
+}
+
+func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) {
+ var proxy *url.URL
+ if reqURL.Scheme == "https" {
+ proxy = cfg.httpsProxy
+ }
+ if proxy == nil {
+ proxy = cfg.httpProxy
+ if proxy != nil && cfg.CGI {
+ return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy")
+ }
+ }
+ if proxy == nil {
+ return nil, nil
+ }
+ if !cfg.useProxy(canonicalAddr(reqURL)) {
+ return nil, nil
+ }
+
+ return proxy, nil
+}
+
+func parseProxy(proxy string) (*url.URL, error) {
+ if proxy == "" {
+ return nil, nil
+ }
+
+ proxyURL, err := url.Parse(proxy)
+ if err != nil ||
+ (proxyURL.Scheme != "http" &&
+ proxyURL.Scheme != "https" &&
+ proxyURL.Scheme != "socks5") {
+ // proxy was bogus. Try prepending "http://" to it and
+ // see if that parses correctly. If not, we fall
+ // through and complain about the original one.
+ if proxyURL, err := url.Parse("http://" + proxy); err == nil {
+ return proxyURL, nil
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
+ }
+ return proxyURL, nil
+}
+
+// useProxy reports whether requests to addr should use a proxy,
+// according to the NO_PROXY or no_proxy environment variable.
+// addr is always a canonicalAddr with a host and port.
+func (cfg *config) useProxy(addr string) bool {
+ if len(addr) == 0 {
+ return true
+ }
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return false
+ }
+ if host == "localhost" {
+ return false
+ }
+ ip := net.ParseIP(host)
+ if ip != nil {
+ if ip.IsLoopback() {
+ return false
+ }
+ }
+
+ addr = strings.ToLower(strings.TrimSpace(host))
+
+ if ip != nil {
+ for _, m := range cfg.ipMatchers {
+ if m.match(addr, port, ip) {
+ return false
+ }
+ }
+ }
+ for _, m := range cfg.domainMatchers {
+ if m.match(addr, port, ip) {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *config) init() {
+ if parsed, err := parseProxy(c.HTTPProxy); err == nil {
+ c.httpProxy = parsed
+ }
+ if parsed, err := parseProxy(c.HTTPSProxy); err == nil {
+ c.httpsProxy = parsed
+ }
+
+ for _, p := range strings.Split(c.NoProxy, ",") {
+ p = strings.ToLower(strings.TrimSpace(p))
+ if len(p) == 0 {
+ continue
+ }
+
+ if p == "*" {
+ c.ipMatchers = []matcher{allMatch{}}
+ c.domainMatchers = []matcher{allMatch{}}
+ return
+ }
+
+ // IPv4/CIDR, IPv6/CIDR
+ if _, pnet, err := net.ParseCIDR(p); err == nil {
+ c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet})
+ continue
+ }
+
+ // IPv4:port, [IPv6]:port
+ phost, pport, err := net.SplitHostPort(p)
+ if err == nil {
+ if len(phost) == 0 {
+ // There is no host part, likely the entry is malformed; ignore.
+ continue
+ }
+ if phost[0] == '[' && phost[len(phost)-1] == ']' {
+ phost = phost[1 : len(phost)-1]
+ }
+ } else {
+ phost = p
+ }
+ // IPv4, IPv6
+ if pip := net.ParseIP(phost); pip != nil {
+ c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport})
+ continue
+ }
+
+ if len(phost) == 0 {
+ // There is no host part, likely the entry is malformed; ignore.
+ continue
+ }
+
+ // domain.com or domain.com:80
+ // foo.com matches bar.foo.com
+ // .domain.com or .domain.com:port
+ // *.domain.com or *.domain.com:port
+ if strings.HasPrefix(phost, "*.") {
+ phost = phost[1:]
+ }
+ matchHost := false
+ if phost[0] != '.' {
+ matchHost = true
+ phost = "." + phost
+ }
+ c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost})
+ }
+}
+
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+ "socks5": "1080",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
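+// (for example, "http://example.com" canonicalizes to "example.com:80").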
+func canonicalAddr(url *url.URL) string {
+ addr := url.Hostname()
+ if v, err := idnaASCII(addr); err == nil {
+ addr = v
+ }
+ port := url.Port()
+ if port == "" {
+ port = portMap[url.Scheme]
+ }
+ return net.JoinHostPort(addr, port)
+}
+
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+func idnaASCII(v string) (string, error) {
+ // TODO: Consider removing this check after verifying performance is okay.
+ // Right now punycode verification, length checks, context checks, and the
+ // permissible character tests are all omitted. It also prevents the ToASCII
+ // call from salvaging an invalid IDN, when possible. As a result it may be
+ // possible to have two IDNs that appear identical to the user where the
+ // ASCII-only version causes an error downstream whereas the non-ASCII
+ // version does not.
+ // Note that for correct ASCII IDNs ToASCII will only do considerably more
+ // work, but it will not cause an allocation.
+ if isASCII(v) {
+ return v, nil
+ }
+ return idna.Lookup.ToASCII(v)
+}
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+// matcher represents the matching rule for a given value in the NO_PROXY list
+type matcher interface {
+ // match returns true if the host and optional port or ip and optional port
+ // are allowed
+ match(host, port string, ip net.IP) bool
+}
+
+// allMatch matches on all possible inputs
+type allMatch struct{}
+
+func (a allMatch) match(host, port string, ip net.IP) bool {
+ return true
+}
+
+type cidrMatch struct {
+ cidr *net.IPNet
+}
+
+func (m cidrMatch) match(host, port string, ip net.IP) bool {
+ return m.cidr.Contains(ip)
+}
+
+type ipMatch struct {
+ ip net.IP
+ port string
+}
+
+func (m ipMatch) match(host, port string, ip net.IP) bool {
+ if m.ip.Equal(ip) {
+ return m.port == "" || m.port == port
+ }
+ return false
+}
+
+type domainMatch struct {
+ host string
+ port string
+
+ matchHost bool
+}
+
+func (m domainMatch) match(host, port string, ip net.IP) bool {
+ if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) {
+ return m.port == "" || m.port == port
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 0000000..c256483
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,198 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+// if err != nil {
+// log.Fatal(err)
+// }
+// defer k.Close()
+//
+// s, _, err := k.GetStringValue("SystemRoot")
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Printf("Windows system root is %q\n", s)
+//
+package registry
+
+import (
+ "io"
+ "syscall"
+ "time"
+)
+
+const (
+ // Registry key security and access rights.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+ // for details.
+ ALL_ACCESS = 0xf003f
+ CREATE_LINK = 0x00020
+ CREATE_SUB_KEY = 0x00004
+ ENUMERATE_SUB_KEYS = 0x00008
+ EXECUTE = 0x20019
+ NOTIFY = 0x00010
+ QUERY_VALUE = 0x00001
+ READ = 0x20019
+ SET_VALUE = 0x00002
+ WOW64_32KEY = 0x00200
+ WOW64_64KEY = 0x00100
+ WRITE = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+ // Windows defines some predefined root keys that are always open.
+ // An application can use these keys as entry points to the registry.
+ // Normally these keys are used in OpenKey to open new keys,
+ // but they can also be used anywhere a Key is required.
+ CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT)
+ CURRENT_USER = Key(syscall.HKEY_CURRENT_USER)
+ LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE)
+ USERS = Key(syscall.HKEY_USERS)
+ CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG)
+ PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+ return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+ p, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+ var subkey syscall.Handle
+ err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns local computer key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+ var err error
+ var p *uint16
+ if pcname != "" {
+ p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+ if err != nil {
+ return 0, err
+ }
+ }
+ var remoteKey syscall.Handle
+ err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
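+// If n <= 0, all subkey names are returned.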
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+ names := make([]string, 0)
+ // The registry key name length limit is 255 characters, as described at:
+ // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+ buf := make([]uint16, 256) // plus extra room for the terminating NUL
+loopItems:
+ for i := uint32(0); ; i++ {
+ if n > 0 {
+ if len(names) == n {
+ return names, nil
+ }
+ }
+ l := uint32(len(buf))
+ for {
+ err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ if n > len(names) {
+ return names, io.EOF
+ }
+ return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+ var h syscall.Handle
+ var d uint32
+ err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+ 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+ if err != nil {
+ return 0, false, err
+ }
+ return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+ return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+ SubKeyCount uint32
+ MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+ ValueCount uint32
+ MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+ MaxValueLen uint32 // longest data component among the key's values, in bytes
+ lastWriteTime syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+ return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+ var ki KeyInfo
+ err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+ &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+ &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+ if err != nil {
+ return nil, err
+ }
+ return &ki, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 0000000..50c32a3
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build generate
+
+package registry
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
new file mode 100644
index 0000000..e66643c
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package registry
+
+import "syscall"
+
+const (
+ _REG_OPTION_NON_VOLATILE = 0
+
+ _REG_CREATED_NEW_KEY = 1
+ _REG_OPENED_EXISTING_KEY = 2
+
+ _ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+ return procRegLoadMUIStringW.Find()
+}
+
+//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 0000000..f25e7e9
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,386 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package registry
+
+import (
+ "errors"
+ "io"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ // Registry value types.
+ NONE = 0
+ SZ = 1
+ EXPAND_SZ = 2
+ BINARY = 3
+ DWORD = 4
+ DWORD_BIG_ENDIAN = 5
+ LINK = 6
+ MULTI_SZ = 7
+ RESOURCE_LIST = 8
+ FULL_RESOURCE_DESCRIPTOR = 9
+ RESOURCE_REQUIREMENTS_LIST = 10
+ QWORD = 11
+)
+
+var (
+ // ErrShortBuffer is returned when the buffer was too short for the operation.
+ ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+ // ErrNotExist is returned when a registry key or value does not exist.
+ ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+ // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+ ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value, it returns
+// ErrShortBuffer along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
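+//
+// A typical two-call pattern (a sketch; "MyValue" is illustrative):
+//
+// n, _, err := k.GetValue("MyValue", nil) // query the required size
+// if err != nil {
+// return err
+// }
+// buf := make([]byte, n)
+// n, valtype, err := k.GetValue("MyValue", buf)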
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return 0, 0, err
+ }
+ var pbuf *byte
+ if len(buf) > 0 {
+ pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+ }
+ l := uint32(len(buf))
+ err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+ if err != nil {
+ return int(l), valtype, err
+ }
+ return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return nil, 0, err
+ }
+ var t uint32
+ n := uint32(len(buf))
+ for {
+ err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+ if err == nil {
+ return buf[:n], t, nil
+ }
+ if err != syscall.ERROR_MORE_DATA {
+ return nil, 0, err
+ }
+ if n <= uint32(len(buf)) {
+ return nil, 0, err
+ }
+ buf = make([]byte, n)
+ }
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return "", typ, err2
+ }
+ switch typ {
+ case SZ, EXPAND_SZ:
+ default:
+ return "", typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return "", typ, nil
+ }
+ u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return "", err
+ }
+
+ buf := make([]uint16, 1024)
+ var buflen uint32
+ var pdir *uint16
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+ // Try to resolve the string value using the system directory as
+ // a DLL search path; this assumes the string value is of the form
+ // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+ // This approach works with tzres.dll but may have to be revised
+ // in the future to allow callers to provide custom search paths.
+
+ var s string
+ s, err = ExpandString("%SystemRoot%\\system32\\")
+ if err != nil {
+ return "", err
+ }
+ pdir, err = syscall.UTF16PtrFromString(s)
+ if err != nil {
+ return "", err
+ }
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+ if buflen <= uint32(len(buf)) {
+ break // Buffer not growing, assume race; break
+ }
+ buf = make([]uint16, buflen)
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ return syscall.UTF16ToString(buf), nil
+}
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
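+// For example (illustrative), ExpandString("%SystemRoot%\system32") would
+// typically return "C:\Windows\system32".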
+func ExpandString(value string) (string, error) {
+ if value == "" {
+ return "", nil
+ }
+ p, err := syscall.UTF16PtrFromString(value)
+ if err != nil {
+ return "", err
+ }
+ r := make([]uint16, 100)
+ for {
+ n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+ if err != nil {
+ return "", err
+ }
+ if n <= uint32(len(r)) {
+ return syscall.UTF16ToString(r[:n]), nil
+ }
+ r = make([]uint16, n)
+ }
+}
+
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != MULTI_SZ {
+ return nil, typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return nil, typ, nil
+ }
+ p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ if len(p) == 0 {
+ return nil, typ, nil
+ }
+ if p[len(p)-1] == 0 {
+ p = p[:len(p)-1] // remove terminating null
+ }
+ val = make([]string, 0, 5)
+ from := 0
+ for i, c := range p {
+ if c == 0 {
+ val = append(val, string(utf16.Decode(p[from:i])))
+ from = i + 1
+ }
+ }
+ return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 8))
+ if err2 != nil {
+ return 0, typ, err2
+ }
+ switch typ {
+ case DWORD:
+ if len(data) != 4 {
+ return 0, typ, errors.New("DWORD value is not 4 bytes long")
+ }
+ var val32 uint32
+ copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+ return uint64(val32), DWORD, nil
+ case QWORD:
+ if len(data) != 8 {
+ return 0, typ, errors.New("QWORD value is not 8 bytes long")
+ }
+ copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+ return val, QWORD, nil
+ default:
+ return 0, typ, ErrUnexpectedType
+ }
+}
+
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != BINARY {
+ return nil, typ, ErrUnexpectedType
+ }
+ return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+ }
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+ return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+ return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+ v, err := syscall.UTF16FromString(value)
+ if err != nil {
+ return err
+ }
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+ return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+ return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+ ss := ""
+ for _, s := range value {
+ for i := 0; i < len(s); i++ {
+ if s[i] == 0 {
+ return errors.New("string cannot have 0 inside")
+ }
+ }
+ ss += s + "\x00"
+ }
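+ // A MULTI_SZ value is a sequence of NUL-terminated UTF-16 strings
+ // followed by a final empty string, hence the extra "\x00" below.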
+ v := utf16.Encode([]rune(ss + "\x00"))
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, MULTI_SZ, buf)
+}
+
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+ return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+ return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+}
+
+// ReadValueNames returns the value names of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadValueNames(n int) ([]string, error) {
+ ki, err := k.Stat()
+ if err != nil {
+ return nil, err
+ }
+ names := make([]string, 0, ki.ValueCount)
+ buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+ for i := uint32(0); ; i++ {
+ if n > 0 {
+ if len(names) == n {
+ return names, nil
+ }
+ }
+ l := uint32(len(buf))
+ for {
+ err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ if n > len(names) {
+ return names, io.EOF
+ }
+ return names, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
new file mode 100644
index 0000000..3778075
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -0,0 +1,120 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
+ procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW")
+ procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW")
+ procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW")
+ procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW")
+ procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW")
+ procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW")
+ procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index dc25923..229e70d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -10,9 +10,37 @@ cloud.google.com/go/logging/apiv2
cloud.google.com/go/logging/internal
cloud.google.com/go/logging/logadmin
cloud.google.com/go/storage
+# github.com/Azure/azure-pipeline-go v0.2.1
+github.com/Azure/azure-pipeline-go/pipeline
+# github.com/Azure/azure-sdk-for-go v36.1.0+incompatible
+github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage
+github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights
+github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage
+github.com/Azure/azure-sdk-for-go/version
+# github.com/Azure/azure-storage-file-go v0.6.0
+github.com/Azure/azure-storage-file-go/azfile
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
+# github.com/Azure/go-autorest/autorest v0.9.2
+github.com/Azure/go-autorest/autorest
+github.com/Azure/go-autorest/autorest/azure
+# github.com/Azure/go-autorest/autorest/adal v0.8.0
+github.com/Azure/go-autorest/autorest/adal
+# github.com/Azure/go-autorest/autorest/azure/auth v0.4.0
+github.com/Azure/go-autorest/autorest/azure/auth
+# github.com/Azure/go-autorest/autorest/azure/cli v0.3.0
+github.com/Azure/go-autorest/autorest/azure/cli
+# github.com/Azure/go-autorest/autorest/date v0.2.0
+github.com/Azure/go-autorest/autorest/date
+# github.com/Azure/go-autorest/autorest/to v0.3.0
+github.com/Azure/go-autorest/autorest/to
+# github.com/Azure/go-autorest/autorest/validation v0.2.0
+github.com/Azure/go-autorest/autorest/validation
+# github.com/Azure/go-autorest/logger v0.1.0
+github.com/Azure/go-autorest/logger
+# github.com/Azure/go-autorest/tracing v0.5.0
+github.com/Azure/go-autorest/tracing
# github.com/Microsoft/go-winio v0.4.13
github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/pkg/guid
@@ -106,6 +134,10 @@ github.com/convox/version
github.com/creack/pty
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
+# github.com/dgrijalva/jwt-go v3.2.0+incompatible
+github.com/dgrijalva/jwt-go
+# github.com/dimchansky/utfbom v1.1.0
+github.com/dimchansky/utfbom
# github.com/docker/distribution v2.7.1+incompatible
github.com/docker/distribution/registry/api/errcode
# github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b
@@ -260,6 +292,8 @@ github.com/konsorten/go-windows-terminal-sequences
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
+# github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149
+github.com/mattn/go-ieproxy
# github.com/miekg/dns v1.1.15
github.com/miekg/dns
# github.com/mitchellh/go-homedir v1.1.0
@@ -319,11 +353,13 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf
+# golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f
golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
+golang.org/x/crypto/pkcs12
+golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9
golang.org/x/net/bpf
@@ -332,6 +368,7 @@ golang.org/x/net/context/ctxhttp
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/http/httpguts
+golang.org/x/net/http/httpproxy
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
@@ -353,6 +390,7 @@ golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20191104094858-e8c54fb511f6
golang.org/x/sys/unix
golang.org/x/sys/windows
+golang.org/x/sys/windows/registry
# golang.org/x/text v0.3.2
golang.org/x/text/secure/bidirule
golang.org/x/text/transform