mirror of
https://github.com/FlipsideCrypto/convox.git
synced 2026-02-06 10:56:56 +00:00
initial azure provider (#2)
* initial azure support * create internal service principal for aks * remove unused variables * remove unused variables
This commit is contained in:
parent
52836cce47
commit
8ceee1e50d
9
go.mod
9
go.mod
@ -4,6 +4,13 @@ go 1.12
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.38.0
|
||||
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible
|
||||
github.com/Azure/azure-storage-file-go v0.6.0
|
||||
github.com/Azure/go-autorest/autorest v0.9.2
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190801035247-8694eade7dd3 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.5.0
|
||||
github.com/aws/aws-sdk-go v1.21.10
|
||||
@ -42,7 +49,7 @@ require (
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5
|
||||
golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf
|
||||
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f
|
||||
golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9 // indirect
|
||||
golang.org/x/sys v0.0.0-20191104094858-e8c54fb511f6 // indirect
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
|
||||
|
||||
47
go.sum
47
go.sum
@ -3,8 +3,43 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
|
||||
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible h1:smHlbChr/JDmsyUqELZXLs0YIgpXecIGdUibuc2983s=
|
||||
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-storage-file-go v0.6.0 h1:C8DY6l1s1c0mfQXC9ijI1ddDwHdIbvwoDH8agIT9ryk=
|
||||
github.com/Azure/azure-storage-file-go v0.6.0/go.mod h1:/En0UPyBtnVgniO08kDwCLL8letVdjIbjIeGmJeziaA=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
|
||||
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
|
||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
@ -74,7 +109,10 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU=
|
||||
@ -425,6 +463,8 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN
|
||||
github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc=
|
||||
github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
@ -557,12 +597,13 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
|
||||
golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss=
|
||||
golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f h1:kz4KIr+xcPUsI3VMoqWfPMvtnJ6MGfiVwsWSVzphMO4=
|
||||
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
|
||||
@ -635,12 +676,14 @@ golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ=
|
||||
golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191104094858-e8c54fb511f6 h1:ZJUmhYTp8GbGC0ViZRc2U+MIYQ8xx9MscsdXnclfIhw=
|
||||
|
||||
41
provider/azure/app.go
Normal file
41
provider/azure/app.go
Normal file
@ -0,0 +1,41 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"github.com/convox/convox/pkg/structs"
|
||||
am "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func (p *Provider) AppGet(name string) (*structs.App, error) {
|
||||
a, err := p.Provider.AppGet(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch a.Parameters["Router"] {
|
||||
case "dedicated":
|
||||
ing, err := p.Cluster.ExtensionsV1beta1().Ingresses(p.AppNamespace(a.Name)).Get(a.Name, am.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(ing.Status.LoadBalancer.Ingress) > 0 {
|
||||
a.Router = ing.Status.LoadBalancer.Ingress[0].IP
|
||||
}
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (p *Provider) AppIdles(name string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *Provider) AppParameters() map[string]string {
|
||||
return map[string]string{
|
||||
"Router": "shared",
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) AppStatus(name string) (string, error) {
|
||||
return "running", nil
|
||||
}
|
||||
164
provider/azure/azure.go
Normal file
164
provider/azure/azure.go
Normal file
@ -0,0 +1,164 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
|
||||
"github.com/Azure/azure-storage-file-go/azfile"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure/auth"
|
||||
"github.com/convox/convox/pkg/structs"
|
||||
"github.com/convox/convox/provider/k8s"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
)
|
||||
|
||||
// Provider implements the convox provider interface for azure by embedding
// the generic kubernetes provider and adding azure-specific configuration
// and service clients.
type Provider struct {
	*k8s.Provider

	ClientID       string // service principal client id (AZURE_CLIENT_ID)
	ClientSecret   string // service principal secret (AZURE_CLIENT_SECRET); treat as sensitive
	Region         string // azure region (REGION)
	Registry       string // container registry hostname (REGISTRY)
	ResourceGroup  string // resource group holding the rack's resources (RESOURCE_GROUP)
	StorageAccount string // storage account backing object storage (STORAGE_ACCOUNT)
	StorageShare   string // file share within the storage account (STORAGE_SHARE)
	Subscription   string // azure subscription id (AZURE_SUBSCRIPTION_ID)
	Workspace      string // log analytics workspace id used for log queries (WORKSPACE)

	// lazily initialized service clients; see initializeAzureServices
	insightLogs      *operationalinsights.QueryClient
	storageDirectory *azfile.DirectoryURL
}
|
||||
|
||||
func FromEnv() (*Provider, error) {
|
||||
k, err := k8s.FromEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p := &Provider{
|
||||
Provider: k,
|
||||
ClientID: os.Getenv("AZURE_CLIENT_ID"),
|
||||
ClientSecret: os.Getenv("AZURE_CLIENT_SECRET"),
|
||||
Region: os.Getenv("REGION"),
|
||||
Registry: os.Getenv("REGISTRY"),
|
||||
ResourceGroup: os.Getenv("RESOURCE_GROUP"),
|
||||
StorageAccount: os.Getenv("STORAGE_ACCOUNT"),
|
||||
StorageShare: os.Getenv("STORAGE_SHARE"),
|
||||
Subscription: os.Getenv("AZURE_SUBSCRIPTION_ID"),
|
||||
Workspace: os.Getenv("WORKSPACE"),
|
||||
}
|
||||
|
||||
fmt.Printf("p: %+v\n", p)
|
||||
|
||||
k.Engine = p
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Initialize sets up the azure service clients and then initializes the
// underlying kubernetes provider.
func (p *Provider) Initialize(opts structs.ProviderOptions) error {
	// azure clients (log analytics, file storage) must be ready before the
	// k8s provider starts calling back into this struct as its engine
	if err := p.initializeAzureServices(); err != nil {
		return err
	}

	if err := p.Provider.Initialize(opts); err != nil {
		return err
	}

	// clear the client-go runtime error handlers to suppress their default
	// logging of background errors
	runtime.ErrorHandlers = []func(error){}

	return nil
}
|
||||
|
||||
func (p *Provider) WithContext(ctx context.Context) structs.Provider {
|
||||
pp := *p
|
||||
pp.Provider = pp.Provider.WithContext(ctx).(*k8s.Provider)
|
||||
return &pp
|
||||
}
|
||||
|
||||
func (p *Provider) initializeAzureServices() error {
|
||||
il, err := p.azureInsightLogs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.insightLogs = il
|
||||
|
||||
sd, err := p.azureStorageDirectory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.storageDirectory = sd
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) azureAuthorizer(resource string) (autorest.Authorizer, error) {
|
||||
a, err := auth.NewAuthorizerFromEnvironmentWithResource(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (p *Provider) azureInsightLogs() (*operationalinsights.QueryClient, error) {
|
||||
a, err := p.azureAuthorizer("https://api.loganalytics.io")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qc := operationalinsights.NewQueryClient()
|
||||
qc.Authorizer = a
|
||||
|
||||
return &qc, nil
|
||||
}
|
||||
|
||||
func (p *Provider) azureStorageDirectory() (*azfile.DirectoryURL, error) {
|
||||
k, err := p.azureStorageKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cred, err := azfile.NewSharedKeyCredential(p.StorageAccount, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pipe := azfile.NewPipeline(cred, azfile.PipelineOptions{})
|
||||
|
||||
u, err := url.Parse(fmt.Sprintf("https://%s.file.core.windows.net", p.StorageAccount))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir := azfile.NewServiceURL(*u, pipe).NewShareURL(p.StorageShare).NewRootDirectoryURL()
|
||||
|
||||
return &dir, nil
|
||||
}
|
||||
|
||||
func (p *Provider) azureStorageKey() (string, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
a, err := p.azureAuthorizer("https://management.azure.com")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ac := storage.NewAccountsClient(p.Subscription)
|
||||
ac.Authorizer = a
|
||||
|
||||
res, err := ac.ListKeys(ctx, p.ResourceGroup, p.StorageAccount, storage.Kerb)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(*res.Keys) < 1 {
|
||||
return "", fmt.Errorf("could not find account key")
|
||||
}
|
||||
|
||||
return *(*res.Keys)[0].Value, nil
|
||||
}
|
||||
75
provider/azure/build.go
Normal file
75
provider/azure/build.go
Normal file
@ -0,0 +1,75 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/convox/convox/pkg/structs"
|
||||
)
|
||||
|
||||
func (p *Provider) BuildExport(app, id string, w io.Writer) error {
|
||||
if err := p.authAppRepository(app); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.Provider.BuildExport(app, id, w)
|
||||
}
|
||||
|
||||
func (p *Provider) BuildImport(app string, r io.Reader) (*structs.Build, error) {
|
||||
if err := p.authAppRepository(app); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.Provider.BuildImport(app, r)
|
||||
}
|
||||
|
||||
func (p *Provider) BuildLogs(app, id string, opts structs.LogsOptions) (io.ReadCloser, error) {
|
||||
b, err := p.BuildGet(app, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts.Since = nil
|
||||
|
||||
switch b.Status {
|
||||
case "running":
|
||||
return p.ProcessLogs(app, b.Process, opts)
|
||||
default:
|
||||
u, err := url.Parse(b.Logs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "object":
|
||||
return p.ObjectFetch(u.Hostname(), u.Path)
|
||||
default:
|
||||
return nil, fmt.Errorf("unable to read logs for build: %s", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) authAppRepository(app string) error {
|
||||
repo, _, err := p.RepositoryHost(app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
user, pass, err := p.RepositoryAuth(app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.Command("docker", "login", "-u", user, "--password-stdin", repo)
|
||||
|
||||
cmd.Stdin = strings.NewReader(pass)
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
46
provider/azure/certificate.go
Normal file
46
provider/azure/certificate.go
Normal file
@ -0,0 +1,46 @@
|
||||
package azure
|
||||
|
||||
// import (
|
||||
// "fmt"
|
||||
|
||||
// gc "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/apis/networking.gke.io/v1beta1"
|
||||
// "github.com/convox/convox/pkg/structs"
|
||||
// am "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
// )
|
||||
|
||||
// func (p *Provider) CertificateGenerate(domains []string) (*structs.Certificate, error) {
|
||||
// switch len(domains) {
|
||||
// case 0:
|
||||
// return nil, fmt.Errorf("must specify a domain")
|
||||
// case 1:
|
||||
// default:
|
||||
// return nil, fmt.Errorf("must specify only one domain for gcp managed certificates")
|
||||
// }
|
||||
|
||||
// gmc, err := p.gkeManagedCertsClient()
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// cert := &gc.ManagedCertificate{
|
||||
// ObjectMeta: am.ObjectMeta{
|
||||
// GenerateName: "managed-",
|
||||
// Namespace: p.Namespace,
|
||||
// },
|
||||
// Spec: gc.ManagedCertificateSpec{
|
||||
// Domains: domains,
|
||||
// },
|
||||
// Status: gc.ManagedCertificateStatus{
|
||||
// DomainStatus: []gc.DomainStatus{},
|
||||
// },
|
||||
// }
|
||||
|
||||
// c, err := gmc.NetworkingV1beta1().ManagedCertificates(p.Namespace).Create(cert)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// fmt.Printf("c: %+v\n", c)
|
||||
|
||||
// return &structs.Certificate{}, nil
|
||||
// }
|
||||
5
provider/azure/deployment.go
Normal file
5
provider/azure/deployment.go
Normal file
@ -0,0 +1,5 @@
|
||||
package azure
|
||||
|
||||
func (p *Provider) DeploymentTimeout() int {
|
||||
return 1800
|
||||
}
|
||||
9
provider/azure/heartbeat.go
Normal file
9
provider/azure/heartbeat.go
Normal file
@ -0,0 +1,9 @@
|
||||
package azure
|
||||
|
||||
func (p *Provider) Heartbeat() (map[string]interface{}, error) {
|
||||
hs := map[string]interface{}{
|
||||
"region": p.Region,
|
||||
}
|
||||
|
||||
return hs, nil
|
||||
}
|
||||
106
provider/azure/helpers.go
Normal file
106
provider/azure/helpers.go
Normal file
@ -0,0 +1,106 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
// gv "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/clientgen/clientset/versioned"
|
||||
am "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func (p *Provider) appRegistry(app string) (string, error) {
|
||||
ns, err := p.Provider.Cluster.CoreV1().Namespaces().Get(p.AppNamespace(app), am.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
registry, ok := ns.ObjectMeta.Annotations["convox.registry"]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("no registry for app: %s", app)
|
||||
}
|
||||
|
||||
return registry, nil
|
||||
}
|
||||
|
||||
// func (p *Provider) gkeManagedCertsClient() (gv.Interface, error) {
|
||||
// return gv.NewForConfig(p.Config)
|
||||
// }
|
||||
|
||||
func (p *Provider) watchForProcessTermination(ctx context.Context, app, pid string, cancel func()) {
|
||||
defer cancel()
|
||||
|
||||
tick := time.NewTicker(2 * time.Second)
|
||||
defer tick.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-tick.C:
|
||||
if _, err := p.ProcessGet(app, pid); err != nil {
|
||||
time.Sleep(2 * time.Second)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// kubectl runs the kubectl CLI with the given arguments, surfacing its
// combined output as the error message on failure.
func kubectl(args ...string) error {
	cmd := exec.Command("kubectl", args...)
	cmd.Env = os.Environ()

	if out, err := cmd.CombinedOutput(); err != nil {
		return errors.New(strings.TrimSpace(string(out)))
	}

	return nil
}
|
||||
|
||||
// outputConverter matches a lower-case letter immediately followed by an
// upper-case letter (a camelCase word boundary).
var outputConverter = regexp.MustCompile("([a-z])([A-Z])")

// outputToEnvironment converts a camelCase output name to an ENV_VAR style
// name, e.g. "clusterEndpoint" -> "CLUSTER_ENDPOINT".
func outputToEnvironment(name string) string {
	snake := outputConverter.ReplaceAllString(name, "${1}_${2}")
	return strings.ToUpper(snake)
}
|
||||
|
||||
// upperName converts a kebab/snake-case name to UpperCamelCase:
// "myapp" -> "Myapp", "my-app" -> "MyApp", "my_app" -> "MyApp".
func upperName(name string) string {
	if name == "" {
		return ""
	}

	// treat underscores identically to dashes
	name = strings.Replace(name, "_", "-", -1)

	var b strings.Builder
	upper := true // capitalize the first character and each character after a dash

	for i := 0; i < len(name); i++ {
		c := name[i : i+1]

		if c == "-" {
			upper = true
			continue
		}

		if upper {
			b.WriteString(strings.ToUpper(c))
			upper = false
		} else {
			b.WriteString(c)
		}
	}

	return b.String()
}
|
||||
23
provider/azure/ingress.go
Normal file
23
provider/azure/ingress.go
Normal file
@ -0,0 +1,23 @@
|
||||
package azure
|
||||
|
||||
func (p *Provider) IngressAnnotations(app string) (map[string]string, error) {
|
||||
a, err := p.AppGet(app)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ans := map[string]string{
|
||||
"kubernetes.io/ingress.class": "convox",
|
||||
}
|
||||
|
||||
switch a.Parameters["Router"] {
|
||||
case "dedicated":
|
||||
ans["kubernetes.io/ingress.class"] = "gce"
|
||||
}
|
||||
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
func (p *Provider) IngressSecrets(app string) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
121
provider/azure/log.go
Normal file
121
provider/azure/log.go
Normal file
@ -0,0 +1,121 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
|
||||
"github.com/convox/convox/pkg/common"
|
||||
"github.com/convox/convox/pkg/options"
|
||||
"github.com/convox/convox/pkg/structs"
|
||||
)
|
||||
|
||||
// var sequenceTokens sync.Map
|
||||
|
||||
func (p *Provider) Log(app, stream string, ts time.Time, message string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) AppLogs(name string, opts structs.LogsOptions) (io.ReadCloser, error) {
|
||||
r, w := io.Pipe()
|
||||
|
||||
go p.insightContainerLogs(p.Context(), w, p.AppNamespace(name), opts)
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (p *Provider) SystemLogs(opts structs.LogsOptions) (io.ReadCloser, error) {
|
||||
return p.AppLogs("system", opts)
|
||||
}
|
||||
|
||||
func (p *Provider) insightContainerLogs(ctx context.Context, w io.WriteCloser, namespace string, opts structs.LogsOptions) {
|
||||
defer w.Close()
|
||||
|
||||
since := common.DefaultDuration(opts.Since, 0)
|
||||
start := time.Now().Add(-1 * since)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
// check for closed writer
|
||||
if _, err := w.Write([]byte{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
query := operationalinsights.QueryBody{
|
||||
Query: options.String(fmt.Sprintf("KubePodInventory | join kind=innerunique ContainerLog on ContainerID | project Timestamp=TimeGenerated1,Message=LogEntry,Namespace,Pod=Name,Labels=PodLabel | where Namespace==%q and Timestamp > datetime(%s) | order by Timestamp asc | limit 100", namespace, start.Format("2006-01-02 15:04:05.000"))),
|
||||
Timespan: options.String("P7D"),
|
||||
}
|
||||
|
||||
res, err := p.insightLogs.Execute(context.Background(), p.Workspace, query)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %+v\n", err)
|
||||
return
|
||||
}
|
||||
if len(*res.Tables) < 1 {
|
||||
fmt.Println("no tables")
|
||||
return
|
||||
}
|
||||
|
||||
t := (*res.Tables)[0]
|
||||
|
||||
if len(*t.Rows) == 0 && !common.DefaultBool(opts.Follow, true) {
|
||||
return
|
||||
}
|
||||
|
||||
for _, row := range *t.Rows {
|
||||
attrs := parseRow(row, *t.Columns)
|
||||
|
||||
ts, err := time.Parse("2006-01-02T15:04:05.999Z", attrs["Timestamp"])
|
||||
if err != nil {
|
||||
fmt.Printf("err: %+v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if ts.After(start) {
|
||||
start = ts
|
||||
}
|
||||
|
||||
var labels map[string]string
|
||||
|
||||
if err := json.Unmarshal([]byte(strings.Trim(attrs["Labels"], "[]")), &labels); err != nil {
|
||||
fmt.Printf("err: %+v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
service := labels["service"]
|
||||
pod := attrs["Pod"]
|
||||
|
||||
prefix := ""
|
||||
|
||||
if common.DefaultBool(opts.Prefix, false) {
|
||||
prefix = fmt.Sprintf("%s service/%s/%s ", ts.Format(time.RFC3339), service, pod)
|
||||
}
|
||||
|
||||
if _, err := w.Write([]byte(fmt.Sprintf("%s%s\n", prefix, attrs["Message"]))); err != nil {
|
||||
fmt.Printf("err: %+v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseRow(row []interface{}, cols []operationalinsights.Column) map[string]string {
|
||||
attrs := map[string]string{}
|
||||
|
||||
for i, c := range cols {
|
||||
if v, ok := row[i].(string); ok && c.Name != nil {
|
||||
attrs[*c.Name] = v
|
||||
}
|
||||
}
|
||||
|
||||
return attrs
|
||||
}
|
||||
25
provider/azure/manifest.go
Normal file
25
provider/azure/manifest.go
Normal file
@ -0,0 +1,25 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/convox/convox/pkg/manifest"
|
||||
)
|
||||
|
||||
func (p *Provider) ManifestValidate(m *manifest.Manifest) error {
|
||||
errs := []string{}
|
||||
|
||||
for _, s := range m.Services {
|
||||
if len(s.Volumes) > 0 {
|
||||
errs = append(errs, fmt.Sprintf("shared volumes are not supported on gcp"))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("manifest valiation errors:\n%s", strings.Join(errs, "\n"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
191
provider/azure/object.go
Normal file
191
provider/azure/object.go
Normal file
@ -0,0 +1,191 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-storage-file-go/azfile"
|
||||
"github.com/convox/convox/pkg/structs"
|
||||
)
|
||||
|
||||
func (p *Provider) ObjectDelete(app, key string) error {
|
||||
ctx := p.Context()
|
||||
|
||||
if _, err := p.storageFile(p.objectKey(app, key)).Delete(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) ObjectExists(app, key string) (bool, error) {
|
||||
if _, err := p.storageFile(p.objectKey(app, key)).GetProperties(p.Context()); err != nil {
|
||||
if azerr, ok := err.(azfile.StorageError); ok && azerr.ServiceCode() == "ResourceNotFound" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *Provider) ObjectFetch(app, key string) (io.ReadCloser, error) {
|
||||
ctx := p.Context()
|
||||
|
||||
res, err := p.storageFile(p.objectKey(app, key)).Download(ctx, 0, azfile.CountToEnd, false)
|
||||
if err != nil {
|
||||
if azerr, ok := err.(azfile.StorageError); ok && azerr.ServiceCode() == "ResourceNotFound" {
|
||||
return nil, fmt.Errorf("no such key")
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := res.Body(azfile.RetryReaderOptions{})
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (p *Provider) ObjectList(app, prefix string) ([]string, error) {
|
||||
ctx := p.Context()
|
||||
|
||||
dir := p.storageDirectory.NewDirectoryURL(p.objectKey(app, prefix))
|
||||
|
||||
fs := []string{}
|
||||
|
||||
for marker := (azfile.Marker{}); marker.NotDone(); {
|
||||
res, err := dir.ListFilesAndDirectoriesSegment(ctx, marker, azfile.ListFilesAndDirectoriesOptions{})
|
||||
if err != nil {
|
||||
if azerr, ok := err.(azfile.StorageError); ok && azerr.ServiceCode() == "ResourceNotFound" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
marker = res.NextMarker
|
||||
|
||||
for _, file := range res.FileItems {
|
||||
fs = append(fs, file.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
// ObjectStore writes the contents of r to the object identified by app and
// key and returns a handle (an object:// URL) to the stored object. If key
// is empty, a random temporary key under "tmp/" is generated.
func (p *Provider) ObjectStore(app, key string, r io.Reader, opts structs.ObjectStoreOptions) (*structs.Object, error) {
	ctx := p.Context()

	if key == "" {
		k, err := generateTempKey()
		if err != nil {
			return nil, err
		}
		key = k
	}

	name := p.objectKey(app, key)

	// ensure the parent directories exist on the file share before creating
	if err := p.storageMkdir(name); err != nil {
		return nil, err
	}

	// spool r to a local temp file first: the Azure file API requires the
	// total size up front (file.Create) before any bytes can be uploaded
	fw, err := ioutil.TempFile("", "")
	if err != nil {
		return nil, err
	}
	defer fw.Close()
	defer os.Remove(fw.Name())

	if _, err := io.Copy(fw, r); err != nil {
		return nil, err
	}

	// close explicitly so all bytes are flushed before reopening for read
	// (the deferred Close above then becomes a harmless no-op error)
	if err := fw.Close(); err != nil {
		return nil, err
	}

	fr, err := os.Open(fw.Name())
	if err != nil {
		return nil, err
	}
	defer fr.Close()

	stat, err := fr.Stat()
	if err != nil {
		return nil, err
	}

	file := p.storageFile(name)

	// create the remote file at its final size, then upload the contents
	if _, err := file.Create(ctx, stat.Size(), azfile.FileHTTPHeaders{}, azfile.Metadata{}); err != nil {
		return nil, err
	}

	if _, err := file.UploadRange(ctx, 0, fr, nil); err != nil {
		return nil, err
	}

	url := fmt.Sprintf("object://%s/%s", app, key)

	o := &structs.Object{Url: url}

	return o, nil
}
|
||||
|
||||
func (p *Provider) objectKey(app, key string) string {
|
||||
return fmt.Sprintf("%s/%s", app, strings.TrimPrefix(key, "/"))
|
||||
}
|
||||
|
||||
// storageFile returns a FileURL for the given share-relative key, rooted at
// the provider's storage directory.
func (p *Provider) storageFile(key string) azfile.FileURL {
	return p.storageDirectory.NewFileURL(key)
}
|
||||
|
||||
func (p *Provider) storageMkdir(file string) error {
|
||||
ctx := p.Context()
|
||||
|
||||
parts := strings.Split(file, "/")
|
||||
if len(parts) < 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
dir := *p.storageDirectory
|
||||
|
||||
for _, name := range parts[0 : len(parts)-1] {
|
||||
dir = dir.NewDirectoryURL(name)
|
||||
|
||||
if _, err := dir.Create(ctx, azfile.Metadata{}); err != nil {
|
||||
if azerr, ok := err.(azfile.StorageError); ok {
|
||||
if azerr.ServiceCode() == "ResourceAlreadyExists" {
|
||||
continue
|
||||
}
|
||||
if azerr.ServiceCode() == "ResourceTypeMismatch" {
|
||||
return fmt.Errorf("unable to create directory")
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateTempKey returns a random object key under "tmp/", derived from
// the sha256 of 1024 cryptographically random bytes and truncated to 30
// hex characters.
func generateTempKey() (string, error) {
	buf := make([]byte, 1024)

	if _, err := rand.Read(buf); err != nil {
		return "", err
	}

	sum := sha256.Sum256(buf)

	return "tmp/" + hex.EncodeToString(sum[:])[:30], nil
}
|
||||
13
provider/azure/repository.go
Normal file
13
provider/azure/repository.go
Normal file
@ -0,0 +1,13 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// RepositoryAuth returns the credentials used to authenticate against the
// app's image registry: the provider's Azure service principal client id
// and client secret.
func (p *Provider) RepositoryAuth(app string) (string, string, error) {
	return p.ClientID, p.ClientSecret, nil
}
|
||||
|
||||
// RepositoryHost returns the registry host/path for an app's images
// (<registry>/<app>); the second return value signals that a registry
// host is available.
func (p *Provider) RepositoryHost(app string) (string, bool, error) {
	return fmt.Sprintf("%s/%s", p.Registry, app), true, nil
}
|
||||
7
provider/azure/resolver.go
Normal file
7
provider/azure/resolver.go
Normal file
@ -0,0 +1,7 @@
|
||||
package azure
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Resolver is not supported by the azure provider; it always returns an
// error.
func (p *Provider) Resolver() (string, error) {
	return "", fmt.Errorf("no resolver")
}
|
||||
11
provider/azure/service.go
Normal file
11
provider/azure/service.go
Normal file
@ -0,0 +1,11 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/convox/convox/pkg/manifest"
|
||||
)
|
||||
|
||||
// ServiceHost returns the public hostname for a service of an app in the
// form <service>.<app>.<rack domain>.
func (p *Provider) ServiceHost(app string, s manifest.Service) string {
	return fmt.Sprintf("%s.%s.%s", s.Name, app, p.Domain)
}
|
||||
9
provider/azure/system.go
Normal file
9
provider/azure/system.go
Normal file
@ -0,0 +1,9 @@
|
||||
package azure
|
||||
|
||||
// SystemHost returns the hostname of the rack API, which is the rack's
// configured domain.
func (p *Provider) SystemHost() string {
	return p.Domain
}
|
||||
|
||||
// SystemStatus reports the rack status. The azure provider has no richer
// health source wired up, so it always reports "running".
func (p *Provider) SystemStatus() (string, error) {
	return "running", nil
}
|
||||
@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/convox/convox/pkg/structs"
|
||||
"github.com/convox/convox/provider/aws"
|
||||
"github.com/convox/convox/provider/azure"
|
||||
"github.com/convox/convox/provider/do"
|
||||
"github.com/convox/convox/provider/gcp"
|
||||
"github.com/convox/convox/provider/k8s"
|
||||
@ -19,6 +20,8 @@ func FromEnv() (structs.Provider, error) {
|
||||
switch name {
|
||||
case "aws":
|
||||
return aws.FromEnv()
|
||||
case "azure":
|
||||
return azure.FromEnv()
|
||||
case "do":
|
||||
return do.FromEnv()
|
||||
case "gcp":
|
||||
|
||||
44
terraform/api/azure/identity.tf
Normal file
44
terraform/api/azure/identity.tf
Normal file
@ -0,0 +1,44 @@
|
||||
# resource "azurerm_user_assigned_identity" "api" {
|
||||
# resource_group_name = data.azurerm_resource_group.rack.name
|
||||
# location = data.azurerm_resource_group.rack.location
|
||||
|
||||
# name = "api"
|
||||
# }
|
||||
|
||||
# resource "azurerm_role_assignment" "identity-api-contributor" {
|
||||
# scope = data.azurerm_resource_group.rack.id
|
||||
# role_definition_name = "Contributor"
|
||||
# principal_id = azurerm_user_assigned_identity.api.principal_id
|
||||
# }
|
||||
|
||||
# data "template_file" "identity" {
|
||||
# template = file("${path.module}/identity.yml.tpl")
|
||||
|
||||
# vars = {
|
||||
# namespace = var.namespace
|
||||
# resource = azurerm_user_assigned_identity.api.id
|
||||
# client = azurerm_user_assigned_identity.api.client_id
|
||||
# }
|
||||
# }
|
||||
|
||||
# resource "null_resource" "deployment" {
|
||||
# provisioner "local-exec" {
|
||||
# when = "create"
|
||||
# command = "echo '${data.template_file.identity.rendered}' | kubectl apply -f -"
|
||||
# environment = {
|
||||
# "KUBECONFIG" : var.kubeconfig,
|
||||
# }
|
||||
# }
|
||||
|
||||
# provisioner "local-exec" {
|
||||
# when = "destroy"
|
||||
# command = "echo '${data.template_file.identity.rendered}' | kubectl delete -f -"
|
||||
# environment = {
|
||||
# "KUBECONFIG" : var.kubeconfig,
|
||||
# }
|
||||
# }
|
||||
|
||||
# triggers = {
|
||||
# template = sha256(data.template_file.identity.rendered)
|
||||
# }
|
||||
# }
|
||||
18
terraform/api/azure/identity.yml.tpl
Normal file
18
terraform/api/azure/identity.yml.tpl
Normal file
@ -0,0 +1,18 @@
|
||||
apiVersion: "aadpodidentity.k8s.io/v1"
|
||||
kind: AzureIdentity
|
||||
metadata:
|
||||
namespace: ${namespace}
|
||||
name: api
|
||||
spec:
|
||||
type: 0
|
||||
ResourceID: ${resource}
|
||||
ClientID: ${client}
|
||||
---
|
||||
apiVersion: "aadpodidentity.k8s.io/v1"
|
||||
kind: AzureIdentityBinding
|
||||
metadata:
|
||||
namespace: ${namespace}
|
||||
name: api
|
||||
spec:
|
||||
AzureIdentity: api
|
||||
Selector: api
|
||||
77
terraform/api/azure/main.tf
Normal file
77
terraform/api/azure/main.tf
Normal file
@ -0,0 +1,77 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12.0"
|
||||
}
|
||||
|
||||
provider "azuread" {
|
||||
version = "~> 0.7"
|
||||
}
|
||||
|
||||
provider "azurerm" {
|
||||
version = "~> 1.36"
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
version = "~> 1.8"
|
||||
|
||||
config_path = var.kubeconfig
|
||||
}
|
||||
|
||||
provider "template" {
|
||||
version = "~> 2.1"
|
||||
}
|
||||
|
||||
locals {
|
||||
tags = {
|
||||
System = "convox"
|
||||
Rack = var.name
|
||||
}
|
||||
}
|
||||
|
||||
data "azurerm_client_config" "current" {}
|
||||
|
||||
data "azurerm_resource_group" "rack" {
|
||||
name = var.resource_group
|
||||
}
|
||||
|
||||
data "azurerm_subscription" "current" {}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 12
|
||||
special = false
|
||||
upper = false
|
||||
}
|
||||
|
||||
module "k8s" {
|
||||
source = "../k8s"
|
||||
|
||||
providers = {
|
||||
kubernetes = kubernetes
|
||||
}
|
||||
|
||||
domain = var.domain
|
||||
kubeconfig = var.kubeconfig
|
||||
name = var.name
|
||||
namespace = var.namespace
|
||||
release = var.release
|
||||
|
||||
annotations = {}
|
||||
|
||||
labels = {
|
||||
"aadpodidbinding" : "api"
|
||||
}
|
||||
|
||||
env = {
|
||||
AZURE_CLIENT_ID = azuread_service_principal.api.application_id
|
||||
AZURE_CLIENT_SECRET = azuread_service_principal_password.api.value
|
||||
AZURE_SUBSCRIPTION_ID = data.azurerm_subscription.current.subscription_id
|
||||
AZURE_TENANT_ID = data.azurerm_client_config.current.tenant_id
|
||||
PROVIDER = "azure"
|
||||
REGION = var.region
|
||||
REGISTRY = azurerm_container_registry.registry.login_server
|
||||
RESOURCE_GROUP = var.resource_group
|
||||
ROUTER = var.router
|
||||
STORAGE_ACCOUNT = azurerm_storage_account.storage.name
|
||||
STORAGE_SHARE = azurerm_storage_share.storage.name
|
||||
WORKSPACE = var.workspace
|
||||
}
|
||||
}
|
||||
3
terraform/api/azure/outputs.tf
Normal file
3
terraform/api/azure/outputs.tf
Normal file
@ -0,0 +1,3 @@
|
||||
output "endpoint" {
|
||||
value = module.k8s.endpoint
|
||||
}
|
||||
28
terraform/api/azure/principal.tf
Normal file
28
terraform/api/azure/principal.tf
Normal file
@ -0,0 +1,28 @@
|
||||
resource "azuread_application" "api" {
|
||||
name = "api"
|
||||
available_to_other_tenants = false
|
||||
oauth2_allow_implicit_flow = true
|
||||
}
|
||||
|
||||
resource "azuread_service_principal" "api" {
|
||||
application_id = azuread_application.api.application_id
|
||||
app_role_assignment_required = false
|
||||
}
|
||||
|
||||
resource "random_string" "api_password" {
|
||||
length = 30
|
||||
special = true
|
||||
upper = true
|
||||
}
|
||||
|
||||
resource "azuread_service_principal_password" "api" {
|
||||
service_principal_id = azuread_service_principal.api.id
|
||||
value = random_string.api_password.result
|
||||
end_date = "2099-01-01T00:00:00Z"
|
||||
}
|
||||
|
||||
resource "azurerm_role_assignment" "principal_api_contributor" {
|
||||
scope = data.azurerm_resource_group.rack.id
|
||||
role_definition_name = "Contributor"
|
||||
principal_id = azuread_service_principal.api.id
|
||||
}
|
||||
6
terraform/api/azure/registry.tf
Normal file
6
terraform/api/azure/registry.tf
Normal file
@ -0,0 +1,6 @@
|
||||
resource "azurerm_container_registry" "registry" {
|
||||
name = "${format("%.12s", var.name)}${random_string.suffix.result}"
|
||||
resource_group_name = "${data.azurerm_resource_group.rack.name}"
|
||||
location = "${data.azurerm_resource_group.rack.location}"
|
||||
sku = "Basic"
|
||||
}
|
||||
12
terraform/api/azure/storage.tf
Normal file
12
terraform/api/azure/storage.tf
Normal file
@ -0,0 +1,12 @@
|
||||
resource "azurerm_storage_account" "storage" {
|
||||
name = "${format("%.12s", var.name)}${random_string.suffix.result}"
|
||||
resource_group_name = "${data.azurerm_resource_group.rack.name}"
|
||||
location = "${data.azurerm_resource_group.rack.location}"
|
||||
account_tier = "Standard"
|
||||
account_replication_type = "LRS"
|
||||
}
|
||||
|
||||
resource "azurerm_storage_share" "storage" {
|
||||
name = "storage"
|
||||
storage_account_name = azurerm_storage_account.storage.name
|
||||
}
|
||||
35
terraform/api/azure/variables.tf
Normal file
35
terraform/api/azure/variables.tf
Normal file
@ -0,0 +1,35 @@
|
||||
variable "domain" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "kubeconfig" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "namespace" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "release" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "resource_group" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "router" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "workspace" {
|
||||
type = "string"
|
||||
}
|
||||
@ -259,7 +259,7 @@ resource "kubernetes_ingress" "api" {
|
||||
name = "api"
|
||||
|
||||
annotations = {
|
||||
"convox.idles" : "true"
|
||||
"convox.idles" : "false"
|
||||
"convox.ingress.service.api.5443.protocol" : "https"
|
||||
}
|
||||
|
||||
|
||||
19
terraform/cluster/azure/kubeconfig.tpl
Normal file
19
terraform/cluster/azure/kubeconfig.tpl
Normal file
@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: ${ca}
|
||||
server: ${endpoint}
|
||||
name: gcloud
|
||||
contexts:
|
||||
- context:
|
||||
cluster: gcloud
|
||||
user: gcloud
|
||||
name: gcloud
|
||||
current-context: gcloud
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: gcloud
|
||||
user:
|
||||
client-certificate-data: ${client_certificate}
|
||||
client-key-data: ${client_key}
|
||||
84
terraform/cluster/azure/main.tf
Normal file
84
terraform/cluster/azure/main.tf
Normal file
@ -0,0 +1,84 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12.0"
|
||||
}
|
||||
|
||||
provider "azurerm" {
|
||||
version = "~> 1.36"
|
||||
}
|
||||
|
||||
provider "local" {
|
||||
version = "~> 1.3"
|
||||
}
|
||||
|
||||
provider "random" {
|
||||
version = "~> 2.2"
|
||||
}
|
||||
|
||||
data "azurerm_resource_group" "system" {
|
||||
name = var.resource_group
|
||||
}
|
||||
|
||||
data "azurerm_kubernetes_service_versions" "available" {
|
||||
location = var.region
|
||||
version_prefix = "1.14."
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 6
|
||||
special = false
|
||||
upper = false
|
||||
}
|
||||
|
||||
resource "azurerm_log_analytics_workspace" "rack" {
|
||||
name = "${var.name}-${random_string.suffix.result}"
|
||||
location = "${data.azurerm_resource_group.system.location}"
|
||||
resource_group_name = "${data.azurerm_resource_group.system.name}"
|
||||
sku = "PerGB2018"
|
||||
retention_in_days = 30
|
||||
}
|
||||
|
||||
resource "azurerm_kubernetes_cluster" "rack" {
|
||||
name = var.name
|
||||
location = data.azurerm_resource_group.system.location
|
||||
resource_group_name = data.azurerm_resource_group.system.name
|
||||
dns_prefix = var.name
|
||||
kubernetes_version = data.azurerm_kubernetes_service_versions.available.latest_version
|
||||
|
||||
agent_pool_profile {
|
||||
name = "default"
|
||||
count = 3
|
||||
vm_size = var.node_type
|
||||
os_type = "Linux"
|
||||
os_disk_size_gb = 30
|
||||
}
|
||||
|
||||
addon_profile {
|
||||
oms_agent {
|
||||
enabled = true
|
||||
log_analytics_workspace_id = azurerm_log_analytics_workspace.rack.id
|
||||
}
|
||||
}
|
||||
|
||||
service_principal {
|
||||
client_id = azuread_service_principal.cluster.application_id
|
||||
client_secret = azuread_service_principal_password.cluster.value
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "kubeconfig" {
|
||||
depends_on = [
|
||||
azurerm_kubernetes_cluster.rack,
|
||||
]
|
||||
|
||||
filename = pathexpand("~/.kube/config.azure.${var.name}")
|
||||
content = templatefile("${path.module}/kubeconfig.tpl", {
|
||||
ca = azurerm_kubernetes_cluster.rack.kube_config.0.cluster_ca_certificate
|
||||
endpoint = azurerm_kubernetes_cluster.rack.kube_config.0.host
|
||||
client_certificate = azurerm_kubernetes_cluster.rack.kube_config.0.client_certificate
|
||||
client_key = azurerm_kubernetes_cluster.rack.kube_config.0.client_key
|
||||
})
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [content]
|
||||
}
|
||||
}
|
||||
11
terraform/cluster/azure/outputs.tf
Normal file
11
terraform/cluster/azure/outputs.tf
Normal file
@ -0,0 +1,11 @@
|
||||
output "kubeconfig" {
|
||||
depends_on = [
|
||||
local_file.kubeconfig,
|
||||
azurerm_kubernetes_cluster.rack,
|
||||
]
|
||||
value = local_file.kubeconfig.filename
|
||||
}
|
||||
|
||||
output "workspace" {
|
||||
value = azurerm_log_analytics_workspace.rack.workspace_id
|
||||
}
|
||||
28
terraform/cluster/azure/principal.tf
Normal file
28
terraform/cluster/azure/principal.tf
Normal file
@ -0,0 +1,28 @@
|
||||
resource "azuread_application" "cluster" {
|
||||
name = "cluster"
|
||||
available_to_other_tenants = false
|
||||
oauth2_allow_implicit_flow = true
|
||||
}
|
||||
|
||||
resource "azuread_service_principal" "cluster" {
|
||||
application_id = azuread_application.cluster.application_id
|
||||
app_role_assignment_required = false
|
||||
}
|
||||
|
||||
resource "random_string" "cluster_password" {
|
||||
length = 30
|
||||
special = true
|
||||
upper = true
|
||||
}
|
||||
|
||||
resource "azuread_service_principal_password" "cluster" {
|
||||
service_principal_id = azuread_service_principal.cluster.id
|
||||
value = random_string.cluster_password.result
|
||||
end_date = "2099-01-01T00:00:00Z"
|
||||
}
|
||||
|
||||
resource "azurerm_role_assignment" "cluster-contributor" {
|
||||
scope = data.azurerm_resource_group.system.id
|
||||
role_definition_name = "Contributor"
|
||||
principal_id = azuread_service_principal.cluster.id
|
||||
}
|
||||
15
terraform/cluster/azure/variables.tf
Normal file
15
terraform/cluster/azure/variables.tf
Normal file
@ -0,0 +1,15 @@
|
||||
variable "name" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "node_type" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "resource_group" {
|
||||
type = string
|
||||
}
|
||||
61
terraform/rack/azure/main.tf
Normal file
61
terraform/rack/azure/main.tf
Normal file
@ -0,0 +1,61 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12.0"
|
||||
}
|
||||
|
||||
# Pin the azurerm provider. Fix: the argument was misspelled "varsion",
# which Terraform rejects as an unsupported argument; it must be "version".
provider "azurerm" {
  version = "~> 1.36"
}
|
||||
|
||||
provider "kubernetes" {
|
||||
version = "~> 1.9"
|
||||
|
||||
config_path = var.kubeconfig
|
||||
}
|
||||
|
||||
module "k8s" {
|
||||
source = "../k8s"
|
||||
|
||||
providers = {
|
||||
kubernetes = kubernetes
|
||||
}
|
||||
|
||||
domain = module.router.endpoint
|
||||
kubeconfig = var.kubeconfig
|
||||
name = var.name
|
||||
release = var.release
|
||||
}
|
||||
|
||||
module "api" {
|
||||
source = "../../api/azure"
|
||||
|
||||
providers = {
|
||||
azurerm = azurerm
|
||||
kubernetes = kubernetes
|
||||
}
|
||||
|
||||
domain = module.router.endpoint
|
||||
kubeconfig = var.kubeconfig
|
||||
name = var.name
|
||||
namespace = module.k8s.namespace
|
||||
region = var.region
|
||||
release = var.release
|
||||
resource_group = var.resource_group
|
||||
router = module.router.endpoint
|
||||
# secret = random_string.secret.result
|
||||
workspace = var.workspace
|
||||
}
|
||||
|
||||
module "router" {
|
||||
source = "../../router/azure"
|
||||
|
||||
providers = {
|
||||
azurerm = azurerm
|
||||
kubernetes = kubernetes
|
||||
}
|
||||
|
||||
name = var.name
|
||||
namespace = module.k8s.namespace
|
||||
region = var.region
|
||||
release = var.release
|
||||
resource_group = var.resource_group
|
||||
}
|
||||
7
terraform/rack/azure/outputs.tf
Normal file
7
terraform/rack/azure/outputs.tf
Normal file
@ -0,0 +1,7 @@
|
||||
output "api" {
|
||||
value = module.api.endpoint
|
||||
}
|
||||
|
||||
output "endpoint" {
|
||||
value = module.router.endpoint
|
||||
}
|
||||
174
terraform/rack/azure/registry.tf
Normal file
174
terraform/rack/azure/registry.tf
Normal file
@ -0,0 +1,174 @@
|
||||
# resource "random_string" "suffix" {
|
||||
# length = 12
|
||||
# special = false
|
||||
# upper = false
|
||||
# }
|
||||
|
||||
# resource "digitalocean_spaces_bucket" "registry" {
|
||||
# name = "${var.name}-registry-${random_string.suffix.result}"
|
||||
# region = var.region
|
||||
# acl = "private"
|
||||
# }
|
||||
|
||||
# resource "random_string" "secret" {
|
||||
# length = 30
|
||||
# }
|
||||
|
||||
# resource "kubernetes_deployment" "registry" {
|
||||
# metadata {
|
||||
# namespace = module.k8s.namespace
|
||||
# name = "registry"
|
||||
|
||||
# labels = {
|
||||
# serivce = "registry"
|
||||
# }
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# min_ready_seconds = 1
|
||||
# revision_history_limit = 0
|
||||
|
||||
# selector {
|
||||
# match_labels = {
|
||||
# system = "convox"
|
||||
# service = "registry"
|
||||
# }
|
||||
# }
|
||||
|
||||
# strategy {
|
||||
# type = "RollingUpdate"
|
||||
# rolling_update {
|
||||
# max_surge = 1
|
||||
# max_unavailable = 0
|
||||
# }
|
||||
# }
|
||||
|
||||
# template {
|
||||
# metadata {
|
||||
# labels = {
|
||||
# system = "convox"
|
||||
# service = "registry"
|
||||
# }
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# container {
|
||||
# name = "main"
|
||||
# image = "registry:2"
|
||||
# image_pull_policy = "IfNotPresent"
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_HTTP_SECRET"
|
||||
# value = random_string.secret.result
|
||||
# }
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_STORAGE"
|
||||
# value = "s3"
|
||||
# }
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_STORAGE_S3_ACCESSKEY"
|
||||
# value = var.access_id
|
||||
# }
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_STORAGE_S3_BUCKET"
|
||||
# value = digitalocean_spaces_bucket.registry.name
|
||||
# }
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_STORAGE_S3_REGION"
|
||||
# value = var.region
|
||||
# }
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_STORAGE_S3_REGIONENDPOINT"
|
||||
# value = "https://${var.region}.digitaloceanspaces.com"
|
||||
# }
|
||||
|
||||
# env {
|
||||
# name = "REGISTRY_STORAGE_S3_SECRETKEY"
|
||||
# value = var.secret_key
|
||||
# }
|
||||
|
||||
# port {
|
||||
# container_port = 5000
|
||||
# protocol = "TCP"
|
||||
# }
|
||||
|
||||
# volume_mount {
|
||||
# name = "registry"
|
||||
# mount_path = "/var/lib/registry"
|
||||
# }
|
||||
# }
|
||||
|
||||
# volume {
|
||||
# name = "registry"
|
||||
|
||||
# host_path {
|
||||
# path = "/var/lib/registry"
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
||||
# resource "kubernetes_service" "registry" {
|
||||
# metadata {
|
||||
# namespace = module.k8s.namespace
|
||||
# name = "registry"
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# type = "ClusterIP"
|
||||
|
||||
# selector = {
|
||||
# system = "convox"
|
||||
# service = "registry"
|
||||
# }
|
||||
|
||||
# port {
|
||||
# name = "http"
|
||||
# port = 80
|
||||
# target_port = 5000
|
||||
# protocol = "TCP"
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# resource "kubernetes_ingress" "registry" {
|
||||
# metadata {
|
||||
# namespace = module.k8s.namespace
|
||||
# name = "registry"
|
||||
|
||||
# annotations = {
|
||||
# "convox.idles" : "true"
|
||||
# }
|
||||
|
||||
# labels = {
|
||||
# system = "convox"
|
||||
# service = "registry"
|
||||
# }
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# tls {
|
||||
# hosts = ["registry.${module.router.endpoint}"]
|
||||
# }
|
||||
|
||||
# rule {
|
||||
# host = "registry.${module.router.endpoint}"
|
||||
|
||||
# http {
|
||||
# path {
|
||||
# backend {
|
||||
# service_name = kubernetes_service.registry.metadata.0.name
|
||||
# service_port = 80
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
||||
27
terraform/rack/azure/variables.tf
Normal file
27
terraform/rack/azure/variables.tf
Normal file
@ -0,0 +1,27 @@
|
||||
# variable "identity" {
|
||||
# type = "string"
|
||||
# }
|
||||
|
||||
variable "kubeconfig" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "release" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "resource_group" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "workspace" {
|
||||
type = "string"
|
||||
}
|
||||
82
terraform/router/azure/main.tf
Normal file
82
terraform/router/azure/main.tf
Normal file
@ -0,0 +1,82 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12.0"
|
||||
}
|
||||
|
||||
provider "azurerm" {
|
||||
version = "~> 1.36"
|
||||
}
|
||||
|
||||
provider "http" {
|
||||
version = "~> 1.1"
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
version = "~> 1.9"
|
||||
}
|
||||
|
||||
locals {
|
||||
tags = {
|
||||
System = "convox"
|
||||
Rack = var.name
|
||||
}
|
||||
}
|
||||
|
||||
data "azurerm_resource_group" "rack" {
|
||||
name = var.resource_group
|
||||
}
|
||||
|
||||
module "k8s" {
|
||||
source = "../k8s"
|
||||
|
||||
providers = {
|
||||
kubernetes = kubernetes
|
||||
}
|
||||
|
||||
namespace = var.namespace
|
||||
release = var.release
|
||||
|
||||
env = {
|
||||
CACHE = "redis"
|
||||
REDIS_ADDR = "${azurerm_redis_cache.cache.hostname}:${azurerm_redis_cache.cache.ssl_port}"
|
||||
REDIS_AUTH = azurerm_redis_cache.cache.primary_access_key
|
||||
REDIS_SECURE = "true"
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_service" "router" {
|
||||
metadata {
|
||||
namespace = var.namespace
|
||||
name = "router"
|
||||
}
|
||||
|
||||
spec {
|
||||
type = "LoadBalancer"
|
||||
|
||||
port {
|
||||
name = "http"
|
||||
port = 80
|
||||
protocol = "TCP"
|
||||
target_port = 80
|
||||
}
|
||||
|
||||
port {
|
||||
name = "https"
|
||||
port = 443
|
||||
protocol = "TCP"
|
||||
target_port = 443
|
||||
}
|
||||
|
||||
selector = {
|
||||
system = "convox"
|
||||
service = "router"
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [metadata[0].annotations]
|
||||
}
|
||||
}
|
||||
|
||||
data "http" "alias" {
|
||||
url = "https://alias.convox.com/alias/${kubernetes_service.router.load_balancer_ingress.0.ip}"
|
||||
}
|
||||
4
terraform/router/azure/outputs.tf
Normal file
4
terraform/router/azure/outputs.tf
Normal file
@ -0,0 +1,4 @@
|
||||
output "endpoint" {
|
||||
value = data.http.alias.body
|
||||
}
|
||||
|
||||
8
terraform/router/azure/redis.tf
Normal file
8
terraform/router/azure/redis.tf
Normal file
@ -0,0 +1,8 @@
|
||||
resource "azurerm_redis_cache" "cache" {
|
||||
name = "${var.name}-router"
|
||||
location = data.azurerm_resource_group.rack.location
|
||||
resource_group_name = data.azurerm_resource_group.rack.name
|
||||
capacity = 0
|
||||
family = "C"
|
||||
sku_name = "Basic"
|
||||
}
|
||||
19
terraform/router/azure/variables.tf
Normal file
19
terraform/router/azure/variables.tf
Normal file
@ -0,0 +1,19 @@
|
||||
variable "name" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "namespace" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "release" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "resource_group" {
|
||||
type = "string"
|
||||
}
|
||||
172
terraform/system/azure/identity/deployment.yml
Normal file
172
terraform/system/azure/identity/deployment.yml
Normal file
@ -0,0 +1,172 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: azureassignedidentities.aadpodidentity.k8s.io
|
||||
spec:
|
||||
group: aadpodidentity.k8s.io
|
||||
version: v1
|
||||
names:
|
||||
kind: AzureAssignedIdentity
|
||||
plural: azureassignedidentities
|
||||
scope: Namespaced
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: azureidentitybindings.aadpodidentity.k8s.io
|
||||
spec:
|
||||
group: aadpodidentity.k8s.io
|
||||
version: v1
|
||||
names:
|
||||
kind: AzureIdentityBinding
|
||||
plural: azureidentitybindings
|
||||
scope: Namespaced
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: azureidentities.aadpodidentity.k8s.io
|
||||
spec:
|
||||
group: aadpodidentity.k8s.io
|
||||
version: v1
|
||||
names:
|
||||
kind: AzureIdentity
|
||||
singular: azureidentity
|
||||
plural: azureidentities
|
||||
scope: Namespaced
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: azurepodidentityexceptions.aadpodidentity.k8s.io
|
||||
spec:
|
||||
group: aadpodidentity.k8s.io
|
||||
version: v1
|
||||
names:
|
||||
kind: AzurePodIdentityException
|
||||
singular: azurepodidentityexception
|
||||
plural: azurepodidentityexceptions
|
||||
scope: Namespaced
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: nmi
|
||||
namespace: kube-system
|
||||
spec:
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
component: nmi
|
||||
tier: node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: nmi
|
||||
tier: node
|
||||
spec:
|
||||
hostNetwork: true
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
name: iptableslock
|
||||
containers:
|
||||
- name: nmi
|
||||
image: "mcr.microsoft.com/k8s/aad-pod-identity/nmi:1.5.3"
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "--host-ip=$(HOST_IP)"
|
||||
- "--node=$(NODE_NAME)"
|
||||
env:
|
||||
- name: HOST_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- mountPath: /run/xtables.lock
|
||||
name: iptableslock
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
component: mic
|
||||
name: mic
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
component: mic
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: mic
|
||||
spec:
|
||||
containers:
|
||||
- name: mic
|
||||
image: "mcr.microsoft.com/k8s/aad-pod-identity/mic:1.5.3"
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "--kubeconfig=/etc/kubernetes/kubeconfig/kubeconfig"
|
||||
- "--cloudconfig=/etc/kubernetes/azure.json"
|
||||
- "--logtostderr"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 1024Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: kubeconfig
|
||||
mountPath: /etc/kubernetes/kubeconfig
|
||||
readOnly: true
|
||||
- name: certificates
|
||||
mountPath: /etc/kubernetes/certs
|
||||
readOnly: true
|
||||
- name: k8s-azure-file
|
||||
mountPath: /etc/kubernetes/azure.json
|
||||
readOnly: true
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
volumes:
|
||||
- name: kubeconfig
|
||||
hostPath:
|
||||
path: /var/lib/kubelet
|
||||
- name: certificates
|
||||
hostPath:
|
||||
path: /etc/kubernetes/certs
|
||||
- name: k8s-azure-file
|
||||
hostPath:
|
||||
path: /etc/kubernetes/azure.json
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
27
terraform/system/azure/identity/main.tf
Normal file
27
terraform/system/azure/identity/main.tf
Normal file
@ -0,0 +1,27 @@
|
||||
provider "kubernetes" {
|
||||
version = "~> 1.8"
|
||||
|
||||
config_path = var.kubeconfig
|
||||
}
|
||||
|
||||
resource "null_resource" "deployment" {
|
||||
provisioner "local-exec" {
|
||||
when = "create"
|
||||
command = "kubectl apply -f ${path.module}/deployment.yml"
|
||||
environment = {
|
||||
"KUBECONFIG" : var.kubeconfig,
|
||||
}
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
when = "destroy"
|
||||
command = "kubectl delete -f ${path.module}/deployment.yml"
|
||||
environment = {
|
||||
"KUBECONFIG" : var.kubeconfig,
|
||||
}
|
||||
}
|
||||
|
||||
triggers = {
|
||||
template = filesha256("${path.module}/deployment.yml")
|
||||
}
|
||||
}
|
||||
3
terraform/system/azure/identity/outputs.tf
Normal file
3
terraform/system/azure/identity/outputs.tf
Normal file
@ -0,0 +1,3 @@
|
||||
output "id" {
|
||||
value = null_resource.deployment.id
|
||||
}
|
||||
3
terraform/system/azure/identity/variables.tf
Normal file
3
terraform/system/azure/identity/variables.tf
Normal file
@ -0,0 +1,3 @@
|
||||
variable "kubeconfig" {
|
||||
type = string
|
||||
}
|
||||
69
terraform/system/azure/main.tf
Normal file
69
terraform/system/azure/main.tf
Normal file
@ -0,0 +1,69 @@
|
||||
provider "azurerm" {
|
||||
version = "~> 1.36"
|
||||
}
|
||||
|
||||
provider "http" {
|
||||
version = "~> 1.1"
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
version = "~> 1.9"
|
||||
|
||||
config_path = module.cluster.kubeconfig
|
||||
}
|
||||
|
||||
data "http" "releases" {
|
||||
url = "https://api.github.com/repos/convox/convox/releases"
|
||||
}
|
||||
|
||||
locals {
|
||||
current = jsondecode(data.http.releases.body).0.tag_name
|
||||
release = coalesce(var.release, local.current)
|
||||
}
|
||||
|
||||
data "azurerm_client_config" "current" {}
|
||||
|
||||
resource "azurerm_resource_group" "rack" {
|
||||
name = var.name
|
||||
location = var.region
|
||||
}
|
||||
|
||||
module "cluster" {
|
||||
source = "../../cluster/azure"
|
||||
|
||||
providers = {
|
||||
azurerm = azurerm
|
||||
}
|
||||
|
||||
name = var.name
|
||||
node_type = var.node_type
|
||||
region = var.region
|
||||
resource_group = azurerm_resource_group.rack.name
|
||||
}
|
||||
|
||||
# module "identity" {
|
||||
# source = "./identity"
|
||||
|
||||
# providers = {
|
||||
# kubernetes = kubernetes
|
||||
# }
|
||||
|
||||
# kubeconfig = module.cluster.kubeconfig
|
||||
# }
|
||||
|
||||
module "rack" {
|
||||
source = "../../rack/azure"
|
||||
|
||||
providers = {
|
||||
azurerm = azurerm
|
||||
kubernetes = kubernetes
|
||||
}
|
||||
|
||||
# identity = module.identity.id
|
||||
kubeconfig = module.cluster.kubeconfig
|
||||
name = var.name
|
||||
region = var.region
|
||||
release = local.release
|
||||
resource_group = azurerm_resource_group.rack.name
|
||||
workspace = module.cluster.workspace
|
||||
}
|
||||
7
terraform/system/azure/outputs.tf
Normal file
7
terraform/system/azure/outputs.tf
Normal file
@ -0,0 +1,7 @@
|
||||
output "api" {
|
||||
value = module.rack.api
|
||||
}
|
||||
|
||||
output "endpoint" {
|
||||
value = module.rack.endpoint
|
||||
}
|
||||
15
terraform/system/azure/variables.tf
Normal file
15
terraform/system/azure/variables.tf
Normal file
@ -0,0 +1,15 @@
|
||||
variable "name" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "node_type" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "release" {
|
||||
default = ""
|
||||
}
|
||||
21
vendor/github.com/Azure/azure-pipeline-go/LICENSE
generated
vendored
Normal file
21
vendor/github.com/Azure/azure-pipeline-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE
|
||||
284
vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
generated
vendored
Normal file
284
vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/mattn/go-ieproxy"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
|
||||
// requires that this Factory create a new instance of its Policy object.
|
||||
type Factory interface {
|
||||
New(next Policy, po *PolicyOptions) Policy
|
||||
}
|
||||
|
||||
// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
|
||||
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc
|
||||
|
||||
// New calls f(next,po).
|
||||
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
|
||||
return f(next, po)
|
||||
}
|
||||
|
||||
// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
|
||||
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
|
||||
// Response goes backward through the linked-list for additional processing.
|
||||
// NOTE: Request is passed by value so changes do not change the caller's version of
|
||||
// the request. However, Request has some fields that reference mutable objects (not strings).
|
||||
// These references are copied; a deep copy is not performed. Specifically, this means that
|
||||
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
|
||||
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
|
||||
type Policy interface {
|
||||
Do(ctx context.Context, request Request) (Response, error)
|
||||
}
|
||||
|
||||
// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
|
||||
type PolicyFunc func(ctx context.Context, request Request) (Response, error)
|
||||
|
||||
// Do calls f(ctx, request).
|
||||
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
|
||||
return f(ctx, request)
|
||||
}
|
||||
|
||||
// Options configures a Pipeline's behavior.
|
||||
type Options struct {
|
||||
HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
|
||||
Log LogOptions
|
||||
}
|
||||
|
||||
// LogLevel tells a logger the minimum level to log. When code reports a log entry,
|
||||
// the LogLevel indicates the level of the log entry. The logger only records entries
|
||||
// whose level is at least the level it was told to log. See the Log* constants.
|
||||
// For example, if a logger is configured with LogError, then LogError, LogPanic,
|
||||
// and LogFatal entries will be logged; lower level entries are ignored.
|
||||
type LogLevel uint32
|
||||
|
||||
const (
|
||||
// LogNone tells a logger not to log any entries passed to it.
|
||||
LogNone LogLevel = iota
|
||||
|
||||
// LogFatal tells a logger to log all LogFatal entries passed to it.
|
||||
LogFatal
|
||||
|
||||
// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
|
||||
LogPanic
|
||||
|
||||
// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogError
|
||||
|
||||
// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogWarning
|
||||
|
||||
// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogInfo
|
||||
|
||||
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogDebug
|
||||
)
|
||||
|
||||
// LogOptions configures the pipeline's logging mechanism & level filtering.
|
||||
type LogOptions struct {
|
||||
Log func(level LogLevel, message string)
|
||||
|
||||
// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
|
||||
// An application can return different values over the its lifetime; this allows the application to dynamically
|
||||
// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
|
||||
// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
|
||||
// Usually, the function will be implemented simply like this: return level <= LogWarning
|
||||
ShouldLog func(level LogLevel) bool
|
||||
}
|
||||
|
||||
type pipeline struct {
|
||||
factories []Factory
|
||||
options Options
|
||||
}
|
||||
|
||||
// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
|
||||
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
|
||||
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
|
||||
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
|
||||
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
|
||||
//
|
||||
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
|
||||
// THen, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
|
||||
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
|
||||
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
|
||||
type Pipeline interface {
|
||||
Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
|
||||
}
|
||||
|
||||
// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
|
||||
func NewPipeline(factories []Factory, o Options) Pipeline {
|
||||
if o.HTTPSender == nil {
|
||||
o.HTTPSender = newDefaultHTTPClientFactory()
|
||||
}
|
||||
if o.Log.Log == nil {
|
||||
o.Log.Log = func(LogLevel, string) {} // No-op logger
|
||||
}
|
||||
return &pipeline{factories: factories, options: o}
|
||||
}
|
||||
|
||||
// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
|
||||
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
|
||||
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
|
||||
// ultimately sends the transformed HTTP request over the network.
|
||||
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
|
||||
response, err := p.newPolicies(methodFactory).Do(ctx, request)
|
||||
request.close()
|
||||
return response, err
|
||||
}
|
||||
|
||||
func (p *pipeline) newPolicies(methodFactory Factory) Policy {
|
||||
// The last Policy is the one that actually sends the request over the wire and gets the response.
|
||||
// It is overridable via the Options' HTTPSender field.
|
||||
po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
|
||||
next := p.options.HTTPSender.New(nil, po)
|
||||
|
||||
// Walk over the slice of Factory objects in reverse (from wire to API)
|
||||
markers := 0
|
||||
for i := len(p.factories) - 1; i >= 0; i-- {
|
||||
factory := p.factories[i]
|
||||
if _, ok := factory.(methodFactoryMarker); ok {
|
||||
markers++
|
||||
if markers > 1 {
|
||||
panic("MethodFactoryMarker can only appear once in the pipeline")
|
||||
}
|
||||
if methodFactory != nil {
|
||||
// Replace MethodFactoryMarker with passed-in methodFactory
|
||||
next = methodFactory.New(next, po)
|
||||
}
|
||||
} else {
|
||||
// Use the slice's Factory to construct its Policy
|
||||
next = factory.New(next, po)
|
||||
}
|
||||
}
|
||||
|
||||
// Each Factory has created its Policy
|
||||
if markers == 0 && methodFactory != nil {
|
||||
panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
|
||||
}
|
||||
return next // Return head of the Policy object linked-list
|
||||
}
|
||||
|
||||
// A PolicyOptions represents optional information that can be used by a node in the
|
||||
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
|
||||
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
|
||||
// uses the options to perform logging. But, in the future, this could be used for more.
|
||||
type PolicyOptions struct {
|
||||
pipeline *pipeline
|
||||
}
|
||||
|
||||
// ShouldLog returns true if the specified log level should be logged.
|
||||
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
|
||||
if po.pipeline.options.Log.ShouldLog != nil {
|
||||
return po.pipeline.options.Log.ShouldLog(level)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Log logs a string to the Pipeline's Logger.
|
||||
func (po *PolicyOptions) Log(level LogLevel, msg string) {
|
||||
if !po.ShouldLog(level) {
|
||||
return // Short circuit message formatting if we're not logging it
|
||||
}
|
||||
|
||||
// We are logging it, ensure trailing newline
|
||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
||||
msg += "\n" // Ensure trailing newline
|
||||
}
|
||||
po.pipeline.options.Log.Log(level, msg)
|
||||
|
||||
// If logger doesn't handle fatal/panic, we'll do it here.
|
||||
if level == LogFatal {
|
||||
os.Exit(1)
|
||||
} else if level == LogPanic {
|
||||
panic(msg)
|
||||
}
|
||||
}
|
||||
|
||||
var pipelineHTTPClient = newDefaultHTTPClient()
|
||||
|
||||
func newDefaultHTTPClient() *http.Client {
|
||||
// We want the Transport to have a large connection pool
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: ieproxy.GetProxyFunc(),
|
||||
// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
|
||||
Dial /*Context*/ : (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).Dial, /*Context*/
|
||||
MaxIdleConns: 0, // No limit
|
||||
MaxIdleConnsPerHost: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
DisableKeepAlives: false,
|
||||
DisableCompression: false,
|
||||
MaxResponseHeaderBytes: 0,
|
||||
//ResponseHeaderTimeout: time.Duration{},
|
||||
//ExpectContinueTimeout: time.Duration{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to a Go's default http.Client.
|
||||
func newDefaultHTTPClientFactory() Factory {
|
||||
return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
|
||||
return func(ctx context.Context, request Request) (Response, error) {
|
||||
r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
|
||||
if err != nil {
|
||||
err = NewError(err, "HTTP request failed")
|
||||
}
|
||||
return NewHTTPResponse(r), err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var mfm = methodFactoryMarker{} // Singleton
|
||||
|
||||
// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
|
||||
// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed fro Do's
|
||||
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
|
||||
func MethodFactoryMarker() Factory {
|
||||
return mfm
|
||||
}
|
||||
|
||||
type methodFactoryMarker struct {
|
||||
}
|
||||
|
||||
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
|
||||
panic("methodFactoryMarker policy should have been replaced with a method policy")
|
||||
}
|
||||
|
||||
// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog
|
||||
// By default no implemetation is provided here, because pipeline may be used in many different
|
||||
// contexts, so the correct implementation is context-dependent
|
||||
type LogSanitizer interface {
|
||||
SanitizeLogMessage(raw string) string
|
||||
}
|
||||
|
||||
var sanitizer LogSanitizer
|
||||
var enableForceLog bool = true
|
||||
|
||||
// SetLogSanitizer can be called to supply a custom LogSanitizer.
|
||||
// There is no threadsafety or locking on the underlying variable,
|
||||
// so call this function just once at startup of your application
|
||||
// (Don't later try to change the sanitizer on the fly).
|
||||
func SetLogSanitizer(s LogSanitizer)(){
|
||||
sanitizer = s
|
||||
}
|
||||
|
||||
// SetForceLogEnabled can be used to disable ForceLog
|
||||
// There is no threadsafety or locking on the underlying variable,
|
||||
// so call this function just once at startup of your application
|
||||
// (Don't later try to change the setting on the fly).
|
||||
func SetForceLogEnabled(enable bool)() {
|
||||
enableForceLog = enable
|
||||
}
|
||||
|
||||
|
||||
14
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
generated
vendored
Normal file
14
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
package pipeline
|
||||
|
||||
|
||||
// ForceLog should rarely be used. It forceable logs an entry to the
|
||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||
func ForceLog(level LogLevel, msg string) {
|
||||
if !enableForceLog {
|
||||
return
|
||||
}
|
||||
if sanitizer != nil {
|
||||
msg = sanitizer.SanitizeLogMessage(msg)
|
||||
}
|
||||
forceLog(level, msg)
|
||||
}
|
||||
33
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
generated
vendored
Normal file
33
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// +build !windows,!nacl,!plan9
|
||||
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"log"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
// forceLog should rarely be used. It forceable logs an entry to the
|
||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||
func forceLog(level LogLevel, msg string) {
|
||||
if defaultLogger == nil {
|
||||
return // Return fast if we failed to create the logger.
|
||||
}
|
||||
// We are logging it, ensure trailing newline
|
||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
||||
msg += "\n" // Ensure trailing newline
|
||||
}
|
||||
switch level {
|
||||
case LogFatal:
|
||||
defaultLogger.Fatal(msg)
|
||||
case LogPanic:
|
||||
defaultLogger.Panic(msg)
|
||||
case LogError, LogWarning, LogInfo:
|
||||
defaultLogger.Print(msg)
|
||||
}
|
||||
}
|
||||
|
||||
var defaultLogger = func() *log.Logger {
|
||||
l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
|
||||
return l
|
||||
}()
|
||||
61
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
generated
vendored
Normal file
61
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// forceLog should rarely be used. It forceable logs an entry to the
|
||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||
func forceLog(level LogLevel, msg string) {
|
||||
var el eventType
|
||||
switch level {
|
||||
case LogError, LogFatal, LogPanic:
|
||||
el = elError
|
||||
case LogWarning:
|
||||
el = elWarning
|
||||
case LogInfo:
|
||||
el = elInfo
|
||||
}
|
||||
// We are logging it, ensure trailing newline
|
||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
||||
msg += "\n" // Ensure trailing newline
|
||||
}
|
||||
reportEvent(el, 0, msg)
|
||||
}
|
||||
|
||||
type eventType int16
|
||||
|
||||
const (
|
||||
elSuccess eventType = 0
|
||||
elError eventType = 1
|
||||
elWarning eventType = 2
|
||||
elInfo eventType = 4
|
||||
)
|
||||
|
||||
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
|
||||
advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
|
||||
registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
|
||||
|
||||
sourceName, _ := os.Executable()
|
||||
sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
|
||||
handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
|
||||
if lastErr == nil { // On error, logging is a no-op
|
||||
return func(eventType eventType, eventID int32, msg string) {}
|
||||
}
|
||||
reportEvent := advAPI32.MustFindProc("ReportEventW")
|
||||
return func(eventType eventType, eventID int32, msg string) {
|
||||
s, _ := syscall.UTF16PtrFromString(msg)
|
||||
_, _, _ = reportEvent.Call(
|
||||
uintptr(handle), // HANDLE hEventLog
|
||||
uintptr(eventType), // WORD wType
|
||||
uintptr(0), // WORD wCategory
|
||||
uintptr(eventID), // DWORD dwEventID
|
||||
uintptr(0), // PSID lpUserSid
|
||||
uintptr(1), // WORD wNumStrings
|
||||
uintptr(0), // DWORD dwDataSize
|
||||
uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
|
||||
uintptr(0)) // LPVOID lpRawData
|
||||
}
|
||||
}()
|
||||
161
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
generated
vendored
Normal file
161
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
// Copyright 2017 Microsoft Corporation. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package pipeline implements an HTTP request/response middleware pipeline whose
|
||||
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
|
||||
the request is sent over the wire.
|
||||
|
||||
Not all policy objects mutate an HTTP request; some policy objects simply impact the
|
||||
flow of requests/responses by performing operations such as logging, retry policies,
|
||||
timeouts, failure injection, and deserialization of response payloads.
|
||||
|
||||
Implementing the Policy Interface
|
||||
|
||||
To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
|
||||
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
|
||||
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
|
||||
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object
|
||||
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
|
||||
object sends the HTTP request over the network (by calling the HTTPSender's Do method).
|
||||
|
||||
When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
|
||||
(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
|
||||
or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
|
||||
to the code that initiated the original HTTP request.
|
||||
|
||||
Here is a template for how to define a pipeline.Policy object:
|
||||
|
||||
type myPolicy struct {
|
||||
node PolicyNode
|
||||
// TODO: Add configuration/setting fields here (if desired)...
|
||||
}
|
||||
|
||||
func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
// TODO: Mutate/process the HTTP request here...
|
||||
response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
|
||||
// TODO: Mutate/process the HTTP response here...
|
||||
return response, err // Return response/error to previous Policy
|
||||
}
|
||||
|
||||
Implementing the Factory Interface
|
||||
|
||||
Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
|
||||
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
|
||||
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
|
||||
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
|
||||
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
|
||||
|
||||
Here is a template for how to define a pipeline.Policy object:
|
||||
|
||||
// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
|
||||
// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
|
||||
type myPolicyFactory struct {
|
||||
// TODO: Add any configuration/setting fields if desired...
|
||||
}
|
||||
|
||||
func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
|
||||
return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
|
||||
}
|
||||
|
||||
Using your Factory and Policy objects via a Pipeline
|
||||
|
||||
To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
|
||||
this slice to the pipeline.NewPipeline function.
|
||||
|
||||
func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
|
||||
|
||||
This function also requires an object implementing the HTTPSender interface. For simple scenarios,
|
||||
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
|
||||
send the HTTP response over the network. For more advanced scenarios, you can pass your own HTTPSender
|
||||
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
|
||||
or other objects that can simulate the network requests for testing purposes.
|
||||
|
||||
Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
|
||||
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
|
||||
context.Context for cancelling the HTTP request (if desired).
|
||||
|
||||
type Pipeline interface {
|
||||
Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
|
||||
}
|
||||
|
||||
Do iterates over the slice of Factory objects and tells each one to create its corresponding
|
||||
Policy object. After the linked-list of Policy objects have been created, Do calls the first
|
||||
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
|
||||
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
|
||||
The last Policy object sends the message over the network.
|
||||
|
||||
When the network operation completes, the HTTP response and error return values pass
|
||||
back through the same Policy objects in reverse order. Most Policy objects ignore the
|
||||
response/error but some log the result, retry the operation (depending on the exact
|
||||
reason the operation failed), or deserialize the response's body. Your own Policy
|
||||
objects can do whatever they like when processing outgoing requests or incoming responses.
|
||||
|
||||
Note that after an I/O request runs to completion, the Policy objects for that request
|
||||
are garbage collected. However, Pipeline object (like Factory objects) are goroutine-safe allowing
|
||||
them to be created once and reused over many I/O operations. This allows for efficient use of
|
||||
memory and also makes them safely usable by multiple goroutines concurrently.
|
||||
|
||||
Inserting a Method-Specific Factory into the Linked-List of Policy Objects
|
||||
|
||||
While Pipeline and Factory objects can be reused over many different operations, it is
|
||||
common to have special behavior for a specific operation/method. For example, a method
|
||||
may need to deserialize the response's body to an instance of a specific data type.
|
||||
To accommodate this, the Pipeline's Do method takes an additional method-specific
|
||||
Factory object. The Do method tells this Factory to create a Policy object and
|
||||
injects this method-specific Policy object into the linked-list of Policy objects.
|
||||
|
||||
When creating a Pipeline object, the slice of Factory objects passed must have 1
|
||||
(and only 1) entry marking where the method-specific Factory should be injected.
|
||||
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
|
||||
|
||||
func MethodFactoryMarker() pipeline.Factory
|
||||
|
||||
Creating an HTTP Request Object
|
||||
|
||||
The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
|
||||
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
|
||||
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
|
||||
|
||||
func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
|
||||
|
||||
To this function, you must pass a pipeline.RequestOptions that looks like this:
|
||||
|
||||
type RequestOptions struct {
|
||||
// The readable and seekable stream to be sent to the server as the request's body.
|
||||
Body io.ReadSeeker
|
||||
|
||||
// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
|
||||
Progress ProgressReceiver
|
||||
}
|
||||
|
||||
The method and struct ensure that the request's body stream is a read/seekable stream.
|
||||
A seekable stream is required so that upon retry, the final Policy object can seek
|
||||
the stream back to the beginning before retrying the network request and re-uploading the
|
||||
body. In addition, you can associate a ProgressReceiver callback function which will be
|
||||
invoked periodically to report progress while bytes are being read from the body stream
|
||||
and sent over the network.
|
||||
|
||||
Processing the HTTP Response
|
||||
|
||||
When an HTTP response comes in from the network, a reference to Go's http.Response struct is
|
||||
embedded in a struct that implements the pipeline.Response interface:
|
||||
|
||||
type Response interface {
|
||||
Response() *http.Response
|
||||
}
|
||||
|
||||
This interface is returned through all the Policy objects. Each Policy object can call the Response
|
||||
interface's Response method to examine (or mutate) the embedded http.Response object.
|
||||
|
||||
A Policy object can internally define another struct (implementing the pipeline.Response interface)
|
||||
that embeds an http.Response and adds additional fields and return this structure to other Policy
|
||||
objects. This allows a Policy object to deserialize the body to some other struct and return the
|
||||
original http.Response and the additional struct back through the Policy chain. Other Policy objects
|
||||
can see the Response but cannot see the additional struct with the deserialized body. After all the
|
||||
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
|
||||
The caller of this method can perform a type assertion attempting to get back to the struct type
|
||||
really returned by the Policy object. If the type assertion is successful, the caller now has
|
||||
access to both the http.Response and the deserialized struct object.*/
|
||||
package pipeline
|
||||
181
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
generated
vendored
Normal file
181
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
func errorWithPC(msg string, pc uintptr) string {
|
||||
s := ""
|
||||
if fn := runtime.FuncForPC(pc); fn != nil {
|
||||
file, line := fn.FileLine(pc)
|
||||
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
|
||||
}
|
||||
s += msg + "\n\n"
|
||||
return s
|
||||
}
|
||||
|
||||
func getPC(callersToSkip int) uintptr {
|
||||
// Get the PC of Initialize method's caller.
|
||||
pc := [1]uintptr{}
|
||||
_ = runtime.Callers(callersToSkip, pc[:])
|
||||
return pc[0]
|
||||
}
|
||||
|
||||
// ErrorNode can be an embedded field in a private error object. This field
|
||||
// adds Program Counter support and a 'cause' (reference to a preceding error).
|
||||
// When initializing a error type with this embedded field, initialize the
|
||||
// ErrorNode field by calling ErrorNode{}.Initialize(cause).
|
||||
type ErrorNode struct {
|
||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
||||
cause error // Refers to the preceding error (or nil)
|
||||
}
|
||||
|
||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
||||
// When defining a new error type, have its Error method call this one passing
|
||||
// it the string representation of the error.
|
||||
func (e *ErrorNode) Error(msg string) string {
|
||||
s := errorWithPC(msg, e.pc)
|
||||
if e.cause != nil {
|
||||
s += e.cause.Error() + "\n"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Cause returns the error that preceded this error.
|
||||
func (e *ErrorNode) Cause() error { return e.cause }
|
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition.
|
||||
func (e ErrorNode) Temporary() bool {
|
||||
type temporary interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
for err := e.cause; err != nil; {
|
||||
if t, ok := err.(temporary); ok {
|
||||
return t.Temporary()
|
||||
}
|
||||
|
||||
if cause, ok := err.(causer); ok {
|
||||
err = cause.Cause()
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Timeout returns true if the error occurred due to time expiring.
|
||||
func (e ErrorNode) Timeout() bool {
|
||||
type timeout interface {
|
||||
Timeout() bool
|
||||
}
|
||||
|
||||
for err := e.cause; err != nil; {
|
||||
if t, ok := err.(timeout); ok {
|
||||
return t.Timeout()
|
||||
}
|
||||
|
||||
if cause, ok := err.(causer); ok {
|
||||
err = cause.Cause()
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Initialize is used to initialize an embedded ErrorNode field.
|
||||
// It captures the caller's program counter and saves the cause (preceding error).
|
||||
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
|
||||
// value of 3 is very common; but, depending on your code nesting, you may need
|
||||
// a different value.
|
||||
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
|
||||
pc := getPC(callersToSkip)
|
||||
return ErrorNode{pc: pc, cause: cause}
|
||||
}
|
||||
|
||||
// Cause walks all the preceding errors and return the originating error.
|
||||
func Cause(err error) error {
|
||||
for err != nil {
|
||||
cause, ok := err.(causer)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
err = cause.Cause()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ErrorNodeNoCause can be an embedded field in a private error object. This field
|
||||
// adds Program Counter support.
|
||||
// When initializing a error type with this embedded field, initialize the
|
||||
// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
|
||||
type ErrorNodeNoCause struct {
|
||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
||||
}
|
||||
|
||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
||||
// When defining a new error type, have its Error method call this one passing
|
||||
// it the string representation of the error.
|
||||
func (e *ErrorNodeNoCause) Error(msg string) string {
|
||||
return errorWithPC(msg, e.pc)
|
||||
}
|
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition.
|
||||
func (e ErrorNodeNoCause) Temporary() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Timeout returns true if the error occurred due to time expiring.
|
||||
func (e ErrorNodeNoCause) Timeout() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Initialize is used to initialize an embedded ErrorNode field.
|
||||
// It captures the caller's program counter.
|
||||
// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
|
||||
// value of 3 is very common; but, depending on your code nesting, you may need
|
||||
// a different value.
|
||||
func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
|
||||
pc := getPC(callersToSkip)
|
||||
return ErrorNodeNoCause{pc: pc}
|
||||
}
|
||||
|
||||
// NewError creates a simple string error (like Error.New). But, this
|
||||
// error also captures the caller's Program Counter and the preceding error (if provided).
|
||||
func NewError(cause error, msg string) error {
|
||||
if cause != nil {
|
||||
return &pcError{
|
||||
ErrorNode: ErrorNode{}.Initialize(cause, 3),
|
||||
msg: msg,
|
||||
}
|
||||
}
|
||||
return &pcErrorNoCause{
|
||||
ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
|
||||
msg: msg,
|
||||
}
|
||||
}
|
||||
|
||||
// pcError is a simple string error (like error.New) with an ErrorNode (PC & cause).
|
||||
type pcError struct {
|
||||
ErrorNode
|
||||
msg string
|
||||
}
|
||||
|
||||
// Error satisfies the error interface. It shows the error with Program Counter
|
||||
// symbols and calls Error on the preceding error so you can see the full error chain.
|
||||
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
|
||||
|
||||
// pcErrorNoCause is a simple string error (like error.New) with an ErrorNode (PC).
|
||||
type pcErrorNoCause struct {
|
||||
ErrorNodeNoCause
|
||||
msg string
|
||||
}
|
||||
|
||||
// Error satisfies the error interface. It shows the error with Program Counter symbols.
|
||||
func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
|
||||
82
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
generated
vendored
Normal file
82
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
package pipeline
|
||||
|
||||
import "io"
|
||||
|
||||
// ********** The following is common between the request body AND the response body.
|
||||
|
||||
// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
|
||||
type ProgressReceiver func(bytesTransferred int64)
|
||||
|
||||
// ********** The following are specific to the request body (a ReadSeekCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type requestBodyProgress struct {
|
||||
requestBody io.ReadSeeker // Seeking is required to support retries
|
||||
pr ProgressReceiver
|
||||
}
|
||||
|
||||
// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
|
||||
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
|
||||
if pr == nil {
|
||||
panic("pr must not be nil")
|
||||
}
|
||||
return &requestBodyProgress{requestBody: requestBody, pr: pr}
|
||||
}
|
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
|
||||
n, err = rbp.requestBody.Read(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Invokes the user's callback method to report progress
|
||||
position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rbp.pr(position)
|
||||
return
|
||||
}
|
||||
|
||||
func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
|
||||
return rbp.requestBody.Seek(offset, whence)
|
||||
}
|
||||
|
||||
// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
|
||||
func (rbp *requestBodyProgress) Close() error {
|
||||
if c, ok := rbp.requestBody.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ********** The following are specific to the response body (a ReadCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type responseBodyProgress struct {
|
||||
responseBody io.ReadCloser
|
||||
pr ProgressReceiver
|
||||
offset int64
|
||||
}
|
||||
|
||||
// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
|
||||
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
|
||||
if pr == nil {
|
||||
panic("pr must not be nil")
|
||||
}
|
||||
return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
|
||||
}
|
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
|
||||
n, err = rbp.responseBody.Read(p)
|
||||
rbp.offset += int64(n)
|
||||
|
||||
// Invokes the user's callback method to report progress
|
||||
rbp.pr(rbp.offset)
|
||||
return
|
||||
}
|
||||
|
||||
func (rbp *responseBodyProgress) Close() error {
|
||||
return rbp.responseBody.Close()
|
||||
}
|
||||
147
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
generated
vendored
Normal file
147
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
|
||||
type Request struct {
|
||||
*http.Request
|
||||
}
|
||||
|
||||
// NewRequest initializes a new HTTP request object with any desired options.
|
||||
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
|
||||
// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
|
||||
|
||||
// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
|
||||
request.Request = &http.Request{
|
||||
Method: method,
|
||||
URL: &url,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: url.Host,
|
||||
}
|
||||
|
||||
if body != nil {
|
||||
err = request.SetBody(body)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBody sets the body and content length, assumes body is not nil.
|
||||
func (r Request) SetBody(body io.ReadSeeker) error {
|
||||
size, err := body.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body.Seek(0, io.SeekStart)
|
||||
r.ContentLength = size
|
||||
r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
|
||||
|
||||
if size != 0 {
|
||||
r.Body = &retryableRequestBody{body: body}
|
||||
r.GetBody = func() (io.ReadCloser, error) {
|
||||
_, err := body.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.Body, nil
|
||||
}
|
||||
} else {
|
||||
// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
|
||||
r.Body = http.NoBody
|
||||
r.GetBody = func() (io.ReadCloser, error) {
|
||||
return http.NoBody, nil
|
||||
}
|
||||
|
||||
// close the user-provided empty body
|
||||
if c, ok := body.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
|
||||
// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close,
|
||||
// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
|
||||
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
|
||||
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
|
||||
func (r Request) Copy() Request {
|
||||
if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
|
||||
panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
|
||||
"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
|
||||
}
|
||||
copy := *r.Request // Copy the request
|
||||
urlCopy := *(r.Request.URL) // Copy the URL
|
||||
copy.URL = &urlCopy
|
||||
copy.Header = http.Header{} // Copy the header
|
||||
for k, vs := range r.Header {
|
||||
for _, value := range vs {
|
||||
copy.Header.Add(k, value)
|
||||
}
|
||||
}
|
||||
return Request{Request: ©} // Return the copy
|
||||
}
|
||||
|
||||
func (r Request) close() error {
|
||||
if r.Body != nil && r.Body != http.NoBody {
|
||||
c, ok := r.Body.(*retryableRequestBody)
|
||||
if !ok {
|
||||
panic("unexpected request body type (should be *retryableReadSeekerCloser)")
|
||||
}
|
||||
return c.realClose()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
|
||||
func (r Request) RewindBody() error {
|
||||
if r.Body != nil && r.Body != http.NoBody {
|
||||
s, ok := r.Body.(io.Seeker)
|
||||
if !ok {
|
||||
panic("unexpected request body type (should be io.Seeker)")
|
||||
}
|
||||
|
||||
// Reset the stream back to the beginning
|
||||
_, err := s.Seek(0, io.SeekStart)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type retryableRequestBody struct {
|
||||
body io.ReadSeeker // Seeking is required to support retries
|
||||
}
|
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
|
||||
return b.body.Read(p)
|
||||
}
|
||||
|
||||
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
|
||||
return b.body.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (b *retryableRequestBody) Close() error {
|
||||
// We don't want the underlying transport to close the request body on transient failures so this is a nop.
|
||||
// The pipeline closes the request body upon success.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *retryableRequestBody) realClose() error {
|
||||
if c, ok := b.body.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
74
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
generated
vendored
Normal file
74
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
|
||||
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
|
||||
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
|
||||
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
|
||||
// to the expected struct and returns the struct to its caller.
|
||||
type Response interface {
|
||||
Response() *http.Response
|
||||
}
|
||||
|
||||
// This is the default struct that has the http.Response.
|
||||
// A method can replace this struct with its own struct containing an http.Response
|
||||
// field and any other additional fields.
|
||||
type httpResponse struct {
|
||||
response *http.Response
|
||||
}
|
||||
|
||||
// NewHTTPResponse is typically called by a Policy object to return a Response object.
|
||||
func NewHTTPResponse(response *http.Response) Response {
|
||||
return &httpResponse{response: response}
|
||||
}
|
||||
|
||||
// This method satisfies the public Response interface's Response method
|
||||
func (r httpResponse) Response() *http.Response {
|
||||
return r.response
|
||||
}
|
||||
|
||||
// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
|
||||
// not nil, then these are also written into the Buffer.
|
||||
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
|
||||
// Write the request into the buffer.
|
||||
fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
|
||||
writeHeader(b, request.Header)
|
||||
if response != nil {
|
||||
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
|
||||
fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
|
||||
writeHeader(b, response.Header)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
|
||||
fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
|
||||
}
|
||||
}
|
||||
|
||||
// formatHeaders appends an HTTP request's or response's header into a Buffer.
|
||||
func writeHeader(b *bytes.Buffer, header map[string][]string) {
|
||||
if len(header) == 0 {
|
||||
b.WriteString(" (no headers)\n")
|
||||
return
|
||||
}
|
||||
keys := make([]string, 0, len(header))
|
||||
// Alphabetize the headers
|
||||
for k := range header {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
// Redact the value of any Authorization header to prevent security information from persisting in logs
|
||||
value := interface{}("REDACTED")
|
||||
if !strings.EqualFold(k, "Authorization") {
|
||||
value = header[k]
|
||||
}
|
||||
fmt.Fprintf(b, " %s: %+v\n", k, value)
|
||||
}
|
||||
}
|
||||
9
vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
generated
vendored
Normal file
9
vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
package pipeline
|
||||
|
||||
const (
|
||||
// UserAgent is the string to be used in the user agent string when making requests.
|
||||
UserAgent = "azure-pipeline-go/" + Version
|
||||
|
||||
// Version is the semantic version (see http://semver.org) of the pipeline package.
|
||||
Version = "0.2.1"
|
||||
)
|
||||
202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE
generated
vendored
Normal file
202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016 Microsoft Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE
generated
vendored
Normal file
5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
Microsoft Azure-SDK-for-Go
|
||||
Copyright 2014-2017 Microsoft
|
||||
|
||||
This product includes software developed at
|
||||
the Microsoft Corporation (https://www.microsoft.com).
|
||||
623
vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage/models.go
generated
vendored
Normal file
623
vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage/models.go
generated
vendored
Normal file
@ -0,0 +1,623 @@
|
||||
// +build go1.9
|
||||
|
||||
// Copyright 2019 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This code was auto-generated by:
|
||||
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
original "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultBaseURI = original.DefaultBaseURI
|
||||
)
|
||||
|
||||
type AccessTier = original.AccessTier
|
||||
|
||||
const (
|
||||
Cool AccessTier = original.Cool
|
||||
Hot AccessTier = original.Hot
|
||||
)
|
||||
|
||||
type AccountExpand = original.AccountExpand
|
||||
|
||||
const (
|
||||
AccountExpandGeoReplicationStats AccountExpand = original.AccountExpandGeoReplicationStats
|
||||
)
|
||||
|
||||
type AccountStatus = original.AccountStatus
|
||||
|
||||
const (
|
||||
Available AccountStatus = original.Available
|
||||
Unavailable AccountStatus = original.Unavailable
|
||||
)
|
||||
|
||||
type Action = original.Action
|
||||
|
||||
const (
|
||||
Allow Action = original.Allow
|
||||
)
|
||||
|
||||
type Action1 = original.Action1
|
||||
|
||||
const (
|
||||
Acquire Action1 = original.Acquire
|
||||
Break Action1 = original.Break
|
||||
Change Action1 = original.Change
|
||||
Release Action1 = original.Release
|
||||
Renew Action1 = original.Renew
|
||||
)
|
||||
|
||||
type Bypass = original.Bypass
|
||||
|
||||
const (
|
||||
AzureServices Bypass = original.AzureServices
|
||||
Logging Bypass = original.Logging
|
||||
Metrics Bypass = original.Metrics
|
||||
None Bypass = original.None
|
||||
)
|
||||
|
||||
type DefaultAction = original.DefaultAction
|
||||
|
||||
const (
|
||||
DefaultActionAllow DefaultAction = original.DefaultActionAllow
|
||||
DefaultActionDeny DefaultAction = original.DefaultActionDeny
|
||||
)
|
||||
|
||||
type DirectoryServiceOptions = original.DirectoryServiceOptions
|
||||
|
||||
const (
|
||||
DirectoryServiceOptionsAADDS DirectoryServiceOptions = original.DirectoryServiceOptionsAADDS
|
||||
DirectoryServiceOptionsAD DirectoryServiceOptions = original.DirectoryServiceOptionsAD
|
||||
DirectoryServiceOptionsNone DirectoryServiceOptions = original.DirectoryServiceOptionsNone
|
||||
)
|
||||
|
||||
type GeoReplicationStatus = original.GeoReplicationStatus
|
||||
|
||||
const (
|
||||
GeoReplicationStatusBootstrap GeoReplicationStatus = original.GeoReplicationStatusBootstrap
|
||||
GeoReplicationStatusLive GeoReplicationStatus = original.GeoReplicationStatusLive
|
||||
GeoReplicationStatusUnavailable GeoReplicationStatus = original.GeoReplicationStatusUnavailable
|
||||
)
|
||||
|
||||
type HTTPProtocol = original.HTTPProtocol
|
||||
|
||||
const (
|
||||
HTTPS HTTPProtocol = original.HTTPS
|
||||
Httpshttp HTTPProtocol = original.Httpshttp
|
||||
)
|
||||
|
||||
type ImmutabilityPolicyState = original.ImmutabilityPolicyState
|
||||
|
||||
const (
|
||||
Locked ImmutabilityPolicyState = original.Locked
|
||||
Unlocked ImmutabilityPolicyState = original.Unlocked
|
||||
)
|
||||
|
||||
type ImmutabilityPolicyUpdateType = original.ImmutabilityPolicyUpdateType
|
||||
|
||||
const (
|
||||
Extend ImmutabilityPolicyUpdateType = original.Extend
|
||||
Lock ImmutabilityPolicyUpdateType = original.Lock
|
||||
Put ImmutabilityPolicyUpdateType = original.Put
|
||||
)
|
||||
|
||||
type KeyPermission = original.KeyPermission
|
||||
|
||||
const (
|
||||
Full KeyPermission = original.Full
|
||||
Read KeyPermission = original.Read
|
||||
)
|
||||
|
||||
type KeySource = original.KeySource
|
||||
|
||||
const (
|
||||
MicrosoftKeyvault KeySource = original.MicrosoftKeyvault
|
||||
MicrosoftStorage KeySource = original.MicrosoftStorage
|
||||
)
|
||||
|
||||
type Kind = original.Kind
|
||||
|
||||
const (
|
||||
BlobStorage Kind = original.BlobStorage
|
||||
BlockBlobStorage Kind = original.BlockBlobStorage
|
||||
FileStorage Kind = original.FileStorage
|
||||
Storage Kind = original.Storage
|
||||
StorageV2 Kind = original.StorageV2
|
||||
)
|
||||
|
||||
type LargeFileSharesState = original.LargeFileSharesState
|
||||
|
||||
const (
|
||||
Disabled LargeFileSharesState = original.Disabled
|
||||
Enabled LargeFileSharesState = original.Enabled
|
||||
)
|
||||
|
||||
type LeaseDuration = original.LeaseDuration
|
||||
|
||||
const (
|
||||
Fixed LeaseDuration = original.Fixed
|
||||
Infinite LeaseDuration = original.Infinite
|
||||
)
|
||||
|
||||
type LeaseState = original.LeaseState
|
||||
|
||||
const (
|
||||
LeaseStateAvailable LeaseState = original.LeaseStateAvailable
|
||||
LeaseStateBreaking LeaseState = original.LeaseStateBreaking
|
||||
LeaseStateBroken LeaseState = original.LeaseStateBroken
|
||||
LeaseStateExpired LeaseState = original.LeaseStateExpired
|
||||
LeaseStateLeased LeaseState = original.LeaseStateLeased
|
||||
)
|
||||
|
||||
type LeaseStatus = original.LeaseStatus
|
||||
|
||||
const (
|
||||
LeaseStatusLocked LeaseStatus = original.LeaseStatusLocked
|
||||
LeaseStatusUnlocked LeaseStatus = original.LeaseStatusUnlocked
|
||||
)
|
||||
|
||||
type ListKeyExpand = original.ListKeyExpand
|
||||
|
||||
const (
|
||||
Kerb ListKeyExpand = original.Kerb
|
||||
)
|
||||
|
||||
type Permissions = original.Permissions
|
||||
|
||||
const (
|
||||
A Permissions = original.A
|
||||
C Permissions = original.C
|
||||
D Permissions = original.D
|
||||
L Permissions = original.L
|
||||
P Permissions = original.P
|
||||
R Permissions = original.R
|
||||
U Permissions = original.U
|
||||
W Permissions = original.W
|
||||
)
|
||||
|
||||
type PrivateEndpointConnectionProvisioningState = original.PrivateEndpointConnectionProvisioningState
|
||||
|
||||
const (
|
||||
Creating PrivateEndpointConnectionProvisioningState = original.Creating
|
||||
Deleting PrivateEndpointConnectionProvisioningState = original.Deleting
|
||||
Failed PrivateEndpointConnectionProvisioningState = original.Failed
|
||||
Succeeded PrivateEndpointConnectionProvisioningState = original.Succeeded
|
||||
)
|
||||
|
||||
type PrivateEndpointServiceConnectionStatus = original.PrivateEndpointServiceConnectionStatus
|
||||
|
||||
const (
|
||||
Approved PrivateEndpointServiceConnectionStatus = original.Approved
|
||||
Pending PrivateEndpointServiceConnectionStatus = original.Pending
|
||||
Rejected PrivateEndpointServiceConnectionStatus = original.Rejected
|
||||
)
|
||||
|
||||
type ProvisioningState = original.ProvisioningState
|
||||
|
||||
const (
|
||||
ProvisioningStateCreating ProvisioningState = original.ProvisioningStateCreating
|
||||
ProvisioningStateResolvingDNS ProvisioningState = original.ProvisioningStateResolvingDNS
|
||||
ProvisioningStateSucceeded ProvisioningState = original.ProvisioningStateSucceeded
|
||||
)
|
||||
|
||||
type PublicAccess = original.PublicAccess
|
||||
|
||||
const (
|
||||
PublicAccessBlob PublicAccess = original.PublicAccessBlob
|
||||
PublicAccessContainer PublicAccess = original.PublicAccessContainer
|
||||
PublicAccessNone PublicAccess = original.PublicAccessNone
|
||||
)
|
||||
|
||||
type Reason = original.Reason
|
||||
|
||||
const (
|
||||
AccountNameInvalid Reason = original.AccountNameInvalid
|
||||
AlreadyExists Reason = original.AlreadyExists
|
||||
)
|
||||
|
||||
type ReasonCode = original.ReasonCode
|
||||
|
||||
const (
|
||||
NotAvailableForSubscription ReasonCode = original.NotAvailableForSubscription
|
||||
QuotaID ReasonCode = original.QuotaID
|
||||
)
|
||||
|
||||
type Services = original.Services
|
||||
|
||||
const (
|
||||
B Services = original.B
|
||||
F Services = original.F
|
||||
Q Services = original.Q
|
||||
T Services = original.T
|
||||
)
|
||||
|
||||
type SignedResource = original.SignedResource
|
||||
|
||||
const (
|
||||
SignedResourceB SignedResource = original.SignedResourceB
|
||||
SignedResourceC SignedResource = original.SignedResourceC
|
||||
SignedResourceF SignedResource = original.SignedResourceF
|
||||
SignedResourceS SignedResource = original.SignedResourceS
|
||||
)
|
||||
|
||||
type SignedResourceTypes = original.SignedResourceTypes
|
||||
|
||||
const (
|
||||
SignedResourceTypesC SignedResourceTypes = original.SignedResourceTypesC
|
||||
SignedResourceTypesO SignedResourceTypes = original.SignedResourceTypesO
|
||||
SignedResourceTypesS SignedResourceTypes = original.SignedResourceTypesS
|
||||
)
|
||||
|
||||
type SkuName = original.SkuName
|
||||
|
||||
const (
|
||||
PremiumLRS SkuName = original.PremiumLRS
|
||||
PremiumZRS SkuName = original.PremiumZRS
|
||||
StandardGRS SkuName = original.StandardGRS
|
||||
StandardGZRS SkuName = original.StandardGZRS
|
||||
StandardLRS SkuName = original.StandardLRS
|
||||
StandardRAGRS SkuName = original.StandardRAGRS
|
||||
StandardRAGZRS SkuName = original.StandardRAGZRS
|
||||
StandardZRS SkuName = original.StandardZRS
|
||||
)
|
||||
|
||||
type SkuTier = original.SkuTier
|
||||
|
||||
const (
|
||||
Premium SkuTier = original.Premium
|
||||
Standard SkuTier = original.Standard
|
||||
)
|
||||
|
||||
type State = original.State
|
||||
|
||||
const (
|
||||
StateDeprovisioning State = original.StateDeprovisioning
|
||||
StateFailed State = original.StateFailed
|
||||
StateNetworkSourceDeleted State = original.StateNetworkSourceDeleted
|
||||
StateProvisioning State = original.StateProvisioning
|
||||
StateSucceeded State = original.StateSucceeded
|
||||
)
|
||||
|
||||
type UsageUnit = original.UsageUnit
|
||||
|
||||
const (
|
||||
Bytes UsageUnit = original.Bytes
|
||||
BytesPerSecond UsageUnit = original.BytesPerSecond
|
||||
Count UsageUnit = original.Count
|
||||
CountsPerSecond UsageUnit = original.CountsPerSecond
|
||||
Percent UsageUnit = original.Percent
|
||||
Seconds UsageUnit = original.Seconds
|
||||
)
|
||||
|
||||
type Account = original.Account
|
||||
type AccountCheckNameAvailabilityParameters = original.AccountCheckNameAvailabilityParameters
|
||||
type AccountCreateParameters = original.AccountCreateParameters
|
||||
type AccountKey = original.AccountKey
|
||||
type AccountListKeysResult = original.AccountListKeysResult
|
||||
type AccountListResult = original.AccountListResult
|
||||
type AccountListResultIterator = original.AccountListResultIterator
|
||||
type AccountListResultPage = original.AccountListResultPage
|
||||
type AccountProperties = original.AccountProperties
|
||||
type AccountPropertiesCreateParameters = original.AccountPropertiesCreateParameters
|
||||
type AccountPropertiesUpdateParameters = original.AccountPropertiesUpdateParameters
|
||||
type AccountRegenerateKeyParameters = original.AccountRegenerateKeyParameters
|
||||
type AccountSasParameters = original.AccountSasParameters
|
||||
type AccountUpdateParameters = original.AccountUpdateParameters
|
||||
type AccountsClient = original.AccountsClient
|
||||
type AccountsCreateFuture = original.AccountsCreateFuture
|
||||
type AccountsFailoverFuture = original.AccountsFailoverFuture
|
||||
type ActiveDirectoryProperties = original.ActiveDirectoryProperties
|
||||
type AzureEntityResource = original.AzureEntityResource
|
||||
type AzureFilesIdentityBasedAuthentication = original.AzureFilesIdentityBasedAuthentication
|
||||
type BaseClient = original.BaseClient
|
||||
type BlobContainer = original.BlobContainer
|
||||
type BlobContainersClient = original.BlobContainersClient
|
||||
type BlobServiceItems = original.BlobServiceItems
|
||||
type BlobServiceProperties = original.BlobServiceProperties
|
||||
type BlobServicePropertiesProperties = original.BlobServicePropertiesProperties
|
||||
type BlobServicesClient = original.BlobServicesClient
|
||||
type ChangeFeed = original.ChangeFeed
|
||||
type CheckNameAvailabilityResult = original.CheckNameAvailabilityResult
|
||||
type CloudError = original.CloudError
|
||||
type CloudErrorBody = original.CloudErrorBody
|
||||
type ContainerProperties = original.ContainerProperties
|
||||
type CorsRule = original.CorsRule
|
||||
type CorsRules = original.CorsRules
|
||||
type CustomDomain = original.CustomDomain
|
||||
type DateAfterCreation = original.DateAfterCreation
|
||||
type DateAfterModification = original.DateAfterModification
|
||||
type DeleteRetentionPolicy = original.DeleteRetentionPolicy
|
||||
type Dimension = original.Dimension
|
||||
type Encryption = original.Encryption
|
||||
type EncryptionService = original.EncryptionService
|
||||
type EncryptionServices = original.EncryptionServices
|
||||
type Endpoints = original.Endpoints
|
||||
type ErrorResponse = original.ErrorResponse
|
||||
type FileServiceItems = original.FileServiceItems
|
||||
type FileServiceProperties = original.FileServiceProperties
|
||||
type FileServicePropertiesProperties = original.FileServicePropertiesProperties
|
||||
type FileServicesClient = original.FileServicesClient
|
||||
type FileShare = original.FileShare
|
||||
type FileShareItem = original.FileShareItem
|
||||
type FileShareItems = original.FileShareItems
|
||||
type FileShareItemsIterator = original.FileShareItemsIterator
|
||||
type FileShareItemsPage = original.FileShareItemsPage
|
||||
type FileShareProperties = original.FileShareProperties
|
||||
type FileSharesClient = original.FileSharesClient
|
||||
type GeoReplicationStats = original.GeoReplicationStats
|
||||
type IPRule = original.IPRule
|
||||
type Identity = original.Identity
|
||||
type ImmutabilityPolicy = original.ImmutabilityPolicy
|
||||
type ImmutabilityPolicyProperties = original.ImmutabilityPolicyProperties
|
||||
type ImmutabilityPolicyProperty = original.ImmutabilityPolicyProperty
|
||||
type KeyVaultProperties = original.KeyVaultProperties
|
||||
type LeaseContainerRequest = original.LeaseContainerRequest
|
||||
type LeaseContainerResponse = original.LeaseContainerResponse
|
||||
type LegalHold = original.LegalHold
|
||||
type LegalHoldProperties = original.LegalHoldProperties
|
||||
type ListAccountSasResponse = original.ListAccountSasResponse
|
||||
type ListContainerItem = original.ListContainerItem
|
||||
type ListContainerItems = original.ListContainerItems
|
||||
type ListContainerItemsIterator = original.ListContainerItemsIterator
|
||||
type ListContainerItemsPage = original.ListContainerItemsPage
|
||||
type ListServiceSasResponse = original.ListServiceSasResponse
|
||||
type ManagementPoliciesClient = original.ManagementPoliciesClient
|
||||
type ManagementPolicy = original.ManagementPolicy
|
||||
type ManagementPolicyAction = original.ManagementPolicyAction
|
||||
type ManagementPolicyBaseBlob = original.ManagementPolicyBaseBlob
|
||||
type ManagementPolicyDefinition = original.ManagementPolicyDefinition
|
||||
type ManagementPolicyFilter = original.ManagementPolicyFilter
|
||||
type ManagementPolicyProperties = original.ManagementPolicyProperties
|
||||
type ManagementPolicyRule = original.ManagementPolicyRule
|
||||
type ManagementPolicySchema = original.ManagementPolicySchema
|
||||
type ManagementPolicySnapShot = original.ManagementPolicySnapShot
|
||||
type MetricSpecification = original.MetricSpecification
|
||||
type NetworkRuleSet = original.NetworkRuleSet
|
||||
type Operation = original.Operation
|
||||
type OperationDisplay = original.OperationDisplay
|
||||
type OperationListResult = original.OperationListResult
|
||||
type OperationProperties = original.OperationProperties
|
||||
type OperationsClient = original.OperationsClient
|
||||
type PrivateEndpoint = original.PrivateEndpoint
|
||||
type PrivateEndpointConnection = original.PrivateEndpointConnection
|
||||
type PrivateEndpointConnectionProperties = original.PrivateEndpointConnectionProperties
|
||||
type PrivateEndpointConnectionsClient = original.PrivateEndpointConnectionsClient
|
||||
type PrivateLinkResource = original.PrivateLinkResource
|
||||
type PrivateLinkResourceListResult = original.PrivateLinkResourceListResult
|
||||
type PrivateLinkResourceProperties = original.PrivateLinkResourceProperties
|
||||
type PrivateLinkResourcesClient = original.PrivateLinkResourcesClient
|
||||
type PrivateLinkServiceConnectionState = original.PrivateLinkServiceConnectionState
|
||||
type ProxyResource = original.ProxyResource
|
||||
type Resource = original.Resource
|
||||
type Restriction = original.Restriction
|
||||
type SKUCapability = original.SKUCapability
|
||||
type ServiceSasParameters = original.ServiceSasParameters
|
||||
type ServiceSpecification = original.ServiceSpecification
|
||||
type Sku = original.Sku
|
||||
type SkuListResult = original.SkuListResult
|
||||
type SkusClient = original.SkusClient
|
||||
type TagProperty = original.TagProperty
|
||||
type TrackedResource = original.TrackedResource
|
||||
type UpdateHistoryProperty = original.UpdateHistoryProperty
|
||||
type Usage = original.Usage
|
||||
type UsageListResult = original.UsageListResult
|
||||
type UsageName = original.UsageName
|
||||
type UsagesClient = original.UsagesClient
|
||||
type VirtualNetworkRule = original.VirtualNetworkRule
|
||||
|
||||
func New(subscriptionID string) BaseClient {
|
||||
return original.New(subscriptionID)
|
||||
}
|
||||
func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
|
||||
return original.NewAccountListResultIterator(page)
|
||||
}
|
||||
func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
|
||||
return original.NewAccountListResultPage(getNextPage)
|
||||
}
|
||||
func NewAccountsClient(subscriptionID string) AccountsClient {
|
||||
return original.NewAccountsClient(subscriptionID)
|
||||
}
|
||||
func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
|
||||
return original.NewAccountsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewBlobContainersClient(subscriptionID string) BlobContainersClient {
|
||||
return original.NewBlobContainersClient(subscriptionID)
|
||||
}
|
||||
func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient {
|
||||
return original.NewBlobContainersClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
|
||||
return original.NewBlobServicesClient(subscriptionID)
|
||||
}
|
||||
func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
|
||||
return original.NewBlobServicesClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewFileServicesClient(subscriptionID string) FileServicesClient {
|
||||
return original.NewFileServicesClient(subscriptionID)
|
||||
}
|
||||
func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
|
||||
return original.NewFileServicesClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
|
||||
return original.NewFileShareItemsIterator(page)
|
||||
}
|
||||
func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
|
||||
return original.NewFileShareItemsPage(getNextPage)
|
||||
}
|
||||
func NewFileSharesClient(subscriptionID string) FileSharesClient {
|
||||
return original.NewFileSharesClient(subscriptionID)
|
||||
}
|
||||
func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
|
||||
return original.NewFileSharesClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator {
|
||||
return original.NewListContainerItemsIterator(page)
|
||||
}
|
||||
func NewListContainerItemsPage(getNextPage func(context.Context, ListContainerItems) (ListContainerItems, error)) ListContainerItemsPage {
|
||||
return original.NewListContainerItemsPage(getNextPage)
|
||||
}
|
||||
func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient {
|
||||
return original.NewManagementPoliciesClient(subscriptionID)
|
||||
}
|
||||
func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient {
|
||||
return original.NewManagementPoliciesClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewOperationsClient(subscriptionID string) OperationsClient {
|
||||
return original.NewOperationsClient(subscriptionID)
|
||||
}
|
||||
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
|
||||
return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient {
|
||||
return original.NewPrivateEndpointConnectionsClient(subscriptionID)
|
||||
}
|
||||
func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient {
|
||||
return original.NewPrivateEndpointConnectionsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient {
|
||||
return original.NewPrivateLinkResourcesClient(subscriptionID)
|
||||
}
|
||||
func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient {
|
||||
return original.NewPrivateLinkResourcesClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewSkusClient(subscriptionID string) SkusClient {
|
||||
return original.NewSkusClient(subscriptionID)
|
||||
}
|
||||
func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
|
||||
return original.NewSkusClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewUsagesClient(subscriptionID string) UsagesClient {
|
||||
return original.NewUsagesClient(subscriptionID)
|
||||
}
|
||||
func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
|
||||
return original.NewUsagesClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
|
||||
return original.NewWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func PossibleAccessTierValues() []AccessTier {
|
||||
return original.PossibleAccessTierValues()
|
||||
}
|
||||
func PossibleAccountExpandValues() []AccountExpand {
|
||||
return original.PossibleAccountExpandValues()
|
||||
}
|
||||
func PossibleAccountStatusValues() []AccountStatus {
|
||||
return original.PossibleAccountStatusValues()
|
||||
}
|
||||
func PossibleAction1Values() []Action1 {
|
||||
return original.PossibleAction1Values()
|
||||
}
|
||||
func PossibleActionValues() []Action {
|
||||
return original.PossibleActionValues()
|
||||
}
|
||||
func PossibleBypassValues() []Bypass {
|
||||
return original.PossibleBypassValues()
|
||||
}
|
||||
func PossibleDefaultActionValues() []DefaultAction {
|
||||
return original.PossibleDefaultActionValues()
|
||||
}
|
||||
func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions {
|
||||
return original.PossibleDirectoryServiceOptionsValues()
|
||||
}
|
||||
func PossibleGeoReplicationStatusValues() []GeoReplicationStatus {
|
||||
return original.PossibleGeoReplicationStatusValues()
|
||||
}
|
||||
func PossibleHTTPProtocolValues() []HTTPProtocol {
|
||||
return original.PossibleHTTPProtocolValues()
|
||||
}
|
||||
func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState {
|
||||
return original.PossibleImmutabilityPolicyStateValues()
|
||||
}
|
||||
func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType {
|
||||
return original.PossibleImmutabilityPolicyUpdateTypeValues()
|
||||
}
|
||||
func PossibleKeyPermissionValues() []KeyPermission {
|
||||
return original.PossibleKeyPermissionValues()
|
||||
}
|
||||
func PossibleKeySourceValues() []KeySource {
|
||||
return original.PossibleKeySourceValues()
|
||||
}
|
||||
func PossibleKindValues() []Kind {
|
||||
return original.PossibleKindValues()
|
||||
}
|
||||
func PossibleLargeFileSharesStateValues() []LargeFileSharesState {
|
||||
return original.PossibleLargeFileSharesStateValues()
|
||||
}
|
||||
func PossibleLeaseDurationValues() []LeaseDuration {
|
||||
return original.PossibleLeaseDurationValues()
|
||||
}
|
||||
func PossibleLeaseStateValues() []LeaseState {
|
||||
return original.PossibleLeaseStateValues()
|
||||
}
|
||||
func PossibleLeaseStatusValues() []LeaseStatus {
|
||||
return original.PossibleLeaseStatusValues()
|
||||
}
|
||||
func PossibleListKeyExpandValues() []ListKeyExpand {
|
||||
return original.PossibleListKeyExpandValues()
|
||||
}
|
||||
func PossiblePermissionsValues() []Permissions {
|
||||
return original.PossiblePermissionsValues()
|
||||
}
|
||||
func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState {
|
||||
return original.PossiblePrivateEndpointConnectionProvisioningStateValues()
|
||||
}
|
||||
func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
|
||||
return original.PossiblePrivateEndpointServiceConnectionStatusValues()
|
||||
}
|
||||
func PossibleProvisioningStateValues() []ProvisioningState {
|
||||
return original.PossibleProvisioningStateValues()
|
||||
}
|
||||
func PossiblePublicAccessValues() []PublicAccess {
|
||||
return original.PossiblePublicAccessValues()
|
||||
}
|
||||
func PossibleReasonCodeValues() []ReasonCode {
|
||||
return original.PossibleReasonCodeValues()
|
||||
}
|
||||
func PossibleReasonValues() []Reason {
|
||||
return original.PossibleReasonValues()
|
||||
}
|
||||
func PossibleServicesValues() []Services {
|
||||
return original.PossibleServicesValues()
|
||||
}
|
||||
func PossibleSignedResourceTypesValues() []SignedResourceTypes {
|
||||
return original.PossibleSignedResourceTypesValues()
|
||||
}
|
||||
func PossibleSignedResourceValues() []SignedResource {
|
||||
return original.PossibleSignedResourceValues()
|
||||
}
|
||||
func PossibleSkuNameValues() []SkuName {
|
||||
return original.PossibleSkuNameValues()
|
||||
}
|
||||
func PossibleSkuTierValues() []SkuTier {
|
||||
return original.PossibleSkuTierValues()
|
||||
}
|
||||
func PossibleStateValues() []State {
|
||||
return original.PossibleStateValues()
|
||||
}
|
||||
func PossibleUsageUnitValues() []UsageUnit {
|
||||
return original.PossibleUsageUnitValues()
|
||||
}
|
||||
func UserAgent() string {
|
||||
return original.UserAgent() + " profiles/latest"
|
||||
}
|
||||
func Version() string {
|
||||
return original.Version()
|
||||
}
|
||||
49
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/client.go
generated
vendored
Normal file
49
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/client.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Package operationalinsights implements the Azure ARM Operationalinsights service API version v1.
|
||||
//
|
||||
// Log Analytics Data Plane Client
|
||||
package operationalinsights
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultBaseURI is the default URI used for the service Operationalinsights
|
||||
DefaultBaseURI = "https://api.loganalytics.io/v1"
|
||||
)
|
||||
|
||||
// BaseClient is the base client for Operationalinsights.
|
||||
type BaseClient struct {
|
||||
autorest.Client
|
||||
BaseURI string
|
||||
}
|
||||
|
||||
// New creates an instance of the BaseClient client.
|
||||
func New() BaseClient {
|
||||
return NewWithBaseURI(DefaultBaseURI)
|
||||
}
|
||||
|
||||
// NewWithBaseURI creates an instance of the BaseClient client.
|
||||
func NewWithBaseURI(baseURI string) BaseClient {
|
||||
return BaseClient{
|
||||
Client: autorest.NewClientWithUserAgent(UserAgent()),
|
||||
BaseURI: baseURI,
|
||||
}
|
||||
}
|
||||
95
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/models.go
generated
vendored
Normal file
95
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/models.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package operationalinsights
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
// The package's fully qualified name.
|
||||
const fqdn = "github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
|
||||
|
||||
// Column a column in a table.
|
||||
type Column struct {
|
||||
// Name - The name of this column.
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Type - The data type of this column.
|
||||
Type *string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorDetail ...
|
||||
type ErrorDetail struct {
|
||||
// Code - The error's code.
|
||||
Code *string `json:"code,omitempty"`
|
||||
// Message - A human readable error message.
|
||||
Message *string `json:"message,omitempty"`
|
||||
// Target - Indicates which property in the request is responsible for the error.
|
||||
Target *string `json:"target,omitempty"`
|
||||
// Value - Indicates which value in 'target' is responsible for the error.
|
||||
Value *string `json:"value,omitempty"`
|
||||
// Resources - Indicates resources which were responsible for the error.
|
||||
Resources *[]string `json:"resources,omitempty"`
|
||||
AdditionalProperties interface{} `json:"additionalProperties,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorInfo ...
|
||||
type ErrorInfo struct {
|
||||
// Code - A machine readable error code.
|
||||
Code *string `json:"code,omitempty"`
|
||||
// Message - A human readable error message.
|
||||
Message *string `json:"message,omitempty"`
|
||||
// Details - error details.
|
||||
Details *[]ErrorDetail `json:"details,omitempty"`
|
||||
// Innererror - Inner error details if they exist.
|
||||
Innererror *ErrorInfo `json:"innererror,omitempty"`
|
||||
AdditionalProperties interface{} `json:"additionalProperties,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorResponse contains details when the response code indicates an error.
|
||||
type ErrorResponse struct {
|
||||
// Error - The error details.
|
||||
Error *ErrorInfo `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// QueryBody the Analytics query. Learn more about the [Analytics query
|
||||
// syntax](https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/)
|
||||
type QueryBody struct {
|
||||
// Query - The query to execute.
|
||||
Query *string `json:"query,omitempty"`
|
||||
// Timespan - Optional. The timespan over which to query data. This is an ISO8601 time period value. This timespan is applied in addition to any that are specified in the query expression.
|
||||
Timespan *string `json:"timespan,omitempty"`
|
||||
// Workspaces - A list of workspaces that are included in the query.
|
||||
Workspaces *[]string `json:"workspaces,omitempty"`
|
||||
}
|
||||
|
||||
// QueryResults contains the tables, columns & rows resulting from a query.
|
||||
type QueryResults struct {
|
||||
autorest.Response `json:"-"`
|
||||
// Tables - The list of tables, columns and rows.
|
||||
Tables *[]Table `json:"tables,omitempty"`
|
||||
}
|
||||
|
||||
// Table contains the columns and rows for one table in a query response.
|
||||
type Table struct {
|
||||
// Name - The name of the table.
|
||||
Name *string `json:"name,omitempty"`
|
||||
// Columns - The list of columns in this table.
|
||||
Columns *[]Column `json:"columns,omitempty"`
|
||||
// Rows - The resulting rows from this query.
|
||||
Rows *[][]interface{} `json:"rows,omitempty"`
|
||||
}
|
||||
121
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/query.go
generated
vendored
Normal file
121
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/query.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
package operationalinsights
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// QueryClient is the log Analytics Data Plane Client
|
||||
type QueryClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewQueryClient creates an instance of the QueryClient client.
|
||||
func NewQueryClient() QueryClient {
|
||||
return NewQueryClientWithBaseURI(DefaultBaseURI)
|
||||
}
|
||||
|
||||
// NewQueryClientWithBaseURI creates an instance of the QueryClient client.
|
||||
func NewQueryClientWithBaseURI(baseURI string) QueryClient {
|
||||
return QueryClient{NewWithBaseURI(baseURI)}
|
||||
}
|
||||
|
||||
// Execute executes an Analytics query for data. [Here](https://dev.loganalytics.io/documentation/Using-the-API) is an
|
||||
// example for using POST with an Analytics query.
|
||||
// Parameters:
|
||||
// workspaceID - ID of the workspace. This is Workspace ID from the Properties blade in the Azure portal.
|
||||
// body - the Analytics query. Learn more about the [Analytics query
|
||||
// syntax](https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/)
|
||||
func (client QueryClient) Execute(ctx context.Context, workspaceID string, body QueryBody) (result QueryResults, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/QueryClient.Execute")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: body,
|
||||
Constraints: []validation.Constraint{{Target: "body.Query", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("operationalinsights.QueryClient", "Execute", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.ExecutePreparer(ctx, workspaceID, body)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "operationalinsights.QueryClient", "Execute", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ExecuteSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "operationalinsights.QueryClient", "Execute", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ExecuteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "operationalinsights.QueryClient", "Execute", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ExecutePreparer prepares the Execute request.
|
||||
func (client QueryClient) ExecutePreparer(ctx context.Context, workspaceID string, body QueryBody) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"workspaceId": autorest.Encode("path", workspaceID),
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/workspaces/{workspaceId}/query", pathParameters),
|
||||
autorest.WithJSON(body))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ExecuteSender sends the Execute request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client QueryClient) ExecuteSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ExecuteResponder handles the response to the Execute request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client QueryClient) ExecuteResponder(resp *http.Response) (result QueryResults, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
30
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/version.go
generated
vendored
Normal file
30
vendor/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights/version.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package operationalinsights
|
||||
|
||||
import "github.com/Azure/azure-sdk-for-go/version"
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
||||
func UserAgent() string {
|
||||
return "Azure-SDK-For-Go/" + version.Number + " operationalinsights/v1"
|
||||
}
|
||||
|
||||
// Version returns the semantic version (see http://semver.org) of the client.
|
||||
func Version() string {
|
||||
return version.Number
|
||||
}
|
||||
1311
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/accounts.go
generated
vendored
Normal file
1311
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/accounts.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1463
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobcontainers.go
generated
vendored
Normal file
1463
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobcontainers.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
341
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobservices.go
generated
vendored
Normal file
341
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/blobservices.go
generated
vendored
Normal file
@ -0,0 +1,341 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// BlobServicesClient is the the Azure Storage Management API.
|
||||
type BlobServicesClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewBlobServicesClient creates an instance of the BlobServicesClient client.
|
||||
func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
|
||||
return NewBlobServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewBlobServicesClientWithBaseURI creates an instance of the BlobServicesClient client.
|
||||
func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
|
||||
return BlobServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// GetServiceProperties gets the properties of a storage account’s Blob service, including properties for Storage
|
||||
// Analytics and CORS (Cross-Origin Resource Sharing) rules.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client BlobServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceProperties, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.GetServiceProperties")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.BlobServicesClient", "GetServiceProperties", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetServicePropertiesSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetServicePropertiesResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetServicePropertiesPreparer prepares the GetServiceProperties request.
|
||||
func (client BlobServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"BlobServicesName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client BlobServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List list blob services of storage account. It returns a collection of one object named default.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.BlobServicesClient", "List", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage
|
||||
// Analytics and CORS (Cross-Origin Resource Sharing) rules.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// parameters - the properties of a storage account’s Blob service, including properties for Storage Analytics
|
||||
// and CORS (Cross-Origin Resource Sharing) rules.
|
||||
func (client BlobServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (result BlobServiceProperties, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.SetServiceProperties")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
|
||||
{TargetValue: parameters,
|
||||
Constraints: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
|
||||
{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
|
||||
}},
|
||||
}},
|
||||
{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
|
||||
{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
|
||||
}},
|
||||
}},
|
||||
}}}}}); err != nil {
|
||||
return result, validation.NewError("storage.BlobServicesClient", "SetServiceProperties", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.SetServicePropertiesSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.SetServicePropertiesResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// SetServicePropertiesPreparer prepares the SetServiceProperties request.
|
||||
func (client BlobServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"BlobServicesName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client BlobServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client BlobServicesClient) SetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
51
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
generated
vendored
Normal file
51
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/client.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
// Package storage implements the Azure ARM Storage service API version 2019-06-01.
|
||||
//
|
||||
// The Azure Storage Management API.
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultBaseURI is the default URI used for the service Storage
|
||||
DefaultBaseURI = "https://management.azure.com"
|
||||
)
|
||||
|
||||
// BaseClient is the base client for Storage.
|
||||
type BaseClient struct {
|
||||
autorest.Client
|
||||
BaseURI string
|
||||
SubscriptionID string
|
||||
}
|
||||
|
||||
// New creates an instance of the BaseClient client.
|
||||
func New(subscriptionID string) BaseClient {
|
||||
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewWithBaseURI creates an instance of the BaseClient client.
|
||||
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
|
||||
return BaseClient{
|
||||
Client: autorest.NewClientWithUserAgent(UserAgent()),
|
||||
BaseURI: baseURI,
|
||||
SubscriptionID: subscriptionID,
|
||||
}
|
||||
}
|
||||
335
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileservices.go
generated
vendored
Normal file
335
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileservices.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// FileServicesClient is the Azure Storage Management API client for
// file-service operations. It embeds BaseClient, inheriting its transport
// (Client), endpoint (BaseURI), and SubscriptionID.
type FileServicesClient struct {
	BaseClient
}
|
||||
|
||||
// NewFileServicesClient creates an instance of the FileServicesClient client.
|
||||
func NewFileServicesClient(subscriptionID string) FileServicesClient {
|
||||
return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client.
|
||||
func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
|
||||
return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
// Sharing) rules.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
// insensitive.
// accountName - the name of the storage account within the specified resource group. Storage account names
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) {
	// Wrap the whole call in a tracing span when tracing is enabled; the
	// deferred closure records the final HTTP status code (-1 if no response).
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation of name lengths/pattern and a non-empty
	// subscription ID; failures short-circuit without issuing a request.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
		{TargetValue: accountName,
			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error())
	}

	// Prepare, send, and unmarshal the request via the generated
	// Preparer/Sender/Responder trio, wrapping errors at each stage.
	req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetServicePropertiesSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request")
		return
	}

	result, err = client.GetServicePropertiesResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request")
	}

	return
}
|
||||
|
||||
// GetServicePropertiesPreparer prepares the GetServiceProperties request.
|
||||
func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"FileServicesName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List list all file services in storage accounts
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
// insensitive.
// accountName - the name of the storage account within the specified resource group. Storage account names
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) {
	// Wrap the call in a tracing span when enabled; the deferred closure
	// records the final HTTP status code (-1 if no response was received).
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation mirrors the documented name constraints;
	// failures return before any request is issued.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
		{TargetValue: accountName,
			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("storage.FileServicesClient", "List", err.Error())
	}

	// Prepare, send, and unmarshal via the Preparer/Sender/Responder trio,
	// wrapping errors with stage-specific context at each step.
	req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request")
		return
	}

	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request")
	}

	return
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
// Sharing) rules.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
// insensitive.
// accountName - the name of the storage account within the specified resource group. Storage account names
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource
// Sharing) rules.
func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) {
	// Wrap the call in a tracing span when enabled; the deferred closure
	// records the final HTTP status code (-1 if no response was received).
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation; the nested constraint chain only checks
	// ShareDeleteRetentionPolicy.Days (1..365) when the optional parent
	// fields are present (Rule: false = not required).
	if err := validation.Validate([]validation.Validation{
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
		{TargetValue: accountName,
			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false,
						Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil},
							{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
						}},
					}},
				}}}}}); err != nil {
		return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error())
	}

	// Prepare, send, and unmarshal via the Preparer/Sender/Responder trio,
	// wrapping errors with stage-specific context at each step.
	req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request")
		return
	}

	resp, err := client.SetServicePropertiesSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request")
		return
	}

	result, err = client.SetServicePropertiesResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request")
	}

	return
}
|
||||
|
||||
// SetServicePropertiesPreparer prepares the SetServiceProperties request.
|
||||
func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"FileServicesName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
590
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileshares.go
generated
vendored
Normal file
590
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/fileshares.go
generated
vendored
Normal file
@ -0,0 +1,590 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// FileSharesClient is the Azure Storage Management API client for
// file-share operations. It embeds BaseClient, inheriting its transport
// (Client), endpoint (BaseURI), and SubscriptionID.
type FileSharesClient struct {
	BaseClient
}
|
||||
|
||||
// NewFileSharesClient creates an instance of the FileSharesClient client.
|
||||
func NewFileSharesClient(subscriptionID string) FileSharesClient {
|
||||
return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client.
|
||||
func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
|
||||
return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// Create creates a new share under the specified account as described by request body. The share resource includes
// metadata and properties for that share. It does not include a list of the files contained by the share.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
// insensitive.
// accountName - the name of the storage account within the specified resource group. Storage account names
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
// shareName - the name of the file share within the specified storage account. File share names must be
// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
// character must be immediately preceded and followed by a letter or number.
// fileShare - properties of the file share to create.
func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
	// Wrap the call in a tracing span when enabled; the deferred closure
	// records the final HTTP status code (-1 if no response was received).
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation; the nested chain only checks ShareQuota
	// (1..5120) when the optional FileShareProperties is present
	// (Rule: false = not required).
	if err := validation.Validate([]validation.Validation{
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
		{TargetValue: accountName,
			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: shareName,
			Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
				{Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: fileShare,
			Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(5120), Chain: nil},
						{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
					}},
				}}}},
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("storage.FileSharesClient", "Create", err.Error())
	}

	// Prepare, send, and unmarshal via the Preparer/Sender/Responder trio,
	// wrapping errors with stage-specific context at each step.
	req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", nil, "Failure preparing request")
		return
	}

	resp, err := client.CreateSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request")
		return
	}

	result, err = client.CreateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request")
	}

	return
}
|
||||
|
||||
// CreatePreparer prepares the Create request.
|
||||
func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"shareName": autorest.Encode("path", shareName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
|
||||
autorest.WithJSON(fileShare),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// CreateSender sends the Create request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// CreateResponder handles the response to the Create request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Delete deletes specified share under its account.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
// insensitive.
// accountName - the name of the storage account within the specified resource group. Storage account names
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
// shareName - the name of the file share within the specified storage account. File share names must be
// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
// character must be immediately preceded and followed by a letter or number.
func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result autorest.Response, err error) {
	// Wrap the call in a tracing span when enabled; the deferred closure
	// records the final HTTP status code (-1 if no response was received).
	// Note: result is a bare autorest.Response here (no body to unmarshal).
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete")
		defer func() {
			sc := -1
			if result.Response != nil {
				sc = result.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation mirrors the documented name constraints;
	// failures return before any request is issued.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
		{TargetValue: accountName,
			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: shareName,
			Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
				{Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error())
	}

	// Prepare, send, and check via the Preparer/Sender/Responder trio,
	// wrapping errors with stage-specific context at each step.
	req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request")
		return
	}

	resp, err := client.DeleteSender(req)
	if err != nil {
		result.Response = resp
		err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request")
		return
	}

	result, err = client.DeleteResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request")
	}

	return
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"shareName": autorest.Encode("path", shareName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
|
||||
|
||||
// Get gets properties of a specified share.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// shareName - the name of the file share within the specified storage account. File share names must be
|
||||
// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
|
||||
// character must be immediately preceded and followed by a letter or number.
|
||||
func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result FileShare, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: shareName,
|
||||
Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
|
||||
{Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.FileSharesClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPreparer prepares the Get request.
|
||||
func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"shareName": autorest.Encode("path", shareName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetSender sends the Get request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// GetResponder handles the response to the Get request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List lists all shares.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// maxpagesize - optional. Specified maximum number of shares that can be included in the list.
|
||||
// filter - optional. When specified, only share names starting with the filter will be listed.
|
||||
func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result FileShareItemsPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.fsi.Response.Response != nil {
|
||||
sc = result.fsi.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.FileSharesClient", "List", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.fsi.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.fsi, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if len(maxpagesize) > 0 {
|
||||
queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
|
||||
}
|
||||
if len(filter) > 0 {
|
||||
queryParameters["$filter"] = autorest.Encode("query", filter)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) {
|
||||
req, err := lastResults.fileShareItemsPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result FileShareItemsIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter)
|
||||
return
|
||||
}
|
||||
|
||||
// Update updates share properties as specified in request body. Properties not mentioned in the request will not be
|
||||
// changed. Update fails if the specified share does not already exist.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// shareName - the name of the file share within the specified storage account. File share names must be
|
||||
// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
|
||||
// character must be immediately preceded and followed by a letter or number.
|
||||
// fileShare - properties to update for the file share.
|
||||
func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: shareName,
|
||||
Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
|
||||
{Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.FileSharesClient", "Update", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.UpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.UpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UpdatePreparer prepares the Update request.
|
||||
func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"shareName": autorest.Encode("path", shareName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPatch(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
|
||||
autorest.WithJSON(fileShare),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// UpdateSender sends the Update request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// UpdateResponder handles the response to the Update request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
328
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/managementpolicies.go
generated
vendored
Normal file
328
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/managementpolicies.go
generated
vendored
Normal file
@ -0,0 +1,328 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ManagementPoliciesClient is the the Azure Storage Management API.
|
||||
type ManagementPoliciesClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewManagementPoliciesClient creates an instance of the ManagementPoliciesClient client.
|
||||
func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient {
|
||||
return NewManagementPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewManagementPoliciesClientWithBaseURI creates an instance of the ManagementPoliciesClient client.
|
||||
func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient {
|
||||
return ManagementPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// CreateOrUpdate sets the managementpolicy to the specified storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// properties - the ManagementPolicy set to a storage account.
|
||||
func (client ManagementPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (result ManagementPolicy, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.CreateOrUpdate")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
|
||||
{TargetValue: properties,
|
||||
Constraints: []validation.Constraint{{Target: "properties.ManagementPolicyProperties", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy", Name: validation.Null, Rule: true,
|
||||
Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}}},
|
||||
}}}}}); err != nil {
|
||||
return result, validation.NewError("storage.ManagementPoliciesClient", "CreateOrUpdate", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CreateOrUpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CreateOrUpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
|
||||
func (client ManagementPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"managementPolicyName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
|
||||
autorest.WithJSON(properties),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ManagementPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ManagementPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ManagementPolicy, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Delete deletes the managementpolicy associated with the specified storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client ManagementPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Delete")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response != nil {
|
||||
sc = result.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.ManagementPoliciesClient", "Delete", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.DeleteSender(req)
|
||||
if err != nil {
|
||||
result.Response = resp
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client ManagementPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"managementPolicyName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ManagementPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ManagementPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
|
||||
|
||||
// Get gets the managementpolicy associated with the specified storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client ManagementPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result ManagementPolicy, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.ManagementPoliciesClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPreparer prepares the Get request.
|
||||
func (client ManagementPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"managementPolicyName": autorest.Encode("path", "default"),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetSender sends the Get request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ManagementPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// GetResponder handles the response to the Get request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ManagementPoliciesClient) GetResponder(resp *http.Response) (result ManagementPolicy, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
3271
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/models.go
generated
vendored
Normal file
3271
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/models.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
109
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/operations.go
generated
vendored
Normal file
109
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/operations.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// OperationsClient is the the Azure Storage Management API.
|
||||
type OperationsClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewOperationsClient creates an instance of the OperationsClient client.
|
||||
func NewOperationsClient(subscriptionID string) OperationsClient {
|
||||
return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
|
||||
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
|
||||
return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// List lists all of the available Storage Rest API operations.
|
||||
func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.ListPreparer(ctx)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPath("/providers/Microsoft.Storage/operations"),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
332
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privateendpointconnections.go
generated
vendored
Normal file
332
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privateendpointconnections.go
generated
vendored
Normal file
@ -0,0 +1,332 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// PrivateEndpointConnectionsClient is the the Azure Storage Management API.
|
||||
type PrivateEndpointConnectionsClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client.
|
||||
func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient {
|
||||
return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client.
|
||||
func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient {
|
||||
return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// Delete deletes the specified private endpoint connection associated with the storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// privateEndpointConnectionName - the name of the private endpoint connection associated with the Storage
|
||||
// Account
|
||||
func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result autorest.Response, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response != nil {
|
||||
sc = result.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Delete", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.DeleteSender(req)
|
||||
if err != nil {
|
||||
result.Response = resp
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
|
||||
|
||||
// Get gets the specified private endpoint connection associated with the storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// privateEndpointConnectionName - the name of the private endpoint connection associated with the Storage
|
||||
// Account
|
||||
func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPreparer prepares the Get request.
|
||||
func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetSender sends the Get request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// GetResponder handles the response to the Get request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Put update the state of specified private endpoint connection associated with the storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
// privateEndpointConnectionName - the name of the private endpoint connection associated with the Storage
|
||||
// Account
|
||||
// properties - the private endpoint connection properties.
|
||||
func (client PrivateEndpointConnectionsClient) Put(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (result PrivateEndpointConnection, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Put")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
|
||||
{TargetValue: properties,
|
||||
Constraints: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
|
||||
return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Put", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.PutPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName, properties)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.PutSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.PutResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PutPreparer prepares the Put request.
|
||||
func (client PrivateEndpointConnectionsClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
|
||||
autorest.WithJSON(properties),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// PutSender sends the Put request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client PrivateEndpointConnectionsClient) PutSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// PutResponder handles the response to the Put request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client PrivateEndpointConnectionsClient) PutResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
134
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privatelinkresources.go
generated
vendored
Normal file
134
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/privatelinkresources.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// PrivateLinkResourcesClient is the the Azure Storage Management API.
|
||||
type PrivateLinkResourcesClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewPrivateLinkResourcesClient creates an instance of the PrivateLinkResourcesClient client.
|
||||
func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient {
|
||||
return NewPrivateLinkResourcesClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewPrivateLinkResourcesClientWithBaseURI creates an instance of the PrivateLinkResourcesClient client.
|
||||
func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient {
|
||||
return PrivateLinkResourcesClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// ListByStorageAccount gets the private link resources that need to be created for a storage account.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
|
||||
// insensitive.
|
||||
// accountName - the name of the storage account within the specified resource group. Storage account names
|
||||
// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client PrivateLinkResourcesClient) ListByStorageAccount(ctx context.Context, resourceGroupName string, accountName string) (result PrivateLinkResourceListResult, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateLinkResourcesClient.ListByStorageAccount")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.PrivateLinkResourcesClient", "ListByStorageAccount", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.ListByStorageAccountPreparer(ctx, resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListByStorageAccountSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListByStorageAccountResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListByStorageAccountPreparer prepares the ListByStorageAccount request.
|
||||
func (client PrivateLinkResourcesClient) ListByStorageAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListByStorageAccountSender sends the ListByStorageAccount request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client PrivateLinkResourcesClient) ListByStorageAccountSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListByStorageAccountResponder handles the response to the ListByStorageAccount request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client PrivateLinkResourcesClient) ListByStorageAccountResponder(resp *http.Response) (result PrivateLinkResourceListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
120
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/skus.go
generated
vendored
Normal file
120
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/skus.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// SkusClient is the the Azure Storage Management API.
|
||||
type SkusClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewSkusClient creates an instance of the SkusClient client.
|
||||
func NewSkusClient(subscriptionID string) SkusClient {
|
||||
return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewSkusClientWithBaseURI creates an instance of the SkusClient client.
|
||||
func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
|
||||
return SkusClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// List lists the available SKUs supported by Microsoft.Storage for given subscription.
|
||||
func (client SkusClient) List(ctx context.Context) (result SkuListResult, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/SkusClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.SkusClient", "List", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.ListPreparer(ctx)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client SkusClient) ListPreparer(ctx context.Context) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
123
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/usages.go
generated
vendored
Normal file
123
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/usages.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// UsagesClient is the the Azure Storage Management API.
|
||||
type UsagesClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewUsagesClient creates an instance of the UsagesClient client.
|
||||
func NewUsagesClient(subscriptionID string) UsagesClient {
|
||||
return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client.
|
||||
func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
|
||||
return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// ListByLocation gets the current usage count and the limit for the resources of the location under the subscription.
|
||||
// Parameters:
|
||||
// location - the location of the Azure Storage resource.
|
||||
func (client UsagesClient) ListByLocation(ctx context.Context, location string) (result UsageListResult, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.ListByLocation")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: client.SubscriptionID,
|
||||
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("storage.UsagesClient", "ListByLocation", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.ListByLocationPreparer(ctx, location)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListByLocationSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListByLocationResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListByLocationPreparer prepares the ListByLocation request.
|
||||
func (client UsagesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"location": autorest.Encode("path", location),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-06-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListByLocationSender sends the ListByLocation request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client UsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) {
|
||||
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
|
||||
return autorest.SendWithSender(client, req, sd...)
|
||||
}
|
||||
|
||||
// ListByLocationResponder handles the response to the ListByLocation request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client UsagesClient) ListByLocationResponder(resp *http.Response) (result UsageListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
30
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/version.go
generated
vendored
Normal file
30
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage/version.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package storage
|
||||
|
||||
import "github.com/Azure/azure-sdk-for-go/version"
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
||||
func UserAgent() string {
|
||||
return "Azure-SDK-For-Go/" + version.Number + " storage/2019-06-01"
|
||||
}
|
||||
|
||||
// Version returns the semantic version (see http://semver.org) of the client.
|
||||
func Version() string {
|
||||
return version.Number
|
||||
}
|
||||
21
vendor/github.com/Azure/azure-sdk-for-go/version/version.go
generated
vendored
Normal file
21
vendor/github.com/Azure/azure-sdk-for-go/version/version.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
package version
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
// Number contains the semantic version of this SDK.
|
||||
const Number = "v36.1.0"
|
||||
21
vendor/github.com/Azure/azure-storage-file-go/LICENSE
generated
vendored
Normal file
21
vendor/github.com/Azure/azure-storage-file-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE
|
||||
315
vendor/github.com/Azure/azure-storage-file-go/azfile/highlevel.go
generated
vendored
Normal file
315
vendor/github.com/Azure/azure-storage-file-go/azfile/highlevel.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"bytes"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultParallelCount specifies default parallel count will be used by parallel upload/download methods
|
||||
defaultParallelCount = 5
|
||||
|
||||
// fileSegmentSize specifies file segment size that file would be splitted into during parallel upload/download
|
||||
fileSegmentSize = 500 * 1024 * 1024
|
||||
)
|
||||
|
||||
// UploadToAzureFileOptions identifies options used by the UploadBufferToAzureFile and UploadFileToAzureFile functions.
|
||||
type UploadToAzureFileOptions struct {
|
||||
// RangeSize specifies the range size to use in each parallel upload; the default (and maximum size) is FileMaxUploadRangeBytes.
|
||||
RangeSize int64
|
||||
|
||||
// Progress is a function that is invoked periodically as bytes are send in a UploadRange call to the FileURL.
|
||||
Progress pipeline.ProgressReceiver
|
||||
|
||||
// Parallelism indicates the maximum number of ranges to upload in parallel. If 0(default) is provided, 5 parallelism will be used by default.
|
||||
Parallelism uint16
|
||||
|
||||
// FileHTTPHeaders contains read/writeable file properties.
|
||||
FileHTTPHeaders FileHTTPHeaders
|
||||
|
||||
// Metadata contains metadata key/value pairs.
|
||||
Metadata Metadata
|
||||
}
|
||||
|
||||
// UploadBufferToAzureFile uploads a buffer to an Azure file.
|
||||
// Note: o.RangeSize must be >= 0 and <= FileMaxUploadRangeBytes, and if not specified, method will use FileMaxUploadRangeBytes by default.
|
||||
// The total size to be uploaded should be <= FileMaxSizeInBytes.
|
||||
func UploadBufferToAzureFile(ctx context.Context, b []byte,
|
||||
fileURL FileURL, o UploadToAzureFileOptions) error {
|
||||
|
||||
// 1. Validate parameters, and set defaults.
|
||||
if o.RangeSize < 0 || o.RangeSize > FileMaxUploadRangeBytes {
|
||||
return fmt.Errorf("invalid argument, o.RangeSize must be >= 0 and <= %d, in bytes", FileMaxUploadRangeBytes)
|
||||
}
|
||||
if o.RangeSize == 0 {
|
||||
o.RangeSize = FileMaxUploadRangeBytes
|
||||
}
|
||||
|
||||
size := int64(len(b))
|
||||
|
||||
parallelism := o.Parallelism
|
||||
if parallelism == 0 {
|
||||
parallelism = defaultParallelCount // default parallelism
|
||||
}
|
||||
|
||||
// 2. Try to create the Azure file.
|
||||
_, err := fileURL.Create(ctx, size, o.FileHTTPHeaders, o.Metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If size equals to 0, upload nothing and directly return.
|
||||
if size == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 3. Prepare and do parallel upload.
|
||||
fileProgress := int64(0)
|
||||
progressLock := &sync.Mutex{}
|
||||
|
||||
return doBatchTransfer(ctx, batchTransferOptions{
|
||||
transferSize: size,
|
||||
chunkSize: o.RangeSize,
|
||||
parallelism: parallelism,
|
||||
operation: func(offset int64, curRangeSize int64) error {
|
||||
// Prepare to read the proper section of the buffer.
|
||||
var body io.ReadSeeker = bytes.NewReader(b[offset : offset+curRangeSize])
|
||||
if o.Progress != nil {
|
||||
rangeProgress := int64(0)
|
||||
body = pipeline.NewRequestBodyProgress(body,
|
||||
func(bytesTransferred int64) {
|
||||
diff := bytesTransferred - rangeProgress
|
||||
rangeProgress = bytesTransferred
|
||||
progressLock.Lock()
|
||||
defer progressLock.Unlock()
|
||||
fileProgress += diff
|
||||
o.Progress(fileProgress)
|
||||
})
|
||||
}
|
||||
|
||||
_, err := fileURL.UploadRange(ctx, int64(offset), body, nil)
|
||||
return err
|
||||
},
|
||||
operationName: "UploadBufferToAzureFile",
|
||||
})
|
||||
}
|
||||
|
||||
// UploadFileToAzureFile uploads a local file to an Azure file.
|
||||
func UploadFileToAzureFile(ctx context.Context, file *os.File,
|
||||
fileURL FileURL, o UploadToAzureFileOptions) error {
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m := mmf{} // Default to an empty slice; used for 0-size file
|
||||
if stat.Size() != 0 {
|
||||
m, err = newMMF(file, false, 0, int(stat.Size()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer m.unmap()
|
||||
}
|
||||
return UploadBufferToAzureFile(ctx, m, fileURL, o)
|
||||
}
|
||||
|
||||
// DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions.
|
||||
type DownloadFromAzureFileOptions struct {
|
||||
// RangeSize specifies the range size to use in each parallel download; the default is FileMaxUploadRangeBytes.
|
||||
RangeSize int64
|
||||
|
||||
// Progress is a function that is invoked periodically as bytes are recieved.
|
||||
Progress pipeline.ProgressReceiver
|
||||
|
||||
// Parallelism indicates the maximum number of ranges to download in parallel. If 0(default) is provided, 5 parallelism will be used by default.
|
||||
Parallelism uint16
|
||||
|
||||
// Max retry requests used during reading data for each range.
|
||||
MaxRetryRequestsPerRange int
|
||||
}
|
||||
|
||||
// downloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
|
||||
// Note: o.RangeSize must be >= 0.
|
||||
func downloadAzureFileToBuffer(ctx context.Context, fileURL FileURL, azfileProperties *FileGetPropertiesResponse,
|
||||
b []byte, o DownloadFromAzureFileOptions) (*FileGetPropertiesResponse, error) {
|
||||
|
||||
// 1. Validate parameters, and set defaults.
|
||||
if o.RangeSize < 0 {
|
||||
return nil, errors.New("invalid argument, o.RangeSize must be >= 0")
|
||||
}
|
||||
if o.RangeSize == 0 {
|
||||
o.RangeSize = FileMaxUploadRangeBytes
|
||||
}
|
||||
|
||||
if azfileProperties == nil {
|
||||
p, err := fileURL.GetProperties(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
azfileProperties = p
|
||||
}
|
||||
azfileSize := azfileProperties.ContentLength()
|
||||
|
||||
// If azure file size equals to 0, directly return as nothing need be downloaded.
|
||||
if azfileSize == 0 {
|
||||
return azfileProperties, nil
|
||||
}
|
||||
|
||||
if int64(len(b)) < azfileSize {
|
||||
sanityCheckFailed(fmt.Sprintf("The buffer's size should be equal to or larger than Azure file's size: %d.", azfileSize))
|
||||
}
|
||||
|
||||
parallelism := o.Parallelism
|
||||
if parallelism == 0 {
|
||||
parallelism = defaultParallelCount // default parallelism
|
||||
}
|
||||
|
||||
// 2. Prepare and do parallel download.
|
||||
fileProgress := int64(0)
|
||||
progressLock := &sync.Mutex{}
|
||||
|
||||
err := doBatchTransfer(ctx, batchTransferOptions{
|
||||
transferSize: azfileSize,
|
||||
chunkSize: o.RangeSize,
|
||||
parallelism: parallelism,
|
||||
operation: func(offset int64, curRangeSize int64) error {
|
||||
dr, err := fileURL.Download(ctx, offset, curRangeSize, false)
|
||||
body := dr.Body(RetryReaderOptions{MaxRetryRequests: o.MaxRetryRequestsPerRange})
|
||||
|
||||
if o.Progress != nil {
|
||||
rangeProgress := int64(0)
|
||||
body = pipeline.NewResponseBodyProgress(
|
||||
body,
|
||||
func(bytesTransferred int64) {
|
||||
diff := bytesTransferred - rangeProgress
|
||||
rangeProgress = bytesTransferred
|
||||
progressLock.Lock()
|
||||
defer progressLock.Unlock()
|
||||
fileProgress += diff
|
||||
o.Progress(fileProgress)
|
||||
})
|
||||
}
|
||||
|
||||
_, err = io.ReadFull(body, b[offset:offset+curRangeSize])
|
||||
body.Close()
|
||||
|
||||
return err
|
||||
},
|
||||
operationName: "downloadAzureFileToBuffer",
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return azfileProperties, nil
|
||||
}
|
||||
|
||||
// DownloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
|
||||
func DownloadAzureFileToBuffer(ctx context.Context, fileURL FileURL,
|
||||
b []byte, o DownloadFromAzureFileOptions) (*FileGetPropertiesResponse, error) {
|
||||
return downloadAzureFileToBuffer(ctx, fileURL, nil, b, o)
|
||||
}
|
||||
|
||||
// DownloadAzureFileToFile downloads an Azure file to a local file.
|
||||
// The file would be created if it doesn't exist, and would be truncated if the size doesn't match.
|
||||
// Note: file can't be nil.
|
||||
func DownloadAzureFileToFile(ctx context.Context, fileURL FileURL, file *os.File, o DownloadFromAzureFileOptions) (*FileGetPropertiesResponse, error) {
|
||||
// 1. Validate parameters.
|
||||
if file == nil {
|
||||
return nil, errors.New("invalid argument, file can't be nil")
|
||||
}
|
||||
|
||||
// 2. Try to get Azure file's size.
|
||||
azfileProperties, err := fileURL.GetProperties(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
azfileSize := azfileProperties.ContentLength()
|
||||
|
||||
// 3. Compare and try to resize local file's size if it doesn't match Azure file's size.
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if stat.Size() != azfileSize {
|
||||
if err = file.Truncate(azfileSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Set mmap and call DownloadAzureFileToBuffer, in this case file size should be > 0.
|
||||
m := mmf{} // Default to an empty slice; used for 0-size file
|
||||
if azfileSize > 0 {
|
||||
m, err = newMMF(file, true, 0, int(azfileSize))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer m.unmap()
|
||||
}
|
||||
|
||||
return downloadAzureFileToBuffer(ctx, fileURL, azfileProperties, m, o)
|
||||
}
|
||||
|
||||
// BatchTransferOptions identifies options used by doBatchTransfer.
|
||||
type batchTransferOptions struct {
|
||||
transferSize int64
|
||||
chunkSize int64
|
||||
parallelism uint16
|
||||
operation func(offset int64, chunkSize int64) error
|
||||
operationName string
|
||||
}
|
||||
|
||||
// doBatchTransfer helps to execute operations in a batch manner.
|
||||
func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
|
||||
// Prepare and do parallel operations.
|
||||
numChunks := ((o.transferSize - 1) / o.chunkSize) + 1
|
||||
operationChannel := make(chan func() error, o.parallelism) // Create the channel that release 'parallelism' goroutines concurrently
|
||||
operationResponseChannel := make(chan error, numChunks) // Holds each response
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Create the goroutines that process each operation (in parallel).
|
||||
for g := uint16(0); g < o.parallelism; g++ {
|
||||
//grIndex := g
|
||||
go func() {
|
||||
for f := range operationChannel {
|
||||
//fmt.Printf("[%s] gr-%d start action\n", o.operationName, grIndex)
|
||||
err := f()
|
||||
operationResponseChannel <- err
|
||||
//fmt.Printf("[%s] gr-%d end action\n", o.operationName, grIndex)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
curChunkSize := o.chunkSize
|
||||
// Add each chunk's operation to the channel.
|
||||
for chunkIndex := int64(0); chunkIndex < numChunks; chunkIndex++ {
|
||||
if chunkIndex == numChunks-1 { // Last chunk
|
||||
curChunkSize = o.transferSize - (int64(chunkIndex) * o.chunkSize) // Remove size of all transferred chunks from total
|
||||
}
|
||||
offset := int64(chunkIndex) * o.chunkSize
|
||||
|
||||
closureChunkSize := curChunkSize
|
||||
operationChannel <- func() error {
|
||||
return o.operation(offset, closureChunkSize)
|
||||
}
|
||||
}
|
||||
close(operationChannel)
|
||||
|
||||
// Wait for the operations to complete.
|
||||
for chunkIndex := int64(0); chunkIndex < numChunks; chunkIndex++ {
|
||||
responseError := <-operationResponseChannel
|
||||
if responseError != nil {
|
||||
cancel() // As soon as any operation fails, cancel all remaining operation calls
|
||||
return responseError // No need to process anymore responses
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
159
vendor/github.com/Azure/azure-storage-file-go/azfile/parsing_urls.go
generated
vendored
Normal file
159
vendor/github.com/Azure/azure-storage-file-go/azfile/parsing_urls.go
generated
vendored
Normal file
@ -0,0 +1,159 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
shareSnapshot = "sharesnapshot"
|
||||
)
|
||||
|
||||
// A FileURLParts object represents the components that make up an Azure Storage Share/Directory/File URL. You parse an
|
||||
// existing URL into its parts by calling NewFileURLParts(). You construct a URL from parts by calling URL().
|
||||
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
|
||||
type FileURLParts struct {
|
||||
Scheme string // Ex: "https://"
|
||||
Host string // Ex: "account.share.core.windows.net", "10.132.141.33", "10.132.141.33:80"
|
||||
ShareName string // Share name, Ex: "myshare"
|
||||
DirectoryOrFilePath string // Path of directory or file, Ex: "mydirectory/myfile"
|
||||
ShareSnapshot string // IsZero is true if not a snapshot
|
||||
SAS SASQueryParameters
|
||||
UnparsedParams string
|
||||
IPEndpointStyleInfo IPEndpointStyleInfo // Useful Parts for IP endpoint style URL.
|
||||
}
|
||||
|
||||
// IPEndpointStyleInfo is used for IP endpoint style URL.
|
||||
// It's commonly used when working with Azure storage emulator or testing environments.
|
||||
// Ex: "https://10.132.141.33/accountname/sharename"
|
||||
type IPEndpointStyleInfo struct {
|
||||
AccountName string // "" if not using IP endpoint style
|
||||
}
|
||||
|
||||
// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as:
|
||||
// http(s)://IP(:port)/storageaccount/share(||container||etc)/...
|
||||
// As url's Host property, host could be both host or host:port
|
||||
func isIPEndpointStyle(host string) bool {
|
||||
if host == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if h, _, err := net.SplitHostPort(host); err == nil {
|
||||
host = h
|
||||
}
|
||||
// For IPv6, there could be case where SplitHostPort fails for cannot finding port.
|
||||
// In this case, eliminate the '[' and ']' in the URL.
|
||||
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
|
||||
if host[0] == '[' && host[len(host)-1] == ']' {
|
||||
host = host[1 : len(host)-1]
|
||||
}
|
||||
return net.ParseIP(host) != nil
|
||||
}
|
||||
|
||||
// NewFileURLParts parses a URL initializing FileURLParts' fields including any SAS-related & sharesnapshot query parameters. Any other
|
||||
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the FileURLParts object.
|
||||
func NewFileURLParts(u url.URL) FileURLParts {
|
||||
up := FileURLParts{
|
||||
Scheme: u.Scheme,
|
||||
Host: u.Host,
|
||||
IPEndpointStyleInfo: IPEndpointStyleInfo{},
|
||||
}
|
||||
|
||||
if u.Path != "" {
|
||||
path := u.Path
|
||||
|
||||
if path[0] == '/' {
|
||||
path = path[1:]
|
||||
}
|
||||
|
||||
if isIPEndpointStyle(up.Host) {
|
||||
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no share, path of directory or file
|
||||
up.IPEndpointStyleInfo.AccountName = path
|
||||
} else {
|
||||
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
|
||||
|
||||
path = path[accountEndIndex+1:]
|
||||
// Find the next slash (if it exists)
|
||||
if shareEndIndex := strings.Index(path, "/"); shareEndIndex == -1 { // Slash not found; path has share name & no path of directory or file
|
||||
up.ShareName = path
|
||||
} else { // Slash found; path has share name & path of directory or file
|
||||
up.ShareName = path[:shareEndIndex]
|
||||
up.DirectoryOrFilePath = path[shareEndIndex+1:]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Find the next slash (if it exists)
|
||||
if shareEndIndex := strings.Index(path, "/"); shareEndIndex == -1 { // Slash not found; path has share name & no path of directory or file
|
||||
up.ShareName = path
|
||||
} else { // Slash found; path has share name & path of directory or file
|
||||
up.ShareName = path[:shareEndIndex]
|
||||
up.DirectoryOrFilePath = path[shareEndIndex+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the query parameters to a case-sensitive map & trim whitespace
|
||||
paramsMap := u.Query()
|
||||
|
||||
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(shareSnapshot); ok {
|
||||
up.ShareSnapshot = snapshotStr[0]
|
||||
// If we recognized the query parameter, remove it from the map
|
||||
delete(paramsMap, shareSnapshot)
|
||||
}
|
||||
up.SAS = newSASQueryParameters(paramsMap, true)
|
||||
up.UnparsedParams = paramsMap.Encode()
|
||||
return up
|
||||
}
|
||||
|
||||
type caseInsensitiveValues url.Values // map[string][]string
|
||||
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
|
||||
key = strings.ToLower(key)
|
||||
for k, v := range values {
|
||||
if strings.ToLower(k) == key {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
return []string{}, false
|
||||
}
|
||||
|
||||
// URL returns a URL object whose fields are initialized from the FileURLParts fields. The URL's RawQuery
|
||||
// field contains the SAS, snapshot, and unparsed query parameters.
|
||||
func (up FileURLParts) URL() url.URL {
|
||||
path := ""
|
||||
// Concatenate account name for IP endpoint style URL
|
||||
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
|
||||
path += "/" + up.IPEndpointStyleInfo.AccountName
|
||||
}
|
||||
// Concatenate share & path of directory or file (if they exist)
|
||||
if up.ShareName != "" {
|
||||
path += "/" + up.ShareName
|
||||
if up.DirectoryOrFilePath != "" {
|
||||
path += "/" + up.DirectoryOrFilePath
|
||||
}
|
||||
}
|
||||
|
||||
rawQuery := up.UnparsedParams
|
||||
|
||||
// Concatenate share snapshot query parameter (if it exists)
|
||||
if up.ShareSnapshot != "" {
|
||||
if len(rawQuery) > 0 {
|
||||
rawQuery += "&"
|
||||
}
|
||||
rawQuery += shareSnapshot + "=" + up.ShareSnapshot
|
||||
}
|
||||
sas := up.SAS.Encode()
|
||||
if sas != "" {
|
||||
if len(rawQuery) > 0 {
|
||||
rawQuery += "&"
|
||||
}
|
||||
rawQuery += sas
|
||||
}
|
||||
u := url.URL{
|
||||
Scheme: up.Scheme,
|
||||
Host: up.Host,
|
||||
Path: path,
|
||||
RawQuery: rawQuery,
|
||||
}
|
||||
return u
|
||||
}
|
||||
207
vendor/github.com/Azure/azure-storage-file-go/azfile/sas_service.go
generated
vendored
Normal file
207
vendor/github.com/Azure/azure-storage-file-go/azfile/sas_service.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage share or file.
// Each `param` tag names the SAS query parameter the field maps to.
type FileSASSignatureValues struct {
	Version            string      `param:"sv"`  // If not specified, this defaults to SASVersion
	Protocol           SASProtocol `param:"spr"` // See the SASProtocol* constants
	StartTime          time.Time   `param:"st"`  // Not specified if IsZero
	ExpiryTime         time.Time   `param:"se"`  // Not specified if IsZero
	Permissions        string      `param:"sp"`  // Create by initializing a ShareSASPermissions or FileSASPermissions and then call String()
	IPRange            IPRange     `param:"sip"`
	Identifier         string      `param:"si"`
	ShareName          string
	FilePath           string // Ex: "directory/FileName" or "FileName". Use "" to create a Share SAS.
	CacheControl       string // rscc
	ContentDisposition string // rscd
	ContentEncoding    string // rsce
	ContentLanguage    string // rscl
	ContentType        string // rsct
}
|
||||
|
||||
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
|
||||
// the proper SAS query parameters.
|
||||
func (v FileSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
|
||||
if sharedKeyCredential == nil {
|
||||
return SASQueryParameters{}, errors.New("sharedKeyCredential can't be nil")
|
||||
}
|
||||
|
||||
resource := "s"
|
||||
if v.FilePath == "" {
|
||||
// Make sure the permission characters are in the correct order
|
||||
perms := &ShareSASPermissions{}
|
||||
if err := perms.Parse(v.Permissions); err != nil {
|
||||
return SASQueryParameters{}, err
|
||||
}
|
||||
v.Permissions = perms.String()
|
||||
} else {
|
||||
resource = "f"
|
||||
// Make sure the permission characters are in the correct order
|
||||
perms := &FileSASPermissions{}
|
||||
if err := perms.Parse(v.Permissions); err != nil {
|
||||
return SASQueryParameters{}, err
|
||||
}
|
||||
v.Permissions = perms.String()
|
||||
}
|
||||
if v.Version == "" {
|
||||
v.Version = SASVersion
|
||||
}
|
||||
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
|
||||
|
||||
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
||||
stringToSign := strings.Join([]string{
|
||||
v.Permissions,
|
||||
startTime,
|
||||
expiryTime,
|
||||
getCanonicalName(sharedKeyCredential.AccountName(), v.ShareName, v.FilePath),
|
||||
v.Identifier,
|
||||
v.IPRange.String(),
|
||||
string(v.Protocol),
|
||||
v.Version,
|
||||
v.CacheControl, // rscc
|
||||
v.ContentDisposition, // rscd
|
||||
v.ContentEncoding, // rsce
|
||||
v.ContentLanguage, // rscl
|
||||
v.ContentType}, // rsct
|
||||
"\n")
|
||||
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
|
||||
|
||||
p := SASQueryParameters{
|
||||
// Common SAS parameters
|
||||
version: v.Version,
|
||||
protocol: v.Protocol,
|
||||
startTime: v.StartTime,
|
||||
expiryTime: v.ExpiryTime,
|
||||
permissions: v.Permissions,
|
||||
ipRange: v.IPRange,
|
||||
|
||||
// Share/File-specific SAS parameters
|
||||
resource: resource,
|
||||
identifier: v.Identifier,
|
||||
cacheControl: v.CacheControl,
|
||||
contentDisposition: v.ContentDisposition,
|
||||
contentEncoding: v.ContentEncoding,
|
||||
contentLanguage: v.ContentLanguage,
|
||||
contentType: v.ContentType,
|
||||
|
||||
// Calculated SAS signature
|
||||
signature: signature,
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// getCanonicalName computes the canonical name for a share or file resource for SAS signing.
// Share: "/file/account/sharename"
// File:  "/file/account/sharename/filename"
// File:  "/file/account/sharename/directoryname/filename"
func getCanonicalName(account string, shareName string, filePath string) string {
	var b strings.Builder
	b.WriteString("/file/")
	b.WriteString(account)
	b.WriteString("/")
	b.WriteString(shareName)
	if filePath != "" {
		// Normalize Windows-style separators and drop a single leading slash.
		normalized := strings.Replace(filePath, "\\", "/", -1)
		normalized = strings.TrimPrefix(normalized, "/")
		b.WriteString("/")
		b.WriteString(normalized)
	}
	return b.String()
}
|
||||
|
||||
// The ShareSASPermissions type simplifies creating the permissions string for an Azure Storage share SAS.
// Initialize an instance of this type and then call its String method to set FileSASSignatureValues's Permissions field.
type ShareSASPermissions struct {
	Read, Create, Write, Delete, List bool
}

// String produces the SAS permissions string for an Azure Storage share.
// Call this method to set FileSASSignatureValues's Permissions field.
func (p ShareSASPermissions) String() string {
	// Emit the permission characters in the canonical "rcwdl" order.
	var sb strings.Builder
	for _, perm := range []struct {
		enabled bool
		c       byte
	}{
		{p.Read, 'r'},
		{p.Create, 'c'},
		{p.Write, 'w'},
		{p.Delete, 'd'},
		{p.List, 'l'},
	} {
		if perm.enabled {
			sb.WriteByte(perm.c)
		}
	}
	return sb.String()
}

// Parse initializes the ShareSASPermissions's fields from a string.
// An unrecognized character yields an error.
func (p *ShareSASPermissions) Parse(s string) error {
	*p = ShareSASPermissions{} // Reset all flags before parsing.
	for _, c := range s {
		switch c {
		case 'r':
			p.Read = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'l':
			p.List = true
		default:
			return fmt.Errorf("Invalid permission: '%v'", c)
		}
	}
	return nil
}
|
||||
|
||||
// The FileSASPermissions type simplifies creating the permissions string for an Azure Storage file SAS.
// Initialize an instance of this type and then call its String method to set FileSASSignatureValues's Permissions field.
type FileSASPermissions struct{ Read, Create, Write, Delete bool }

// String produces the SAS permissions string for an Azure Storage file.
// Call this method to set FileSASSignatureValues's Permissions field.
func (p FileSASPermissions) String() string {
	// Emit the permission characters in the canonical "rcwd" order.
	var sb strings.Builder
	if p.Read {
		sb.WriteByte('r')
	}
	if p.Create {
		sb.WriteByte('c')
	}
	if p.Write {
		sb.WriteByte('w')
	}
	if p.Delete {
		sb.WriteByte('d')
	}
	return sb.String()
}

// Parse initializes the FileSASPermissions's fields from a string.
// An unrecognized character yields an error.
func (p *FileSASPermissions) Parse(s string) error {
	*p = FileSASPermissions{} // Reset all flags before parsing.
	for _, c := range s {
		switch c {
		case 'r':
			p.Read = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		default:
			return fmt.Errorf("Invalid permission: '%v'", c)
		}
	}
	return nil
}
|
||||
58
vendor/github.com/Azure/azure-storage-file-go/azfile/service_codes_file.go
generated
vendored
Normal file
58
vendor/github.com/Azure/azure-storage-file-go/azfile/service_codes_file.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
package azfile
|
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/file-service-error-codes

// ServiceCode values indicate a service failure.
// Each constant maps a File service error-code string to a typed value; the HTTP
// status the service returns with that code is noted in the per-constant comment.
const (
	// The file or directory could not be deleted because it is in use by an SMB client (409).
	ServiceCodeCannotDeleteFileOrDirectory ServiceCodeType = "CannotDeleteFileOrDirectory"

	// The specified resource state could not be flushed from an SMB client in the specified time (500).
	ServiceCodeClientCacheFlushDelay ServiceCodeType = "ClientCacheFlushDelay"

	// The specified resource is marked for deletion by an SMB client (409).
	ServiceCodeDeletePending ServiceCodeType = "DeletePending"

	// The specified directory is not empty (409).
	ServiceCodeDirectoryNotEmpty ServiceCodeType = "DirectoryNotEmpty"

	// A portion of the specified file is locked by an SMB client (409).
	ServiceCodeFileLockConflict ServiceCodeType = "FileLockConflict"

	// File or directory path is too long (400).
	// Or File or directory path has too many subdirectories (400).
	ServiceCodeInvalidFileOrDirectoryPathName ServiceCodeType = "InvalidFileOrDirectoryPathName"

	// The specified parent path does not exist (404).
	ServiceCodeParentNotFound ServiceCodeType = "ParentNotFound"

	// The specified resource is read-only and cannot be modified at this time (409).
	ServiceCodeReadOnlyAttribute ServiceCodeType = "ReadOnlyAttribute"

	// The specified share already exists (409).
	ServiceCodeShareAlreadyExists ServiceCodeType = "ShareAlreadyExists"

	// The specified share is being deleted. Try operation later (409).
	ServiceCodeShareBeingDeleted ServiceCodeType = "ShareBeingDeleted"

	// The specified share is disabled by the administrator (403).
	ServiceCodeShareDisabled ServiceCodeType = "ShareDisabled"

	// The specified share does not exist (404).
	ServiceCodeShareNotFound ServiceCodeType = "ShareNotFound"

	// The specified resource may be in use by an SMB client (409).
	ServiceCodeSharingViolation ServiceCodeType = "SharingViolation"

	// Another Share Snapshot operation is in progress (409).
	ServiceCodeShareSnapshotInProgress ServiceCodeType = "ShareSnapshotInProgress"

	// The total number of snapshots for the share is over the limit (409).
	ServiceCodeShareSnapshotCountExceeded ServiceCodeType = "ShareSnapshotCountExceeded"

	// The operation is not supported on a share snapshot (400).
	ServiceCodeShareSnapshotOperationNotSupported ServiceCodeType = "ShareSnapshotOperationNotSupported"

	// The share has snapshots and the operation requires no snapshots (409).
	ServiceCodeShareHasSnapshots ServiceCodeType = "ShareHasSnapshots"
)
|
||||
147
vendor/github.com/Azure/azure-storage-file-go/azfile/url_directory.go
generated
vendored
Normal file
147
vendor/github.com/Azure/azure-storage-file-go/azfile/url_directory.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// A DirectoryURL represents a URL to the Azure Storage directory allowing you to manipulate its directories and files.
|
||||
type DirectoryURL struct {
|
||||
directoryClient directoryClient
|
||||
}
|
||||
|
||||
// NewDirectoryURL creates a DirectoryURL object using the specified URL and request policy pipeline.
|
||||
// Note: p can't be nil.
|
||||
func NewDirectoryURL(url url.URL, p pipeline.Pipeline) DirectoryURL {
|
||||
directoryClient := newDirectoryClient(url, p)
|
||||
return DirectoryURL{directoryClient: directoryClient}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the DirectoryURL object.
|
||||
func (d DirectoryURL) URL() url.URL {
|
||||
return d.directoryClient.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (d DirectoryURL) String() string {
|
||||
u := d.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new DirectoryURL object identical to the source but with the specified request policy pipeline.
|
||||
func (d DirectoryURL) WithPipeline(p pipeline.Pipeline) DirectoryURL {
|
||||
return NewDirectoryURL(d.URL(), p)
|
||||
}
|
||||
|
||||
// NewFileURL creates a new FileURL object by concatenating fileName to the end of
|
||||
// DirectoryURL's URL. The new FileURL uses the same request policy pipeline as the DirectoryURL.
|
||||
// To change the pipeline, create the FileURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewFileURL instead of calling this object's
|
||||
// NewFileURL method.
|
||||
func (d DirectoryURL) NewFileURL(fileName string) FileURL {
|
||||
fileURL := appendToURLPath(d.URL(), fileName)
|
||||
return NewFileURL(fileURL, d.directoryClient.Pipeline())
|
||||
}
|
||||
|
||||
// NewDirectoryURL creates a new DirectoryURL object by concatenating directoryName to the end of
|
||||
// DirectoryURL's URL. The new DirectoryURL uses the same request policy pipeline as the DirectoryURL.
|
||||
// To change the pipeline, create the DirectoryURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewDirectoryURL instead of calling this object's
|
||||
// NewDirectoryURL method.
|
||||
func (d DirectoryURL) NewDirectoryURL(directoryName string) DirectoryURL {
|
||||
directoryURL := appendToURLPath(d.URL(), directoryName)
|
||||
return NewDirectoryURL(directoryURL, d.directoryClient.Pipeline())
|
||||
}
|
||||
|
||||
// Create creates a new directory within a storage account.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-directory.
|
||||
// Pass default values for SMB properties (ex: "None" for file attributes).
|
||||
func (d DirectoryURL) Create(ctx context.Context, metadata Metadata) (*DirectoryCreateResponse, error) {
|
||||
defaultPermissions := "inherit"
|
||||
return d.directoryClient.Create(ctx, "None", "now", "now", nil, metadata,
|
||||
&defaultPermissions, nil)
|
||||
}
|
||||
|
||||
// Delete removes the specified empty directory. Note that the directory must be empty before it can be deleted..
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-directory.
|
||||
func (d DirectoryURL) Delete(ctx context.Context) (*DirectoryDeleteResponse, error) {
|
||||
return d.directoryClient.Delete(ctx, nil)
|
||||
}
|
||||
|
||||
// GetProperties returns the directory's metadata and system properties.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-directory-properties.
|
||||
func (d DirectoryURL) GetProperties(ctx context.Context) (*DirectoryGetPropertiesResponse, error) {
|
||||
return d.directoryClient.GetProperties(ctx, nil, nil)
|
||||
}
|
||||
|
||||
// SetMetadata sets the directory's metadata.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-directory-metadata.
|
||||
func (d DirectoryURL) SetMetadata(ctx context.Context, metadata Metadata) (*DirectorySetMetadataResponse, error) {
|
||||
return d.directoryClient.SetMetadata(ctx, nil, metadata)
|
||||
}
|
||||
|
||||
// ListFilesAndDirectoriesOptions defines options available when calling ListFilesAndDirectoriesSegment.
type ListFilesAndDirectoriesOptions struct {
	Prefix     string // No Prefix header is produced if ""
	MaxResults int32  // 0 means unspecified
}

// pointers converts the option fields into the pointer form the generated client
// expects, mapping zero values to nil (i.e. "not specified").
func (o *ListFilesAndDirectoriesOptions) pointers() (prefix *string, maxResults *int32) {
	if o.Prefix != "" {
		prefix = &o.Prefix
	}
	if o.MaxResults != 0 {
		maxResults = &o.MaxResults
	}
	return prefix, maxResults
}
|
||||
|
||||
// toConvenienceModel convert raw response to convenience model.
|
||||
// func (r *listFilesAndDirectoriesSegmentResponse) toConvenienceModel() *ListFilesAndDirectoriesSegmentResponse {
|
||||
// cr := ListFilesAndDirectoriesSegmentResponse{
|
||||
// rawResponse: r.rawResponse,
|
||||
// ServiceEndpoint: r.ServiceEndpoint,
|
||||
// ShareName: r.ShareName,
|
||||
// ShareSnapshot: r.ShareSnapshot,
|
||||
// DirectoryPath: r.DirectoryPath,
|
||||
// Prefix: r.Prefix,
|
||||
// Marker: r.Marker,
|
||||
// MaxResults: r.MaxResults,
|
||||
// NextMarker: r.NextMarker,
|
||||
// }
|
||||
|
||||
// for _, e := range r.Entries {
|
||||
// if f, isFile := e.AsFileEntry(); isFile {
|
||||
// cr.Files = append(cr.Files, *f)
|
||||
// } else if d, isDir := e.AsDirectoryEntry(); isDir {
|
||||
// cr.Directories = append(cr.Directories, *d)
|
||||
// } else {
|
||||
// // Logic should not be here, otherwise client is not aligning to latest REST API document
|
||||
// panic(fmt.Errorf("invalid entry type found, entry info: %v", e))
|
||||
// }
|
||||
|
||||
// }
|
||||
|
||||
// return &cr
|
||||
// }
|
||||
|
||||
// ListFilesAndDirectoriesSegmentAutoRest is the implementation using Auto Rest generated protocol code.
|
||||
// func (d DirectoryURL) ListFilesAndDirectoriesSegmentAutoRest(ctx context.Context, marker Marker, o ListFilesAndDirectoriesOptions) (*ListFilesAndDirectoriesSegmentResponse, error) {
|
||||
// prefix, maxResults := o.pointers()
|
||||
|
||||
// rawResponse, error := d.directoryClient.ListFilesAndDirectoriesSegmentAutoRest(ctx, prefix, nil, marker.val, maxResults, nil)
|
||||
|
||||
// return rawResponse.toConvenienceModel(), error
|
||||
// }
|
||||
|
||||
// ListFilesAndDirectoriesSegment returns a single segment of files and directories starting from the specified Marker.
|
||||
// Use an empty Marker to start enumeration from the beginning. File and directory names are returned in lexicographic order.
|
||||
// After getting a segment, process it, and then call ListFilesAndDirectoriesSegment again (passing the the previously-returned
|
||||
// Marker) to get the next segment. This method lists the contents only for a single level of the directory hierarchy.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files.
|
||||
func (d DirectoryURL) ListFilesAndDirectoriesSegment(ctx context.Context, marker Marker, o ListFilesAndDirectoriesOptions) (*ListFilesAndDirectoriesSegmentResponse, error) {
|
||||
prefix, maxResults := o.pointers()
|
||||
return d.directoryClient.ListFilesAndDirectoriesSegment(ctx, prefix, nil, marker.Val, maxResults, nil)
|
||||
}
|
||||
211
vendor/github.com/Azure/azure-storage-file-go/azfile/url_file.go
generated
vendored
Normal file
211
vendor/github.com/Azure/azure-storage-file-go/azfile/url_file.go
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
	// fileType identifies the "file" resource type in request headers/paths.
	fileType = "file"

	// FileMaxUploadRangeBytes indicates the maximum number of bytes that can be sent in a call to UploadRange.
	FileMaxUploadRangeBytes = 4 * 1024 * 1024 // 4MB

	// FileMaxSizeInBytes indicates the maximum file size, in bytes.
	FileMaxSizeInBytes int64 = 1 * 1024 * 1024 * 1024 * 1024 // 1TB
)
|
||||
|
||||
// A FileURL represents a URL to an Azure Storage file.
|
||||
type FileURL struct {
|
||||
fileClient fileClient
|
||||
}
|
||||
|
||||
// NewFileURL creates a FileURL object using the specified URL and request policy pipeline.
|
||||
// Note: p can't be nil.
|
||||
func NewFileURL(url url.URL, p pipeline.Pipeline) FileURL {
|
||||
fileClient := newFileClient(url, p)
|
||||
return FileURL{fileClient: fileClient}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the FileURL object.
|
||||
func (f FileURL) URL() url.URL {
|
||||
return f.fileClient.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (f FileURL) String() string {
|
||||
u := f.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new FileURL object identical to the source but with the specified request policy pipeline.
|
||||
func (f FileURL) WithPipeline(p pipeline.Pipeline) FileURL {
|
||||
return NewFileURL(f.fileClient.URL(), p)
|
||||
}
|
||||
|
||||
// WithSnapshot creates a new FileURL object identical to the source but with the specified share snapshot timestamp.
|
||||
// Pass time.Time{} to remove the share snapshot returning a URL to the base file.
|
||||
func (f FileURL) WithSnapshot(shareSnapshot string) FileURL {
|
||||
p := NewFileURLParts(f.URL())
|
||||
p.ShareSnapshot = shareSnapshot
|
||||
return NewFileURL(p.URL(), f.fileClient.Pipeline())
|
||||
}
|
||||
|
||||
// Create creates a new file or replaces a file. Note that this method only initializes the file.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/create-file.
|
||||
// Pass default values for SMB properties (ex: "None" for file attributes).
|
||||
func (f FileURL) Create(ctx context.Context, size int64, h FileHTTPHeaders, metadata Metadata) (*FileCreateResponse, error) {
|
||||
defaultPermissions := "inherit"
|
||||
return f.fileClient.Create(ctx, size, "None", "now", "now", nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, &h.CacheControl,
|
||||
h.ContentMD5, &h.ContentDisposition, metadata, &defaultPermissions, nil)
|
||||
}
|
||||
|
||||
// StartCopy copies the data at the source URL to a file.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-file.
|
||||
func (f FileURL) StartCopy(ctx context.Context, source url.URL, metadata Metadata) (*FileStartCopyResponse, error) {
|
||||
return f.fileClient.StartCopy(ctx, source.String(), nil, metadata)
|
||||
}
|
||||
|
||||
// AbortCopy stops a pending copy that was previously started and leaves a destination file with 0 length and metadata.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-file.
|
||||
func (f FileURL) AbortCopy(ctx context.Context, copyID string) (*FileAbortCopyResponse, error) {
|
||||
return f.fileClient.AbortCopy(ctx, copyID, nil)
|
||||
}
|
||||
|
||||
// Download downloads count bytes of data from the start offset.
|
||||
// The response includes all of the file’s properties. However, passing true for rangeGetContentMD5 returns the range’s MD5 in the ContentMD5
|
||||
// response header/property if the range is <= 4MB; the HTTP request fails with 400 (Bad Request) if the requested range is greater than 4MB.
|
||||
// Note: offset must be >=0, count must be >= 0.
|
||||
// If count is CountToEnd (0), then data is read from specified offset to the end.
|
||||
// rangeGetContentMD5 only works with partial data downloading.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-file.
|
||||
func (f FileURL) Download(ctx context.Context, offset int64, count int64, rangeGetContentMD5 bool) (*RetryableDownloadResponse, error) {
|
||||
var xRangeGetContentMD5 *bool
|
||||
if rangeGetContentMD5 {
|
||||
if offset == 0 && count == CountToEnd {
|
||||
return nil, errors.New("invalid argument, rangeGetContentMD5 only works with partial data downloading")
|
||||
}
|
||||
xRangeGetContentMD5 = &rangeGetContentMD5
|
||||
}
|
||||
dr, err := f.fileClient.Download(ctx, nil, httpRange{offset: offset, count: count}.pointers(), xRangeGetContentMD5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &RetryableDownloadResponse{
|
||||
f: f,
|
||||
dr: dr,
|
||||
ctx: ctx,
|
||||
info: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, // TODO: Note conditional header is not currently supported in Azure File.
|
||||
}, err
|
||||
}
|
||||
|
||||
// Body constructs a stream to read data from with a resilient reader option.
|
||||
// A zero-value option means to get a raw stream.
|
||||
func (dr *RetryableDownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
|
||||
if o.MaxRetryRequests == 0 {
|
||||
return dr.Response().Body
|
||||
}
|
||||
|
||||
return NewRetryReader(
|
||||
dr.ctx,
|
||||
dr.Response(),
|
||||
dr.info,
|
||||
o,
|
||||
func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) {
|
||||
resp, err := dr.f.Download(ctx, info.Offset, info.Count, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Response(), err
|
||||
})
|
||||
}
|
||||
|
||||
// Delete immediately removes the file from the storage account.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2.
|
||||
func (f FileURL) Delete(ctx context.Context) (*FileDeleteResponse, error) {
|
||||
return f.fileClient.Delete(ctx, nil)
|
||||
}
|
||||
|
||||
// GetProperties returns the file's metadata and properties.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-file-properties.
|
||||
func (f FileURL) GetProperties(ctx context.Context) (*FileGetPropertiesResponse, error) {
|
||||
return f.fileClient.GetProperties(ctx, nil, nil)
|
||||
}
|
||||
|
||||
// SetHTTPHeaders sets file's system properties.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-file-properties.
|
||||
func (f FileURL) SetHTTPHeaders(ctx context.Context, h FileHTTPHeaders) (*FileSetHTTPHeadersResponse, error) {
|
||||
defaultPermissions := "preserve"
|
||||
return f.fileClient.SetHTTPHeaders(ctx, "preserve", "preserve", "preserve", nil,
|
||||
nil, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, &h.CacheControl, h.ContentMD5,
|
||||
&h.ContentDisposition, &defaultPermissions, nil)
|
||||
}
|
||||
|
||||
// SetMetadata sets a file's metadata.
|
||||
// https://docs.microsoft.com/rest/api/storageservices/set-file-metadata.
|
||||
func (f FileURL) SetMetadata(ctx context.Context, metadata Metadata) (*FileSetMetadataResponse, error) {
|
||||
return f.fileClient.SetMetadata(ctx, nil, metadata)
|
||||
}
|
||||
|
||||
// Resize resizes the file to the specified size.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-file-properties.
|
||||
func (f FileURL) Resize(ctx context.Context, length int64) (*FileSetHTTPHeadersResponse, error) {
|
||||
defaultPermissions := "preserve"
|
||||
return f.fileClient.SetHTTPHeaders(ctx, "preserve", "preserve", "preserve", nil,
|
||||
&length, nil, nil, nil, nil,
|
||||
nil, nil, &defaultPermissions, nil)
|
||||
}
|
||||
|
||||
// UploadRange writes bytes to a file.
|
||||
// offset indicates the offset at which to begin writing, in bytes.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-range.
|
||||
func (f FileURL) UploadRange(ctx context.Context, offset int64, body io.ReadSeeker, transactionalMD5 []byte) (*FileUploadRangeResponse, error) {
|
||||
if body == nil {
|
||||
return nil, errors.New("invalid argument, body must not be nil")
|
||||
}
|
||||
|
||||
count := validateSeekableStreamAt0AndGetCount(body)
|
||||
if count == 0 {
|
||||
return nil, errors.New("invalid argument, body must contain readable data whose size is > 0")
|
||||
}
|
||||
|
||||
// TransactionalContentMD5 isn't supported currently.
|
||||
return f.fileClient.UploadRange(ctx, *toRange(offset, count), FileRangeWriteUpdate, count, body, nil, transactionalMD5)
|
||||
}
|
||||
|
||||
// Update range with bytes from a specific URL.
|
||||
// offset indicates the offset at which to begin writing, in bytes.
|
||||
func (f FileURL) UploadRangeFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64,
|
||||
count int64) (*FileUploadRangeFromURLResponse, error) {
|
||||
|
||||
return f.fileClient.UploadRangeFromURL(ctx, *toRange(destOffset, count), sourceURL.String(), 0, nil,
|
||||
toRange(sourceOffset, count), nil, nil, nil)
|
||||
}
|
||||
|
||||
// ClearRange clears the specified range and releases the space used in storage for that range.
|
||||
// offset means the start offset of the range to clear.
|
||||
// count means count of bytes to clean, it cannot be CountToEnd (0), and must be explictly specified.
|
||||
// If the range specified is not 512-byte aligned, the operation will write zeros to
|
||||
// the start or end of the range that is not 512-byte aligned and free the rest of the range inside that is 512-byte aligned.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-range.
|
||||
func (f FileURL) ClearRange(ctx context.Context, offset int64, count int64) (*FileUploadRangeResponse, error) {
|
||||
if count <= 0 {
|
||||
return nil, errors.New("invalid argument, count cannot be CountToEnd, and must be > 0")
|
||||
}
|
||||
|
||||
return f.fileClient.UploadRange(ctx, *toRange(offset, count), FileRangeWriteClear, 0, nil, nil, nil)
|
||||
}
|
||||
|
||||
// GetRangeList returns the list of valid ranges for a file.
|
||||
// Use a count with value CountToEnd (0) to indicate the left part of file start from offset.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/list-ranges.
|
||||
func (f FileURL) GetRangeList(ctx context.Context, offset int64, count int64) (*Ranges, error) {
|
||||
return f.fileClient.GetRangeList(ctx, nil, nil, httpRange{offset: offset, count: count}.pointers())
|
||||
}
|
||||
197
vendor/github.com/Azure/azure-storage-file-go/azfile/url_service.go
generated
vendored
Normal file
197
vendor/github.com/Azure/azure-storage-file-go/azfile/url_service.go
generated
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// storageAnalyticsVersion indicates the version of Storage Analytics to configure. Use "1.0" for this value.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties.
|
||||
storageAnalyticsVersion = "1.0"
|
||||
)
|
||||
|
||||
// A ServiceURL represents a URL to the Azure Storage File service allowing you to manipulate file shares.
|
||||
type ServiceURL struct {
|
||||
client serviceClient
|
||||
}
|
||||
|
||||
// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
|
||||
// Note: p can't be nil.
|
||||
func NewServiceURL(url url.URL, p pipeline.Pipeline) ServiceURL {
|
||||
client := newServiceClient(url, p)
|
||||
return ServiceURL{client: client}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the ServiceURL object.
|
||||
func (s ServiceURL) URL() url.URL {
|
||||
return s.client.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (s ServiceURL) String() string {
|
||||
u := s.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
|
||||
func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
|
||||
return NewServiceURL(s.URL(), p)
|
||||
}
|
||||
|
||||
// NewShareURL creates a new ShareURL object by concatenating shareName to the end of
|
||||
// ServiceURL's URL. The new ShareURL uses the same request policy pipeline as the ServiceURL.
|
||||
// To change the pipeline, create the ShareURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewShareURL instead of calling this object's
|
||||
// NewShareURL method.
|
||||
func (s ServiceURL) NewShareURL(shareName string) ShareURL {
|
||||
shareURL := appendToURLPath(s.URL(), shareName)
|
||||
return NewShareURL(shareURL, s.client.Pipeline())
|
||||
}
|
||||
|
||||
// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
|
||||
func appendToURLPath(u url.URL, name string) url.URL {
|
||||
// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
|
||||
// When you call url.Parse() this is what you'll get:
|
||||
// Scheme: "https"
|
||||
// Opaque: ""
|
||||
// User: nil
|
||||
// Host: "ms.com"
|
||||
// Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
|
||||
// RawPath: ""
|
||||
// ForceQuery: false
|
||||
// RawQuery: "k1=v1&k2=v2"
|
||||
// Fragment: "f"
|
||||
if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
|
||||
u.Path += "/" // Append "/" to end before appending name
|
||||
}
|
||||
u.Path += name
|
||||
return u
|
||||
}
|
||||
|
||||
// ListSharesSegment returns a single segment of shares starting from the specified Marker. Use an empty
|
||||
// Marker to start enumeration from the beginning. Share names are returned in lexicographic order.
|
||||
// After getting a segment, process it, and then call ListSharesSegment again (passing the the previously-returned
|
||||
// Marker) to get the next segment. For more information, see
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/list-shares.
|
||||
func (s ServiceURL) ListSharesSegment(ctx context.Context, marker Marker, o ListSharesOptions) (*ListSharesResponse, error) {
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return s.client.ListSharesSegment(ctx, prefix, marker.Val, maxResults, include, nil)
|
||||
}
|
||||
|
||||
// ListSharesOptions defines options available when calling ListSharesSegment.
|
||||
type ListSharesOptions struct {
|
||||
Detail ListSharesDetail // No IncludeType header is produced if ""
|
||||
Prefix string // No Prefix header is produced if ""
|
||||
MaxResults int32 // 0 means unspecified
|
||||
}
|
||||
|
||||
func (o *ListSharesOptions) pointers() (prefix *string, include []ListSharesIncludeType, maxResults *int32) {
|
||||
if o.Prefix != "" {
|
||||
prefix = &o.Prefix
|
||||
}
|
||||
if o.MaxResults != 0 {
|
||||
maxResults = &o.MaxResults
|
||||
}
|
||||
include = o.Detail.toArray()
|
||||
return
|
||||
}
|
||||
|
||||
// ListSharesDetail indicates what additional information the service should return with each share.
|
||||
type ListSharesDetail struct {
|
||||
Metadata, Snapshots bool
|
||||
}
|
||||
|
||||
// toArray produces the Include query parameter's value.
|
||||
func (d *ListSharesDetail) toArray() []ListSharesIncludeType {
|
||||
items := make([]ListSharesIncludeType, 0, 2)
|
||||
if d.Metadata {
|
||||
items = append(items, ListSharesIncludeMetadata)
|
||||
}
|
||||
if d.Snapshots {
|
||||
items = append(items, ListSharesIncludeSnapshots)
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// toFsp converts StorageServiceProperties to convenience representation FileServiceProperties.
|
||||
// This method is added considering protocol layer's swagger unification purpose.
|
||||
func (ssp *StorageServiceProperties) toFsp() *FileServiceProperties {
|
||||
if ssp == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &FileServiceProperties{
|
||||
rawResponse: ssp.rawResponse,
|
||||
HourMetrics: ssp.HourMetrics.toMp(),
|
||||
MinuteMetrics: ssp.MinuteMetrics.toMp(),
|
||||
Cors: ssp.Cors,
|
||||
}
|
||||
}
|
||||
|
||||
// toMp converts Metrics to convenience representation MetricProperties.
|
||||
// This method is added considering protocol layer's swagger unification purpose.
|
||||
func (m *Metrics) toMp() MetricProperties {
|
||||
mp := MetricProperties{}
|
||||
if m.Enabled {
|
||||
mp.MetricEnabled = true
|
||||
mp.IncludeAPIs = *m.IncludeAPIs
|
||||
if m.RetentionPolicy != nil && m.RetentionPolicy.Enabled {
|
||||
mp.RetentionPolicyEnabled = true
|
||||
mp.RetentionDays = *m.RetentionPolicy.Days
|
||||
}
|
||||
}
|
||||
|
||||
return mp
|
||||
}
|
||||
|
||||
// toSsp converts FileServiceProperties to convenience representation StorageServiceProperties.
|
||||
// This method is added considering protocol layer's swagger unification purpose.
|
||||
func (fsp *FileServiceProperties) toSsp() *StorageServiceProperties {
|
||||
if fsp == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &StorageServiceProperties{
|
||||
rawResponse: fsp.rawResponse,
|
||||
HourMetrics: fsp.HourMetrics.toM(),
|
||||
MinuteMetrics: fsp.MinuteMetrics.toM(),
|
||||
Cors: fsp.Cors,
|
||||
}
|
||||
}
|
||||
|
||||
// toM converts MetricProperties to Metrics.
|
||||
// This method is added considering protocol layer's swagger unification purpose.
|
||||
func (mp MetricProperties) toM() *Metrics {
|
||||
m := Metrics{
|
||||
Version: storageAnalyticsVersion,
|
||||
RetentionPolicy: &RetentionPolicy{}} // Note: Version and RetentionPolicy are actually mandatory.
|
||||
|
||||
if mp.MetricEnabled {
|
||||
m.Enabled = true
|
||||
m.IncludeAPIs = &mp.IncludeAPIs
|
||||
if mp.RetentionPolicyEnabled {
|
||||
m.RetentionPolicy.Enabled = true
|
||||
m.RetentionPolicy.Days = &mp.RetentionDays
|
||||
}
|
||||
}
|
||||
|
||||
return &m
|
||||
}
|
||||
|
||||
// GetProperties returns the properties of the File service.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-file-service-properties.
|
||||
func (s ServiceURL) GetProperties(ctx context.Context) (*FileServiceProperties, error) {
|
||||
ssp, error := s.client.GetProperties(ctx, nil)
|
||||
|
||||
return ssp.toFsp(), error
|
||||
}
|
||||
|
||||
// SetProperties sets the properties of the File service.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties.
|
||||
func (s ServiceURL) SetProperties(ctx context.Context, properties FileServiceProperties) (*ServiceSetPropertiesResponse, error) {
|
||||
return s.client.SetProperties(ctx, *properties.toSsp(), nil)
|
||||
}
|
||||
166
vendor/github.com/Azure/azure-storage-file-go/azfile/url_share.go
generated
vendored
Normal file
166
vendor/github.com/Azure/azure-storage-file-go/azfile/url_share.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// A ShareURL represents a URL to the Azure Storage share allowing you to manipulate its directories and files.
|
||||
type ShareURL struct {
|
||||
shareClient shareClient
|
||||
}
|
||||
|
||||
// NewShareURL creates a ShareURL object using the specified URL and request policy pipeline.
|
||||
// Note: p can't be nil.
|
||||
func NewShareURL(url url.URL, p pipeline.Pipeline) ShareURL {
|
||||
shareClient := newShareClient(url, p)
|
||||
return ShareURL{shareClient: shareClient}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the ShareURL object.
|
||||
func (s ShareURL) URL() url.URL {
|
||||
return s.shareClient.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (s ShareURL) String() string {
|
||||
u := s.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new ShareURL object identical to the source but with the specified request policy pipeline.
|
||||
func (s ShareURL) WithPipeline(p pipeline.Pipeline) ShareURL {
|
||||
return NewShareURL(s.URL(), p)
|
||||
}
|
||||
|
||||
// WithSnapshot creates a new ShareURL object identical to the source but with the specified snapshot timestamp.
|
||||
// Pass time.Time{} to remove the snapshot returning a URL to the base share.
|
||||
func (s ShareURL) WithSnapshot(snapshot string) ShareURL {
|
||||
p := NewFileURLParts(s.URL())
|
||||
p.ShareSnapshot = snapshot
|
||||
return NewShareURL(p.URL(), s.shareClient.Pipeline())
|
||||
}
|
||||
|
||||
// NewDirectoryURL creates a new DirectoryURL object by concatenating directoryName to the end of
|
||||
// ShareURL's URL. The new DirectoryURL uses the same request policy pipeline as the ShareURL.
|
||||
// To change the pipeline, create the DirectoryURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewDirectoryURL instead of calling this object's
|
||||
// NewDirectoryURL method.
|
||||
func (s ShareURL) NewDirectoryURL(directoryName string) DirectoryURL {
|
||||
directoryURL := appendToURLPath(s.URL(), directoryName)
|
||||
return NewDirectoryURL(directoryURL, s.shareClient.Pipeline())
|
||||
}
|
||||
|
||||
// NewRootDirectoryURL creates a new DirectoryURL object using ShareURL's URL.
|
||||
// The new DirectoryURL uses the same request policy pipeline as the
|
||||
// ShareURL. To change the pipeline, create the DirectoryURL and then call its WithPipeline method
|
||||
// passing in the desired pipeline object. Or, call NewDirectoryURL instead of calling the NewDirectoryURL method.
|
||||
func (s ShareURL) NewRootDirectoryURL() DirectoryURL {
|
||||
return NewDirectoryURL(s.URL(), s.shareClient.Pipeline())
|
||||
}
|
||||
|
||||
// Create creates a new share within a storage account. If a share with the same name already exists, the operation fails.
|
||||
// quotaInGB specifies the maximum size of the share in gigabytes, 0 means you accept service's default quota.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-share.
|
||||
func (s ShareURL) Create(ctx context.Context, metadata Metadata, quotaInGB int32) (*ShareCreateResponse, error) {
|
||||
var quota *int32
|
||||
if quotaInGB != 0 {
|
||||
quota = "aInGB
|
||||
}
|
||||
return s.shareClient.Create(ctx, nil, metadata, quota)
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a read-only snapshot of a share.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-share.
|
||||
func (s ShareURL) CreateSnapshot(ctx context.Context, metadata Metadata) (*ShareCreateSnapshotResponse, error) {
|
||||
return s.shareClient.CreateSnapshot(ctx, nil, metadata)
|
||||
}
|
||||
|
||||
// Delete marks the specified share or share snapshot for deletion.
|
||||
// The share or share snapshot and any files contained within it are later deleted during garbage collection.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-share.
|
||||
func (s ShareURL) Delete(ctx context.Context, deleteSnapshotsOption DeleteSnapshotsOptionType) (*ShareDeleteResponse, error) {
|
||||
return s.shareClient.Delete(ctx, nil, nil, deleteSnapshotsOption)
|
||||
}
|
||||
|
||||
// GetProperties returns all user-defined metadata and system properties for the specified share or share snapshot.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-share-properties.
|
||||
func (s ShareURL) GetProperties(ctx context.Context) (*ShareGetPropertiesResponse, error) {
|
||||
return s.shareClient.GetProperties(ctx, nil, nil)
|
||||
}
|
||||
|
||||
// SetQuota sets service-defined properties for the specified share.
|
||||
// quotaInGB specifies the maximum size of the share in gigabytes, 0 means no quote and uses service's default value.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-share-properties.
|
||||
func (s ShareURL) SetQuota(ctx context.Context, quotaInGB int32) (*ShareSetQuotaResponse, error) {
|
||||
var quota *int32
|
||||
if quotaInGB != 0 {
|
||||
quota = "aInGB
|
||||
}
|
||||
return s.shareClient.SetQuota(ctx, nil, quota)
|
||||
}
|
||||
|
||||
// SetMetadata sets the share's metadata.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-share-metadata.
|
||||
func (s ShareURL) SetMetadata(ctx context.Context, metadata Metadata) (*ShareSetMetadataResponse, error) {
|
||||
return s.shareClient.SetMetadata(ctx, nil, metadata)
|
||||
}
|
||||
|
||||
// GetPermissions returns information about stored access policies specified on the share.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-share-acl.
|
||||
func (s ShareURL) GetPermissions(ctx context.Context) (*SignedIdentifiers, error) {
|
||||
return s.shareClient.GetAccessPolicy(ctx, nil)
|
||||
}
|
||||
|
||||
// The AccessPolicyPermission type simplifies creating the permissions string for a share's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
	Read, Create, Write, Delete, List bool
}

// String produces the access policy permission string for an Azure Storage share.
// Call this method to set AccessPolicy's Permission field.
// Flags are emitted in the service-mandated order: r, c, w, d, l.
func (p AccessPolicyPermission) String() string {
	flags := []struct {
		enabled bool
		letter  byte
	}{
		{p.Read, 'r'},
		{p.Create, 'c'},
		{p.Write, 'w'},
		{p.Delete, 'd'},
		{p.List, 'l'},
	}
	out := make([]byte, 0, len(flags))
	for _, f := range flags {
		if f.enabled {
			out = append(out, f.letter)
		}
	}
	return string(out)
}

// Parse initializes the AccessPolicyPermission's fields from a string.
// Each flag is independent; characters other than r/c/w/d/l are ignored.
func (p *AccessPolicyPermission) Parse(s string) {
	fields := [...]*bool{&p.Read, &p.Create, &p.Write, &p.Delete, &p.List}
	for i, r := range "rcwdl" { // all ASCII, so i runs 0..4
		*fields[i] = strings.ContainsRune(s, r)
	}
}
|
||||
|
||||
// SetPermissions sets a stored access policy for use with shared access signatures.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-share-acl.
|
||||
func (s ShareURL) SetPermissions(ctx context.Context, permissions []SignedIdentifier) (*ShareSetAccessPolicyResponse, error) {
|
||||
return s.shareClient.SetAccessPolicy(ctx, permissions, nil)
|
||||
}
|
||||
|
||||
// GetStatistics retrieves statistics related to the share.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-share-stats.
|
||||
func (s ShareURL) GetStatistics(ctx context.Context) (*ShareStats, error) {
|
||||
return s.shareClient.GetStatistics(ctx, nil)
|
||||
}
|
||||
3
vendor/github.com/Azure/azure-storage-file-go/azfile/version.go
generated
vendored
Normal file
3
vendor/github.com/Azure/azure-storage-file-go/azfile/version.go
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
package azfile
|
||||
|
||||
const serviceLibVersion = "0.6.0"
|
||||
55
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_anonymous.go
generated
vendored
Normal file
55
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_anonymous.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// Credential represent any credential type; it is used to create a credential policy Factory.
|
||||
type Credential interface {
|
||||
pipeline.Factory
|
||||
credentialMarker()
|
||||
}
|
||||
|
||||
type credentialFunc pipeline.FactoryFunc
|
||||
|
||||
func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return f(next, po)
|
||||
}
|
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (credentialFunc) credentialMarker() {}
|
||||
|
||||
//////////////////////////////
|
||||
|
||||
// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource
|
||||
// or for use with Shared Access Signatures (SAS).
|
||||
func NewAnonymousCredential() Credential {
|
||||
return anonymousCredentialFactory
|
||||
}
|
||||
|
||||
var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
|
||||
|
||||
// anonymousCredentialPolicyFactory is the credential's policy factory.
|
||||
type anonymousCredentialPolicyFactory struct {
|
||||
}
|
||||
|
||||
// New creates a credential policy object.
|
||||
func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return &anonymousCredentialPolicy{next: next}
|
||||
}
|
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*anonymousCredentialPolicyFactory) credentialMarker() {}
|
||||
|
||||
// anonymousCredentialPolicy is the credential's policy object.
|
||||
type anonymousCredentialPolicy struct {
|
||||
next pipeline.Policy
|
||||
}
|
||||
|
||||
// Do implements the credential's policy interface.
|
||||
func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
// For anonymous credentials, this is effectively a no-op
|
||||
return p.next.Do(ctx, request)
|
||||
}
|
||||
187
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_shared_key.go
generated
vendored
Normal file
187
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_credential_shared_key.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
// accountKey must be the base64-encoded key as shown in the Azure portal; a
// malformed key yields a nil credential and a non-nil error.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	// `key`, not `bytes`: the original local shadowed the imported bytes package.
	key, err := base64.StdEncoding.DecodeString(accountKey)
	if err != nil {
		// Return nil on failure — callers must not use a partially-built credential.
		return nil, err
	}
	return &SharedKeyCredential{accountName: accountName, accountKey: key}, nil
}

// SharedKeyCredential contains an account's name and its primary or secondary key.
// It is immutable making it shareable and goroutine-safe.
type SharedKeyCredential struct {
	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
	accountName string
	accountKey  []byte
}

// AccountName returns the Storage account's name.
func (f SharedKeyCredential) AccountName() string {
	return f.accountName
}
|
||||
|
||||
// New creates a credential policy object.
|
||||
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
// Add a x-ms-date header if it doesn't already exist
|
||||
if d := request.Header.Get(headerXmsDate); d == "" {
|
||||
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
|
||||
}
|
||||
stringToSign := f.buildStringToSign(request)
|
||||
signature := f.ComputeHMACSHA256(stringToSign)
|
||||
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
|
||||
request.Header[headerAuthorization] = []string{authHeader}
|
||||
|
||||
response, err := next.Do(ctx, request)
|
||||
if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
|
||||
// Service failed to authenticate request, log it
|
||||
po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
|
||||
}
|
||||
return response, err
|
||||
})
|
||||
}
|
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*SharedKeyCredential) credentialMarker() {}
|
||||
|
||||
// Constants ensuring that header names are correctly spelled and consistently cased.
|
||||
const (
|
||||
headerAuthorization = "Authorization"
|
||||
headerCacheControl = "Cache-Control"
|
||||
headerContentEncoding = "Content-Encoding"
|
||||
headerContentDisposition = "Content-Disposition"
|
||||
headerContentLanguage = "Content-Language"
|
||||
headerContentLength = "Content-Length"
|
||||
headerContentMD5 = "Content-MD5"
|
||||
headerContentType = "Content-Type"
|
||||
headerDate = "Date"
|
||||
headerIfMatch = "If-Match"
|
||||
headerIfModifiedSince = "If-Modified-Since"
|
||||
headerIfNoneMatch = "If-None-Match"
|
||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
headerRange = "Range"
|
||||
headerUserAgent = "User-Agent"
|
||||
headerXmsDate = "x-ms-date"
|
||||
headerXmsVersion = "x-ms-version"
|
||||
)
|
||||
|
||||
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
|
||||
func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
|
||||
h := hmac.New(sha256.New, f.accountKey)
|
||||
h.Write([]byte(message))
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string {
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||
headers := request.Header
|
||||
contentLength := headers.Get(headerContentLength)
|
||||
if contentLength == "0" {
|
||||
contentLength = ""
|
||||
}
|
||||
|
||||
stringToSign := strings.Join([]string{
|
||||
request.Method,
|
||||
headers.Get(headerContentEncoding),
|
||||
headers.Get(headerContentLanguage),
|
||||
contentLength,
|
||||
headers.Get(headerContentMD5),
|
||||
headers.Get(headerContentType),
|
||||
"", // Empty date because x-ms-date is expected (as per web page above)
|
||||
headers.Get(headerIfModifiedSince),
|
||||
headers.Get(headerIfMatch),
|
||||
headers.Get(headerIfNoneMatch),
|
||||
headers.Get(headerIfUnmodifiedSince),
|
||||
headers.Get(headerRange),
|
||||
buildCanonicalizedHeader(headers),
|
||||
f.buildCanonicalizedResource(request.URL),
|
||||
}, "\n")
|
||||
return stringToSign
|
||||
}
|
||||
|
||||
func buildCanonicalizedHeader(headers http.Header) string {
|
||||
cm := map[string][]string{}
|
||||
for k, v := range headers {
|
||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
||||
if strings.HasPrefix(headerName, "x-ms-") {
|
||||
cm[headerName] = v // NOTE: the value must not have any whitespace around it.
|
||||
}
|
||||
}
|
||||
if len(cm) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(cm))
|
||||
for key := range cm {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
ch := bytes.NewBufferString("")
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
ch.WriteRune('\n')
|
||||
}
|
||||
ch.WriteString(key)
|
||||
ch.WriteRune(':')
|
||||
ch.WriteString(strings.Join(cm[key], ","))
|
||||
}
|
||||
return string(ch.Bytes())
|
||||
}
|
||||
|
||||
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||
cr := bytes.NewBufferString("/")
|
||||
cr.WriteString(f.accountName)
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
// Any portion of the CanonicalizedResource string that is derived from
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr.WriteString(u.EscapedPath())
|
||||
} else {
|
||||
// a slash is required to indicate the root path
|
||||
cr.WriteString("/")
|
||||
}
|
||||
|
||||
// params is a map[string][]string; param name is key; params values is []string
|
||||
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
|
||||
if err != nil {
|
||||
sanityCheckFailed(err.Error())
|
||||
}
|
||||
|
||||
if len(params) > 0 { // There is at least 1 query parameter
|
||||
paramNames := []string{} // We use this to sort the parameter key names
|
||||
for paramName := range params {
|
||||
paramNames = append(paramNames, paramName) // paramNames must be lowercase
|
||||
}
|
||||
sort.Strings(paramNames)
|
||||
|
||||
for _, paramName := range paramNames {
|
||||
paramValues := params[paramName]
|
||||
sort.Strings(paramValues)
|
||||
|
||||
// Join the sorted key values separated by ','
|
||||
// Then prepend "keyName:"; then add this string to the buffer
|
||||
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
|
||||
}
|
||||
}
|
||||
return string(cr.Bytes())
|
||||
}
|
||||
28
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_unix.go
generated
vendored
Normal file
28
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_unix.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type mmf []byte
|
||||
|
||||
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
|
||||
prot, flags := unix.PROT_READ, unix.MAP_SHARED // Assume read-only
|
||||
if writable {
|
||||
prot, flags = unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED
|
||||
}
|
||||
addr, err := unix.Mmap(int(file.Fd()), offset, length, prot, flags)
|
||||
return mmf(addr), err
|
||||
}
|
||||
|
||||
func (m *mmf) unmap() {
|
||||
err := unix.Munmap(*m)
|
||||
*m = nil
|
||||
if err != nil {
|
||||
sanityCheckFailed(err.Error())
|
||||
}
|
||||
}
|
||||
40
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_windows.go
generated
vendored
Normal file
40
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_mmf_windows.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type mmf []byte
|
||||
|
||||
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
|
||||
prot, access := uint32(windows.PAGE_READONLY), uint32(windows.FILE_MAP_READ) // Assume read-only
|
||||
if writable {
|
||||
prot, access = uint32(windows.PAGE_READWRITE), uint32(windows.FILE_MAP_WRITE)
|
||||
}
|
||||
maxSize := int64(offset + int64(length))
|
||||
hMMF, errno := windows.CreateFileMapping(windows.Handle(file.Fd()), nil, prot, uint32(maxSize>>32), uint32(maxSize&0xffffffff), nil)
|
||||
if hMMF == 0 {
|
||||
return nil, os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
defer windows.CloseHandle(hMMF)
|
||||
addr, errno := windows.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
|
||||
m := mmf{}
|
||||
h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
|
||||
h.Data = addr
|
||||
h.Len = length
|
||||
h.Cap = h.Len
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *mmf) unmap() {
|
||||
addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
|
||||
*m = mmf{}
|
||||
err := windows.UnmapViewOfFile(addr)
|
||||
if err != nil {
|
||||
sanityCheckFailed(err.Error())
|
||||
}
|
||||
}
|
||||
44
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_pipeline.go
generated
vendored
Normal file
44
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_pipeline.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
|
||||
type PipelineOptions struct {
|
||||
// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
|
||||
Log pipeline.LogOptions
|
||||
|
||||
// Retry configures the built-in retry policy behavior.
|
||||
Retry RetryOptions
|
||||
|
||||
// RequestLog configures the built-in request logging policy.
|
||||
RequestLog RequestLogOptions
|
||||
|
||||
// Telemetry configures the built-in telemetry policy behavior.
|
||||
Telemetry TelemetryOptions
|
||||
}
|
||||
|
||||
// NewPipeline creates a Pipeline using the specified credentials and options.
|
||||
// Note: c can't be nil.
|
||||
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
|
||||
// Closest to API goes first; closest to the wire goes last
|
||||
f := []pipeline.Factory{
|
||||
NewTelemetryPolicyFactory(o.Telemetry),
|
||||
NewUniqueRequestIDPolicyFactory(),
|
||||
NewRetryPolicyFactory(o.Retry),
|
||||
}
|
||||
|
||||
if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
|
||||
// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
|
||||
// NOTE: The credential's policy factory must appear close to the wire so it can sign any
|
||||
// changes made by other factories (like UniqueRequestIDPolicyFactory)
|
||||
f = append(f, c)
|
||||
}
|
||||
f = append(f,
|
||||
NewRequestLogPolicyFactory(o.RequestLog),
|
||||
pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
|
||||
|
||||
|
||||
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})
|
||||
}
|
||||
182
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_request_log.go
generated
vendored
Normal file
182
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_request_log.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// RequestLogOptions configures the request logging policy's behavior.
type RequestLogOptions struct {
	// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
	// duration (-1=no logging; 0=default threshold).
	LogWarningIfTryOverThreshold time.Duration
}

// defaults returns a copy of o with any zero-valued threshold replaced by the default (3s).
func (o RequestLogOptions) defaults() RequestLogOptions {
	if o.LogWarningIfTryOverThreshold != 0 {
		return o // Caller set an explicit threshold (or disabled it with -1)
	}
	// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
	// but this monitors the time to get the HTTP response, NOT the time to download the response body.
	o.LogWarningIfTryOverThreshold = 3 * time.Second
	return o
}
|
||||
|
||||
// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
// The returned factory's policies log each outgoing request at Info level, then log the
// request/response pair at Info, Warning (slow try), or Error (failure / bad status) level.
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
	o = o.defaults() // Force defaults to be calculated
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		// These variables are per-policy; shared by multiple calls to Do
		var try int32
		operationStart := time.Now() // If this is the 1st try, record the operation state time
		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
			try++ // The first try is #1 (not #0)

			// Log the outgoing request as informational
			if po.ShouldLog(pipeline.LogInfo) {
				b := &bytes.Buffer{}
				fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
				// prepareRequestForLogging redacts SAS signatures before the URL hits the log.
				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
				po.Log(pipeline.LogInfo, b.String())
			}

			// Set the time for this particular retry operation and then Do the operation.
			tryStart := time.Now()
			response, err = next.Do(ctx, request) // Make the request
			tryEnd := time.Now()
			tryDuration := tryEnd.Sub(tryStart)
			opDuration := tryEnd.Sub(operationStart)

			logLevel, forceLog := pipeline.LogInfo, false // Default logging information

			// If the response took too long, we'll upgrade to warning.
			if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
				// Log a warning if the try duration exceeded the specified threshold
				logLevel, forceLog = pipeline.LogWarning, true
			}

			if err == nil { // We got a response from the service
				sc := response.Response().StatusCode
				if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
					logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed is an error) or any 5xx
				} else {
					// For other status codes, we leave the level as is.
				}
			} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
				logLevel, forceLog = pipeline.LogError, true
			}

			if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
				// We're going to log this; build the string to log
				b := &bytes.Buffer{}
				slow := ""
				if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
					slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
				}
				fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
				if err != nil { // This HTTP request did not get a response from the service
					fmt.Fprint(b, "REQUEST ERROR\n")
				} else {
					if logLevel == pipeline.LogError {
						fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
					} else {
						fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
					}
				}

				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
				if logLevel <= pipeline.LogError {
					b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
				}
				msg := b.String()

				if forceLog {
					pipeline.ForceLog(logLevel, msg)
				}
				if shouldLog {
					po.Log(logLevel, msg)
				}
			}
			return response, err
		}
	})
}
|
||||
|
||||
// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
// It returns whether a 'sig' parameter was found, together with the (possibly redacted) query.
//
// Fixes over the previous implementation:
//   - the old code only searched for the substrings "?sig=" and "&sig=", so a 'sig'
//     parameter appearing FIRST in the query was never redacted (callers pass
//     URL.RawQuery, which never contains a leading '?');
//   - the old code lowercased the entire raw query before returning it, corrupting
//     the logged values of every other query parameter. The casing is now preserved.
func RedactSigQueryParam(rawQuery string) (bool, string) {
	// Cheap pre-check: if "sig=" does not occur at all (any case), skip parsing and
	// return the query unchanged (no memory allocation for redaction).
	if !strings.Contains(strings.ToLower(rawQuery), "sig=") {
		return false, rawQuery
	}
	// "sig=" may be present; parse and redact the value of any parameter named "sig".
	values, _ := url.ParseQuery(rawQuery)
	sigFound := false
	for name := range values {
		if strings.EqualFold(name, "sig") {
			sigFound = true
			values[name] = []string{"REDACTED"}
		}
	}
	if !sigFound {
		// "sig=" appeared only inside a value, not as a parameter name.
		return false, rawQuery
	}
	return true, values.Encode()
}
|
||||
|
||||
func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
||||
req := request
|
||||
if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
|
||||
// Make copy so we don't destroy the query parameters we actually need to send in the request
|
||||
req = request.Copy()
|
||||
req.Request.URL.RawQuery = rawQuery
|
||||
}
|
||||
|
||||
return prepareRequestForServiceLogging(req)
|
||||
}
|
||||
|
||||
// stack captures the current goroutine's stack trace, doubling the buffer
// until the whole trace fits.
func stack() []byte {
	for size := 1024; ; size *= 2 {
		trace := make([]byte, size)
		if n := runtime.Stack(trace, false); n < size {
			return trace[:n]
		}
	}
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////
// Redact phase useful for blob and file service only. For other services,
// this method can directly return request.Request.
///////////////////////////////////////////////////////////////////////////////////////

// prepareRequestForServiceLogging redacts any SAS signature embedded in the
// x-ms-copy-source header's URL (the copy-source may itself be a SAS-signed URL).
// When redaction is needed the request is copied first so the real outgoing
// request is left untouched.
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
	req := request
	if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
		req = request.Copy()
		// Parse the copy-source URL so its query string can be redacted.
		url, err := url.Parse(req.Header.Get(key))
		if err == nil { // Unparseable values are logged as-is
			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
				url.RawQuery = rawQuery
				req.Header.Set(xMsCopySourceHeader, url.String())
			}
		}
	}
	return req.Request
}

// xMsCopySourceHeader names the header whose value may carry a SAS-signed URL.
const xMsCopySourceHeader = "x-ms-copy-source"
|
||||
|
||||
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
|
||||
for keyInHeader := range header {
|
||||
if strings.EqualFold(keyInHeader, key) {
|
||||
return true, keyInHeader
|
||||
}
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
403
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_retry.go
generated
vendored
Normal file
403
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_retry.go
generated
vendored
Normal file
@ -0,0 +1,403 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
type RetryPolicy int32

const (
	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy.
	RetryPolicyExponential RetryPolicy = 0

	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy.
	RetryPolicyFixed RetryPolicy = 1
)

// RetryOptions configures the retry policy's behavior.
type RetryOptions struct {
	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
	// A value of zero means that you accept our default policy.
	Policy RetryPolicy

	// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
	MaxTries int32

	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
	// of data, the default TryTimeout will probably not be sufficient. You should override this value
	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
	TryTimeout time.Duration

	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
	// with each retry up to a maximum specified by MaxRetryDelay.
	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
	// equal to or greater than RetryDelay.
	RetryDelay time.Duration

	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
	// If you specify 0, then you must also specify 0 for RetryDelay.
	MaxRetryDelay time.Duration
}

// retryReadsFromSecondaryHost returns the host to use when retrying reads against
// a secondary endpoint; it is always empty here, disabling secondary reads.
func (o RetryOptions) retryReadsFromSecondaryHost() string {
	return ""
}

// defaults returns a copy of o with every unset option replaced by its default.
// Assumptions (not validated here): Policy is one of the RetryPolicy* constants,
// MaxTries >= 0, all durations are >= 0, RetryDelay <= MaxRetryDelay, and
// RetryDelay/MaxRetryDelay are either both zero or both non-zero.
func (o RetryOptions) defaults() RetryOptions {
	setIfZero := func(d *time.Duration, def time.Duration) {
		if *d == 0 {
			*d = def
		}
	}

	if o.MaxTries == 0 {
		o.MaxTries = 4 // Default: 4 total attempts
	}
	switch o.Policy {
	case RetryPolicyExponential:
		setIfZero(&o.TryTimeout, 1*time.Minute)
		setIfZero(&o.RetryDelay, 4*time.Second)
		setIfZero(&o.MaxRetryDelay, 120*time.Second)

	case RetryPolicyFixed:
		setIfZero(&o.TryTimeout, 1*time.Minute)
		setIfZero(&o.RetryDelay, 30*time.Second)
		setIfZero(&o.MaxRetryDelay, 120*time.Second)
	}
	return o
}
|
||||
|
||||
// calcDelay computes how long to sleep before the given try, applying the
// configured back-off policy, random jitter, and the MaxRetryDelay cap.
// The 1st try always yields 0 delay (before jitter).
func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
	// pow is a nested helper computing number^exponent with integer math.
	pow := func(number int64, exponent int32) int64 { // pow is nested helper function
		var result int64 = 1
		for n := int32(0); n < exponent; n++ {
			result *= number
		}
		return result
	}

	delay := time.Duration(0)
	switch o.Policy {
	case RetryPolicyExponential:
		// (2^(try-1) - 1) * RetryDelay: 0 for try 1, then 1x, 3x, 7x, ...
		delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay

	case RetryPolicyFixed:
		if try > 1 { // Any try after the 1st uses the fixed delay
			delay = o.RetryDelay
		}
	}

	// Introduce some jitter:  [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
	// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
	delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
	if delay > o.MaxRetryDelay {
		delay = o.MaxRetryDelay // Never exceed the configured maximum delay
	}
	return delay
}
|
||||
|
||||
// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
// The returned policy retries failed tries (per the rules in the switch below), applies a per-try
// server-side and client-side timeout, and — when a secondary read host exists — alternates
// tries between the primary and secondary endpoints.
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
	o = o.defaults() // Force defaults to be calculated
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
			// Before each try, we'll select either the primary or secondary URL.
			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC

			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""

			// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
			// If using a secondary:
			//    Even tries go against primary; odd tries go against the secondary
			//    For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2)
			//    If secondary gets a 404, don't fail, retry but future retries are only against the primary
			//    When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
			for try := int32(1); try <= o.MaxTries; try++ {
				logf("\n=====> Try=%d\n", try)

				// Determine which endpoint to try. It's primary if there is no secondary or if it is an add # attempt.
				tryingPrimary := !considerSecondary || (try%2 == 1)
				// Select the correct host and delay
				if tryingPrimary {
					primaryTry++
					delay := o.calcDelay(primaryTry)
					logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
					time.Sleep(delay) // The 1st try returns 0 delay
				} else {
					// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
					delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
					time.Sleep(delay) // Delay with some jitter before trying secondary
				}

				// Clone the original request to ensure that each try starts with the original (unmutated) request.
				requestCopy := request.Copy()

				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
				// 1st try as for additional tries.
				if err = requestCopy.RewindBody(); err != nil {
					sanityCheckFailed(err.Error())
				}
				if !tryingPrimary {
					requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
					requestCopy.Host = o.retryReadsFromSecondaryHost()
				}

				// Set the server-side timeout query parameter "timeout=[seconds]"
				timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
				if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
					t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
					if t < timeout {
						timeout = t
					}
					if timeout < 0 {
						timeout = 0 // If timeout ever goes negative, set it to zero; this happen while debugging
					}
					logf("TryTimeout adjusted to=%d sec\n", timeout)
				}
				q := requestCopy.Request.URL.Query()
				q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
				requestCopy.Request.URL.RawQuery = q.Encode()
				logf("Url=%s\n", requestCopy.Request.URL.String())

				// Set the time for this particular retry operation and then Do the operation.
				tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
				response, err = next.Do(tryCtx, requestCopy) // Make the request
				/*err = improveDeadlineExceeded(err)
				if err == nil {
					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
				}*/
				logf("Err=%v, response=%v\n", err, response)

				// action encodes the retry decision; by convention the string starts
				// with "Retry" or "NoRetry" and the first letter is tested below.
				action := "" // This MUST get changed within the switch code below
				switch {
				case ctx.Err() != nil:
					action = "NoRetry: Op timeout"
				case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
					// If attempt was against the secondary & it returned a StatusNotFound (404), then
					// the resource was not found. This may be due to replication delay. So, in this
					// case, we'll never try the secondary again for this operation.
					considerSecondary = false
					action = "Retry: Secondary URL returned 404"
				case err != nil:
					// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
					// Use ServiceCode to verify if the error is related to storage service-side,
					// ServiceCode is set only when error related to storage service happened.
					if stErr, ok := err.(StorageError); ok {
						if stErr.Temporary() {
							action = "Retry: StorageError with error service code and Temporary()"
						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporarily work around, remove this after protocol layer fix the issue that net.Error is wrapped as storageError
							action = "Retry: StorageError with success status code"
						} else {
							action = "NoRetry: StorageError not Temporary() and without retriable status code"
						}
					} else if netErr, ok := err.(net.Error); ok {
						// Use non-retriable net.Error list, but not retriable list.
						// As there are errors without Temporary() implementation,
						// while need be retried, like 'connection reset by peer', 'transport connection broken' and etc.
						// So the SDK do retry for most of the case, unless the error should not be retried for sure.
						if !isNotRetriable(netErr) {
							action = "Retry: net.Error and not in the non-retriable list"
						} else {
							action = "NoRetry: net.Error and in the non-retriable list"
						}
					} else {
						action = "NoRetry: unrecognized error"
					}
				default:
					action = "NoRetry: successful HTTP request" // no error
				}

				logf("Action=%s\n", action)
				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
				if action[0] != 'R' { // Retry only if action starts with 'R'
					if err != nil {
						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
					} else {
						// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
						// So, when the user closes the Body, the our per-try context gets closed too.
						// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
						if response == nil || response.Response() == nil {
							// We do panic in the case response or response.Response() is nil,
							// as for client, the response should not be nil if request is sent and the operations is executed successfully.
							// Another option, is that execute the cancel function when response or response.Response() is nil,
							// as in this case, current per-try has nothing to do in future.
							sanityCheckFailed("invalid state, response should not be nil when the operation is executed successfully")
						}

						response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
					}
					break // Don't retry
				}
				if response != nil && response.Response() != nil && response.Response().Body != nil {
					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
					body := response.Response().Body
					io.Copy(ioutil.Discard, body)
					body.Close()
				}
				// If retrying, cancel the current per-try timeout context
				tryCancel()
			}
			return response, err // Not retryable or too many retries; return the last response/error
		}
	})
}
|
||||
|
||||
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
|
||||
type contextCancelReadCloser struct {
|
||||
cf context.CancelFunc
|
||||
body io.ReadCloser
|
||||
}
|
||||
|
||||
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
|
||||
return rc.body.Read(p)
|
||||
}
|
||||
|
||||
func (rc *contextCancelReadCloser) Close() error {
|
||||
err := rc.body.Close()
|
||||
if rc.cf != nil {
|
||||
rc.cf()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// isNotRetriable checks if the provided net.Error isn't retriable.
|
||||
func isNotRetriable(errToParse net.Error) bool {
|
||||
// No error, so this is NOT retriable.
|
||||
if errToParse == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// The error is either temporary or a timeout so it IS retriable (not not retriable).
|
||||
if errToParse.Temporary() || errToParse.Timeout() {
|
||||
return false
|
||||
}
|
||||
|
||||
genericErr := error(errToParse)
|
||||
|
||||
// From here all the error are neither Temporary() nor Timeout().
|
||||
switch err := errToParse.(type) {
|
||||
case *net.OpError:
|
||||
// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
|
||||
if err.Err == nil {
|
||||
return true
|
||||
}
|
||||
genericErr = err.Err
|
||||
}
|
||||
|
||||
switch genericErr.(type) {
|
||||
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
|
||||
// If the error is one of the ones listed, then it is NOT retriable.
|
||||
return true
|
||||
}
|
||||
|
||||
// If it's invalid header field name/value error thrown by http module, then it is NOT retriable.
|
||||
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
|
||||
if strings.Contains(genericErr.Error(), "invalid header field") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Assume the error is retriable.
|
||||
return false
|
||||
}
|
||||
|
||||
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
|
||||
|
||||
func isSuccessStatusCode(resp *http.Response) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
for _, i := range successStatusCodes {
|
||||
if i == resp.StatusCode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// logf is a no-op debug logger used throughout the retry policy; swap in the
// fmt.Printf variant below to trace the retry code path.
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
var logf = func(format string, a ...interface{}) {}

// Use this version to see the retry method's code path (import "fmt")
//var logf = fmt.Printf
|
||||
|
||||
/*
|
||||
type deadlineExceededReadCloser struct {
|
||||
r io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
|
||||
n, err := 0, io.EOF
|
||||
if r.r != nil {
|
||||
n, err = r.r.Read(p)
|
||||
}
|
||||
return n, improveDeadlineExceeded(err)
|
||||
}
|
||||
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
|
||||
// For an HTTP request, the ReadCloser MUST also implement seek
|
||||
// For an HTTP response, Seek MUST not be called (or this will panic)
|
||||
o, err := r.r.(io.Seeker).Seek(offset, whence)
|
||||
return o, improveDeadlineExceeded(err)
|
||||
}
|
||||
func (r *deadlineExceededReadCloser) Close() error {
|
||||
if c, ok := r.r.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// timeoutError is the internal struct that implements our richer timeout error.
|
||||
type deadlineExceeded struct {
|
||||
responseError
|
||||
}
|
||||
|
||||
var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
|
||||
|
||||
// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
|
||||
func improveDeadlineExceeded(cause error) error {
|
||||
// If cause is not DeadlineExceeded, return the same error passed in.
|
||||
if cause != context.DeadlineExceeded {
|
||||
return cause
|
||||
}
|
||||
// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
|
||||
return &deadlineExceeded{
|
||||
responseError: responseError{
|
||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements the error interface's Error method to return a string representation of the error.
|
||||
func (e *deadlineExceeded) Error() string {
|
||||
return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
|
||||
}
|
||||
*/
|
||||
51
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_telemetry.go
generated
vendored
Normal file
51
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_telemetry.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// TelemetryOptions configures the telemetry policy's behavior.
// The zero value adds only the SDK's own "Azure-Storage/..." identifier.
type TelemetryOptions struct {
	// Value is a string prepended to each request's User-Agent and sent to the service.
	// The service records the user-agent in logs for diagnostics and tracking of client requests.
	Value string
}
|
||||
|
||||
// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
|
||||
// which add telemetry information to outgoing HTTP requests.
|
||||
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(o.Value)
|
||||
if b.Len() > 0 {
|
||||
b.WriteRune(' ')
|
||||
}
|
||||
fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
|
||||
telemetryValue := b.String()
|
||||
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
request.Header.Set("User-Agent", telemetryValue)
|
||||
return next.Do(ctx, request)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// NOTE: the ONLY function that should write to this variable is this func
|
||||
var platformInfo = func() string {
|
||||
// Azure-Storage/version (runtime; os type and version)”
|
||||
// Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)'
|
||||
operatingSystem := runtime.GOOS // Default OS string
|
||||
switch operatingSystem {
|
||||
case "windows":
|
||||
operatingSystem = os.Getenv("OS") // Get more specific OS information
|
||||
case "linux": // accept default OS info
|
||||
case "freebsd": // accept default OS info
|
||||
}
|
||||
return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
|
||||
}()
|
||||
24
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_unique_request_id.go
generated
vendored
Normal file
24
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_policy_unique_request_id.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
|
||||
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
|
||||
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
// This is Policy's Do method:
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
id := request.Header.Get(xMsClientRequestID)
|
||||
if id == "" { // Add a unique request ID if the caller didn't specify one already
|
||||
request.Header.Set(xMsClientRequestID, newUUID().String())
|
||||
}
|
||||
return next.Do(ctx, request)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const xMsClientRequestID = "x-ms-client-request-id"
|
||||
185
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_retry_reader.go
generated
vendored
Normal file
185
vendor/github.com/Azure/azure-storage-file-go/azfile/zc_retry_reader.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
package azfile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
// The retry reader calls it to re-issue the download when a read fails mid-stream.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)

// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request.
type HTTPGetterInfo struct {
	// Offset specifies the start offset that should be used when
	// creating the HTTP GET request's Range header
	Offset int64

	// Count specifies the count of bytes that should be used to calculate
	// the end offset when creating the HTTP GET request's Range header
	Count int64

	// ETag specifies the resource's etag that should be used when creating
	// the HTTP GET request's If-Match header
	ETag ETag
}

// FailedReadNotifier is a function type that represents the notification function called when a read fails.
// Expected usage is diagnostic logging; see RetryReaderOptions.NotifyFailedRead.
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
|
||||
|
||||
// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions struct {
	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
	// while reading from a RetryReader. A value of zero means that no additional HTTP
	// GET requests will be made.
	MaxRetryRequests int
	// doInjectError / doInjectErrorRound: presumably test-only error-injection hooks;
	// their use is not visible in this file chunk — TODO confirm against the tests.
	doInjectError      bool
	doInjectErrorRound int

	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
	NotifyFailedRead FailedReadNotifier

	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
	// treated as a fatal (non-retryable) error.
	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
	// which will be retried.
	TreatEarlyCloseAsError bool
}
|
||||
|
||||
// retryReader implements io.ReadCloser methods.
// retryReader tries to read from response, and if there is a retriable network error
// returned during reading, it will retry according to the retry reader options through
// executing a user-defined action with provided data to get a new response, and continue
// the overall reading process through reading from the new response.
type retryReader struct {
	ctx             context.Context
	info            HTTPGetterInfo // current offset/count/etag; mutated as bytes are consumed
	countWasBounded bool           // true if the caller supplied a finite Count (not CountToEnd)
	o               RetryReaderOptions
	getter          HTTPGetter // used to obtain a replacement response after a failure

	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
	responseMu *sync.Mutex
	response   *http.Response
}
|
||||
|
||||
// NewRetryReader creates a retry reader.
|
||||
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
|
||||
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
|
||||
if getter == nil {
|
||||
panic("getter must not be nil")
|
||||
}
|
||||
if info.Count < 0 {
|
||||
panic("info.Count must be >= 0")
|
||||
}
|
||||
if o.MaxRetryRequests < 0 {
|
||||
panic("o.MaxRetryRequests must be >= 0")
|
||||
}
|
||||
return &retryReader{
|
||||
ctx: ctx,
|
||||
getter: getter,
|
||||
info: info,
|
||||
countWasBounded: info.Count != CountToEnd,
|
||||
response: initialResponse,
|
||||
responseMu: &sync.Mutex{},
|
||||
o: o}
|
||||
}
|
||||
|
||||
func (s *retryReader) setResponse(r *http.Response) {
|
||||
s.responseMu.Lock()
|
||||
defer s.responseMu.Unlock()
|
||||
s.response = r
|
||||
}
|
||||
|
||||
// Read implements io.Reader. It reads from the current response body and, on a
// failure that is retryable (a net.Error, or — unless TreatEarlyCloseAsError is
// set — a "read on closed response body" error), discards the broken stream and
// asks the getter for a fresh response starting at the updated offset, making up
// to MaxRetryRequests additional HTTP GET requests.
func (s *retryReader) Read(p []byte) (n int, err error) {
	for try := 0; ; try++ {
		//fmt.Println(try) // Comment out for debugging.
		if s.countWasBounded && s.info.Count == CountToEnd {
			// User specified an original count and the remaining bytes are 0, return 0, EOF
			return 0, io.EOF
		}

		// Snapshot the shared response under the mutex; Close may run concurrently.
		s.responseMu.Lock()
		resp := s.response
		s.responseMu.Unlock()
		if resp == nil { // We don't have a response stream to read from, try to get one.
			newResponse, err := s.getter(s.ctx, s.info)
			if err != nil {
				return 0, err
			}
			// Successful GET; this is the network stream we'll read from.
			s.setResponse(newResponse)
			resp = newResponse
		}
		n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)

		// Injection mechanism for testing.
		if s.o.doInjectError && try == s.o.doInjectErrorRound {
			err = &net.DNSError{IsTemporary: true}
		}

		// We successfully read data or end EOF.
		if err == nil || err == io.EOF {
			s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
			if s.info.Count != CountToEnd {
				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
			}
			return n, err // Return the result to the caller
		}
		s.Close()          // Error, close stream
		s.setResponse(nil) // Our stream is no longer good

		// Check the retry count and error code, and decide whether to retry.
		retriesExhausted := try >= s.o.MaxRetryRequests
		_, isNetError := err.(net.Error)
		willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted

		// Notify, for logging purposes, of any failures
		if s.o.NotifyFailedRead != nil {
			failureCount := try + 1 // because try is zero-based
			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
		}

		if willRetry {
			continue
			// Loop around and try to get and read from new stream.
		}
		return n, err // Not retryable, or retries exhausted, so just return
	}
}
|
||||
|
||||
// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
|
||||
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
|
||||
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
|
||||
// which is exactly the behaviour we want.
|
||||
// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
|
||||
// then there are two different types of error that may happen - either the one one we check for here,
|
||||
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
|
||||
// to check for one, since the other is a net.Error, which our main Read retry loop is already handing.
|
||||
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
|
||||
if s.o.TreatEarlyCloseAsError {
|
||||
return false // user wants all early closes to be errors, and so not retryable
|
||||
}
|
||||
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
|
||||
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
|
||||
}
|
||||
|
||||
// ReadOnClosedBodyMessage is the text of the unexported error net/http returns
// when a response body is read after being closed; retryReader matches on it
// as an error-message suffix to recognize forced early closes.
const ReadOnClosedBodyMessage = "read on closed response body"
|
||||
|
||||
func (s *retryReader) Close() error {
|
||||
s.responseMu.Lock()
|
||||
defer s.responseMu.Unlock()
|
||||
if s.response != nil && s.response.Body != nil {
|
||||
return s.response.Body.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user