diff --git a/go.mod b/go.mod index c37b50b0f..c7466cdf5 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898 cloud.google.com/go v0.39.0 github.com/FiloSottile/b2 v0.0.0-20170207175032-b197f7a2c317 - github.com/aws/aws-sdk-go v1.14.31 + github.com/aws/aws-sdk-go v1.34.0 github.com/bradfitz/latlong v0.0.0-20140711231157-b74550508561 github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f // indirect github.com/cznic/internal v0.0.0-20170905175358-4747030f7cf2 // indirect @@ -20,8 +20,7 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 - github.com/go-ini/ini v1.38.1 // indirect - github.com/go-sql-driver/mysql v1.4.1-0.20180719071942-99ff426eb706 + github.com/go-sql-driver/mysql v1.5.0 github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect github.com/google/go-cmp v0.3.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 @@ -40,20 +39,17 @@ require ( github.com/nf/cr2 v0.0.0-20140528043846-05d46fef4f2f github.com/onsi/ginkgo v1.8.0 // indirect github.com/onsi/gomega v1.5.0 // indirect - github.com/pkg/errors v0.8.0 // indirect github.com/pkg/sftp v0.0.0-20180419200840-5bf2a174b604 github.com/plaid/plaid-go v0.0.0-20161222051224-02b6af68061b github.com/russross/blackfriday v2.0.0+incompatible github.com/rwcarlsen/goexif v0.0.0-20180518182100-8d986c03457a github.com/shurcooL/sanitized_anchor_name v0.0.0-20150515002706-11a20b799bf2 // indirect - github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect - github.com/stretchr/testify v1.3.0 // indirect github.com/syndtr/goleveldb v0.0.0-20180608030153-db3ee9ee8931 github.com/tgulacsi/picago v0.0.0-20171229130838-9e1ac2306c70 go4.org v0.0.0-20190218023631-ce4c26f7be8e golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f golang.org/x/image v0.0.0-20190523035834-f03afa92d3ff - golang.org/x/net v0.0.0-20190522155817-f3200d17e092 + golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0 golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sys v0.0.0-20190528183647-3626398d7749 // indirect @@ -64,9 +60,7 @@ require ( google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69 // indirect google.golang.org/grpc v1.21.0 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/ini.v1 v1.42.0 // indirect gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 - gopkg.in/yaml.v2 v2.2.2 // indirect rsc.io/pdf v0.0.0-20170302045715-1d34785eb915 rsc.io/qr v0.1.0 ) diff --git a/go.sum b/go.sum index aa853bda1..533798698 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,8 @@ github.com/FiloSottile/b2 v0.0.0-20170207175032-b197f7a2c317 h1:1GuMjC4tjfwnWBdo github.com/FiloSottile/b2 v0.0.0-20170207175032-b197f7a2c317/go.mod h1:3DBotXAz3n/g1px/orhrK7xBJLjfaJRRrsEAJiUYEtY= github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= -github.com/aws/aws-sdk-go v1.14.31 h1:amhorvKh1zNxo9YCntvA5uDmgw+pCYXOp4xO8WS1oDg= -github.com/aws/aws-sdk-go v1.14.31/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.34.0 h1:brux2dRrlwCF5JhTL7MUT3WUwo9zfDHZZp3+g3Mvlmo= +github.com/aws/aws-sdk-go v1.34.0/go.mod 
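(A dependency bump like the one above is normally reproduced with the Go module tooling rather than by hand-editing the manifests; assuming this tree vendors its dependencies, something like

go get github.com/aws/aws-sdk-go@v1.34.0
go mod tidy
go mod vendor

regenerates go.mod, go.sum, and the vendor/ tree together, which is what the rest of this diff reflects.)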
h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/bradfitz/latlong v0.0.0-20140711231157-b74550508561 h1:mz4equOOUOnI4q5E7dyHlRx1x63YEaYwhlVluCDila4= github.com/bradfitz/latlong v0.0.0-20140711231157-b74550508561/go.mod h1:ZcXX9BndVQx6Q/JM6B8x7dLE9sl20S+TQsv4KO7tEQk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -43,16 +43,12 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 h1:GOfMz6cRgTJ9jWV0qAezv642OhPnKEG7gtUjJSdStHE= github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17/go.mod h1:HfkOCN6fkKKaPSAeNq/er3xObxTW4VLeY6UUK895gLQ= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.38.1 h1:hbtfM8emWUVo9GnXSloXYyFbXxZ+tG6sbepSStoe1FY= -github.com/go-ini/ini v1.38.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-sql-driver/mysql v1.4.1-0.20180719071942-99ff426eb706 h1:6V8xVlmRsgO5v8TJMTkYpg2hzmH2RM7HpkgtDtup4nM= -github.com/go-sql-driver/mysql v1.4.1-0.20180719071942-99ff426eb706/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -80,13 +76,11 @@ github.com/hjfreyer/taglib-go v0.0.0-20151027170453-0ef8bba9c41b h1:Q4OOFmH18aIj github.com/hjfreyer/taglib-go v0.0.0-20151027170453-0ef8bba9c41b/go.mod h1:eSDoUM0WCOj3y5CUX9yQVbMSlGd4YSd/ukmLIhxLZaI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jonas-p/go-shp v0.1.1 h1:LY81nN67DBCz6VNFn2kS64CjmnDo9IP8rmSkTvhO9jE= github.com/jonas-p/go-shp v0.1.1/go.mod h1:MRIhyxDQ6VVp0oYeD7yPGr5RSTNScUFKCDsI5DR7PtI= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kr/fs v0.1.0 
h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -94,8 +88,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v0.0.0-20130607063955-9afcd9aa7931 h1:iG6qKXF9fh1vkUvi+5NCgWY+4Yms+rYZfXrtZzZt978= -github.com/lib/pq v0.0.0-20130607063955-9afcd9aa7931/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailgun/mailgun-go v0.0.0-20171127222028-17e8bd11e87c h1:5huPh/MfWW65cx8KWNVD4mCCnwIrNiX4bFJR5OeONg0= @@ -117,8 +109,8 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v0.0.0-20180419200840-5bf2a174b604 h1:UwroZOlj0TBw175tcq/HdTB0XssY/5HizYVRg4zwsU8= github.com/pkg/sftp v0.0.0-20180419200840-5bf2a174b604/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk= github.com/plaid/plaid-go v0.0.0-20161222051224-02b6af68061b h1:Don6I/E8nLCT6gdBi1sKB9hYxkx/24YD7XWwSly8IEo= @@ -131,13 +123,9 @@ github.com/rwcarlsen/goexif v0.0.0-20180518182100-8d986c03457a h1:ZDZdsnbMuRSoVb github.com/rwcarlsen/goexif v0.0.0-20180518182100-8d986c03457a/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20150515002706-11a20b799bf2 h1:YSuCoQrOAAaaU7L/VkFo9UilOcMV19Po32oJAnflq5U= github.com/shurcooL/sanitized_anchor_name v0.0.0-20150515002706-11a20b799bf2/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/syndtr/goleveldb v0.0.0-20180608030153-db3ee9ee8931 h1:2zIFG6OP0ab/aqPljiAz1WMBGbg63kXTHGab7PueE6s= github.com/syndtr/goleveldb 
v0.0.0-20180608030153-db3ee9ee8931/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/tgulacsi/picago v0.0.0-20171229130838-9e1ac2306c70 h1:elvpffAnrLcWnsunBkvTwxr+Q79bPSNT1+2/pOFkCj0= @@ -165,19 +153,17 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190509222800-a4d6f7feada5/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0 h1:xFEXbcD0oa/xhqQmMXztdZ0bWvexAWds+8c1gRN8nu0= golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -203,8 +189,6 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/api v0.5.0 h1:lj9SyhMzyoa38fgFF0oO2T6pjs5IzkLPKfVtxpyCRMM= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -224,13 +208,10 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 h1:/saqWwm73dLmuzbNhe92F0QsZ/KiFND+esHco2v1hiY= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt index 5f14d1162..899129ecc 100644 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 000000000..1c4967429 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. 
It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allow paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons (:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index 56fdfc2bf..99849c0e1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -138,8 +138,27 @@ type RequestFailure interface { RequestID() string } -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all requests which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 0202a008f..9cf7eaf40 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -1,6 +1,9 @@ package awserr -import "fmt" +import ( + "encoding/hex" + "fmt" +) // SprintError returns a string of the formatted error code.
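Before moving on to the awserr changes: a minimal usage sketch of the new arn package above. The sample ARN is one of the examples from the Parse docs; nothing here is beyond what the vendored file defines.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	const s = "arn:aws:iam::123456789012:user/David"
	if !arn.IsARN(s) { // cheap prefix + colon-count check
		return
	}
	a, err := arn.Parse(s) // splits into the six colon-delimited sections
	if err != nil {
		panic(err)
	}
	fmt.Println(a.Service, a.AccountID, a.Resource) // iam 123456789012 user/David
	fmt.Println(a.String() == s)                    // String() round-trips: true
}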
// @@ -119,6 +122,7 @@ type requestError struct { awsError statusCode int requestID string + bytes []byte } // newRequestError returns a wrapped error with additional information for @@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error { return []error{r.OrigErr()} } +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + // An error list that satisfies the golang interface type errorList []error @@ -181,7 +208,7 @@ func (e errorList) Error() string { // How do we want to handle the array size being zero if size := len(e); size > 0 { for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) + msg += e[i].Error() // We check the next index to see if it is within the slice. // If it is, then we append a newline. We do this, because unit tests // could be broken with the additional '\n' diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go index 59fa4a558..142a7a01c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool { rb := reflect.Indirect(reflect.ValueOf(b)) if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal + // If the elements are both nil, and of the same type they are equal // If they are of different types they are not equal return reflect.TypeOf(a) == reflect.TypeOf(b) } else if raValid != rbValid { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 11c52c389..a4eb6a7f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer value = value.FieldByNameFunc(func(name string) bool { if c == name { return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + } else if !caseSensitive && strings.EqualFold(name, c) { return true } return false @@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { // SetValueAtPath sets a value at the case insensitive lexical path inside // of a structure. 
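A short sketch of how the new UnmarshalError wrapper behaves; the payload and message below are made up for illustration, but the calls are exactly the API added above.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	raw := []byte(`{"broken":`) // hypothetical response body that failed to decode
	err := awserr.NewUnmarshalError(errors.New("unexpected end of JSON input"),
		"failed decoding response", raw)

	fmt.Println(err.Code())    // UnmarshalError (the code New() is called with above)
	fmt.Println(err.Message()) // failed decoding response
	_ = err.Bytes()            // the original payload; Error() also hex.Dump()s it
}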
func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue } + setValue(rval, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index b6432f1a1..645df2450 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { case reflect.Struct: buf.WriteString("{\n") - names := []string{} for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { continue // ignore unexported fields } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { continue // ignore unset fields } - names = append(names, name) - } - for i, n := range names { - val := v.FieldByName(n) buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) + buf.WriteString(ft.Name + ": ") - if i < len(names)-1 { - buf.WriteString(",\n") + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("<sensitive>") + } else { + stringValue(fv, indent+2, buf) } + + buf.WriteString(",\n") } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 212fe25e7..03334d692 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -12,13 +12,14 @@ import ( type Config struct { Config *aws.Config Handlers request.Handlers + PartitionID string Endpoint string SigningRegion string SigningName string // States that the signing name did not come from a modeled source but // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overriden based on metadata the + // to determine if the signing name can be overridden based on metadata the // service has. SigningNameDerived bool } @@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op default: maxRetries := aws.IntValue(cfg.MaxRetries) if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 + maxRetries = DefaultRetryerMaxNumRetries } svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index a397b0d04..9f6af19dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,6 +1,7 @@ package client import ( + "math" "strconv" "time" @@ -9,82 +10,142 @@ import ( ) // DefaultRetryer implements basic retry logic using exponential backoff for -// most services.
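The `sensitive` struct tag handling added to stringValue above is reachable through awsutil.StringValue, the exported wrapper around it; a sketch, with a hypothetical struct standing in for a generated API shape:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// Hypothetical API shape; only the tagged field is redacted.
type LoginInput struct {
	User     *string
	Password *string `sensitive:"true"`
}

func main() {
	u, p := "alice", "hunter2"
	in := LoginInput{User: &u, Password: &p}
	// The tagged field is printed as <sensitive> instead of its value.
	fmt.Println(awsutil.StringValue(in))
}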
If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. For example, to override only -// the MaxRetries method: +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. // -// type retryer struct { -// client.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { + // NumMaxRetries is the maximum number of retries that will be performed. + // By default, this is zero. NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay that will be applied. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. + MaxThrottleDelay time.Duration } +const ( + // DefaultRetryerMaxNumRetries sets maximum number of retries + DefaultRetryerMaxNumRetries = 3 + + // DefaultRetryerMinRetryDelay sets minimum retry delay + DefaultRetryerMinRetryDelay = 30 * time.Millisecond + + // DefaultRetryerMinThrottleDelay sets minimum delay when throttled + DefaultRetryerMinThrottleDelay = 500 * time.Millisecond + + // DefaultRetryerMaxRetryDelay sets maximum retry delay + DefaultRetryerMaxRetryDelay = 300 * time.Second + + // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled + DefaultRetryerMaxThrottleDelay = 300 * time.Second +) + // MaxRetries returns the number of maximum returns the service will use to make // an individual API request. func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } +// setRetryerDefaults sets the default values of the retryer if not set +func (d *DefaultRetryer) setRetryerDefaults() { + if d.MinRetryDelay == 0 { + d.MinRetryDelay = DefaultRetryerMinRetryDelay + } + if d.MaxRetryDelay == 0 { + d.MaxRetryDelay = DefaultRetryerMaxRetryDelay + } + if d.MinThrottleDelay == 0 { + d.MinThrottleDelay = DefaultRetryerMinThrottleDelay + } + if d.MaxThrottleDelay == 0 { + d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay + } +} + // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { - if delay, ok := getRetryDelay(r); ok { - return delay - } - minTime = 500 + // if number of max retries is zero, no retries will be performed.
+ if d.NumMaxRetries == 0 { + return 0 + } + + // Sets default value for retryer members + d.setRetryerDefaults() + + // minDelay is the minimum retryer delay + minDelay := d.MinRetryDelay + + var initialDelay time.Duration + + isThrottle := r.IsErrorThrottle() + if isThrottle { + if delay, ok := getRetryAfterDelay(r); ok { + initialDelay = delay + } + minDelay = d.MinThrottleDelay } retryCount := r.RetryCount - if throttle && retryCount > 8 { - retryCount = 8 - } else if retryCount > 13 { - retryCount = 13 + + // maxDelay the maximum retryer delay + maxDelay := d.MaxRetryDelay + + if isThrottle { + maxDelay = d.MaxThrottleDelay + } + + var delay time.Duration + + // Logic to cap the retry count based on the minDelay provided + actualRetryCount := int(math.Log2(float64(minDelay))) + 1 + if actualRetryCount < 63-retryCount { + delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay) + if delay > maxDelay { + delay = getJitterDelay(maxDelay / 2) + } + } else { + delay = getJitterDelay(maxDelay / 2) + } + return delay + initialDelay +} - delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) - return time.Duration(delay) * time.Millisecond +// getJitterDelay returns a jittered delay for retry +func getJitterDelay(duration time.Duration) time.Duration { + return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) } // ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + + // ShouldRetry returns false if number of max retries is 0. + if d.NumMaxRetries == 0 { + return false + } + // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable != nil { return *r.Retryable } - - if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long // it will wait before attempting another request -func getRetryDelay(r *request.Request) (time.Duration, bool) { +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { if !canUseRetryAfterHeader(r) { return 0, false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index ce9fb896d..8958c32d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -67,10 +67,14 @@ func logRequest(r *request.Request) { if !bodySeekable { r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) } - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader.
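With the new DefaultRetryer fields, callers can tune the backoff window without writing a custom Retryer; a configuration sketch (the region and delay values are arbitrary):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	r := client.DefaultRetryer{
		NumMaxRetries:    client.DefaultRetryerMaxNumRetries, // 3; zero disables retries entirely
		MinRetryDelay:    50 * time.Millisecond,
		MinThrottleDelay: 500 * time.Millisecond,
		MaxRetryDelay:    30 * time.Second,
		MaxThrottleDelay: 60 * time.Second,
	}
	// request.WithRetryer stores the retryer on the config for client constructors.
	cfg := request.WithRetryer(aws.NewConfig().WithRegion("us-west-2"), r)
	sess := session.Must(session.NewSession(cfg))
	_ = sess // pass to service client constructors
}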
+ if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, @@ -118,6 +122,12 @@ var LogHTTPResponseHandler = request.NamedHandler{ func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) if logBody { r.HTTPResponse.Body = &teeReaderCloser{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 920e9fddf..0c48f72e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -5,6 +5,7 @@ type ClientInfo struct { ServiceName string ServiceID string APIVersion string + PartitionID string Endpoint string SigningName string SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 000000000..881d575f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the maximum number of retries the service will make for +// an individual API request; for NoOpRetryer this is always zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 5421b5d4e..3b809e847 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -18,9 +18,9 @@ const UseServiceDefaultRetries = -1 type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig tructure. +// all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -43,10 +43,10 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. + // to `nil` or to `""` to use the default generated endpoint. // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client.
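And the new NoOpRetryer gives a one-liner for turning retries off entirely:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// MaxRetries()==0, ShouldRetry()==false, RetryRules()==0:
	// every request gets exactly one attempt.
	cfg := request.WithRetryer(aws.NewConfig(), client.NoOpRetryer{})
	_ = cfg
}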
+ // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. Endpoint *string // The resolver to use for looking up endpoints for AWS service clients @@ -65,8 +65,8 @@ type Config struct { // noted. A full list of regions is found in the "Regions and Endpoints" // document. // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. Region *string // Set this to `true` to disable SSL when sending requests. Defaults @@ -120,9 +120,10 @@ type Config struct { // will use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` @@ -137,7 +138,7 @@ type Config struct { // `ExpectContinueTimeout` for information on adjusting the continue wait // timeout. https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experience issues + // You should use this flag to disable 100-Continue if you experience issues // with proxies or third party S3 compatible services. S3Disable100Continue *bool @@ -160,6 +161,17 @@ type Config struct { // on GetObject API calls. S3DisableContentMD5Validation *bool + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -171,7 +183,7 @@ type Config struct { // // Example: // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) + // .WithEC2MetadataDisableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -182,7 +194,7 @@ type Config struct { // both IPv4 and IPv6 addressing. // // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session + // to make requests. It is not recommended to set this value on the session // as it will apply to all service clients created with the session. Even // services which don't support dual stack endpoints. // @@ -223,12 +235,41 @@ type Config struct { // Key: aws.String("//foo//bar//moo"), // }) DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), @@ -356,6 +397,13 @@ func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { } +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + // WithUseDualStack sets a config UseDualStack value returning a Config // pointer for chaining. func (c *Config) WithUseDualStack(enable bool) *Config { @@ -377,6 +425,19 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { return c } +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + // MergeIn merges the passed in configs into the existing config object. 
func (c *Config) MergeIn(cfgs ...*Config) { for _, other := range cfgs { @@ -384,6 +445,20 @@ func (c *Config) MergeIn(cfgs ...*Config) { } } +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -457,6 +532,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation } + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -476,6 +555,22 @@ func mergeInConfig(dst *Config, other *Config) { if other.EnforceShouldRetryCheck != nil { dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go similarity index 58% rename from vendor/github.com/aws/aws-sdk-go/aws/context.go rename to vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go index 79f426853..2866f9a7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -1,8 +1,8 @@ +// +build !go1.9 + package aws -import ( - "time" -) +import "time" // Context is an copy of the Go v1.7 stdlib's context.Context interface. // It is represented as a SDK interface to enable you to use the "WithContext" @@ -35,37 +35,3 @@ type Context interface { // functions. Value(key interface{}) interface{} } - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return backgroundCtx -} - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. 
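The builder methods and merge semantics above compose as in this sketch; the values are arbitrary, and the two With* setters used are the ones introduced in this diff:

package main

import "github.com/aws/aws-sdk-go/aws"

func main() {
	base := aws.NewConfig().
		WithRegion("us-east-1").
		WithMaxRetries(3)

	// New options from this version, set through their builder helpers.
	overrides := aws.NewConfig().
		WithS3UseARNRegion(true).
		WithEndpointDiscovery(true)

	// MergeIn copies only the fields that are set on overrides into base.
	base.MergeIn(overrides)
	_ = base
}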
-func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go deleted file mode 100644 index 064f75c92..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -var ( - backgroundCtx = context.Background() -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 000000000..3718b26e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 000000000..2f9446333 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package aws + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 000000000..9c29f29af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 000000000..304fd1561 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. 
+// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index ff5d58e06..4e076c183 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. +func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8 returns a pointer to the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Value returns the value of the int8 pointer passed in or +// 0 if the pointer is nil. +func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. 
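The relocated SleepWithContext is useful on its own; a sketch of the cancellation path (on go1.9+ aws.Context is an alias of context.Context, so a stdlib context can be passed directly):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// The 100ms deadline fires before the 1s timer, so ctx.Err() is returned.
	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep interrupted:", err)
	}
}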
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v @@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index cfcddf3dc..d95a5eb54 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{ signedTime = r.LastSignedAt } - // 10 minutes to allow for some clock skew/delays in transmission. + // 5 minutes to allow for some clock skew/delays in transmission. // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { + if signedTime.Add(5 * time.Minute).After(time.Now()) { return } @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. 
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or @@ -223,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? r.Error = aws.ErrMissingEndpoint } }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index a15f496bc..ab69c7a6f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -17,7 +17,7 @@ var SDKVersionUserAgentHandler = request.NamedHandler{ } const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec_env` +const execEnvUAKey = `exec-env` // AddHostExecEnvUserAgentHander is a request handler appending the SDK's // execution environment to the user agent. 
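The pointer helpers added near the top of this change all follow the same pattern: each scalar type gets a setter (T), a nil-safe getter (TValue) that falls back to the zero value, and element-wise slice/map variants. A minimal sketch of typical call sites, with arbitrary values:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Wrap a literal in a pointer for an API input field.
        n := aws.Int32(42)

        // Read a possibly-nil pointer back; nil yields the zero value.
        fmt.Println(aws.Int32Value(n))   // 42
        fmt.Println(aws.Int32Value(nil)) // 0

        // Slice helpers convert element-wise in both directions.
        ptrs := aws.Float32Slice([]float32{1.5, 2.5})
        fmt.Println(aws.Float32ValueSlice(ptrs)) // [1.5 2.5]
    }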
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index f298d6596..3ad1e798d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -9,9 +9,7 @@ var ( // providers in the ChainProvider. // // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly + // aws.Config.CredentialsChainVerboseErrors to true. ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", `no valid providers in chain. Deprecated. For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 000000000..5852b2648 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 000000000..388b21541 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 000000000..8152a864a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is a copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as an SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts.
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 000000000..4356edb3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index ed086992f..9f8fd92a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -49,8 +49,12 @@ package credentials import ( - "sync" + "fmt" + "sync/atomic" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -64,8 +68,6 @@ import ( // Credentials: credentials.AnonymousCredentials, // }))) // // Access public S3 buckets. -// -// @readonly var AnonymousCredentials = NewStaticCredentials("", "", "") // A Value is the AWS credentials value for individual credential fields. @@ -83,6 +85,12 @@ type Value struct { ProviderName string } +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. @@ -99,6 +107,21 @@ type Provider interface { IsExpired() bool } +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. 
If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + // An ErrorProvider is a stub credentials provider that always returns an error // this is used by the SDK when construction a known provider is not possible // due to an error. @@ -158,13 +181,19 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now } - return e.expiration.Before(e.CurrentTime()) + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration } -// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // @@ -176,24 +205,24 @@ func (e *Expiry) IsExpired() bool { // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex + creds atomic.Value + sf singleflight.Group provider Provider } // NewCredentials returns a pointer to a new Credentials with the provider set. func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, + c := &Credentials{ + provider: provider, } + c.creds.Store(Value{}) + return c } -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. // // Will return the cached credentials Value if it has not expired. If the // credentials Value has expired the Provider's Retrieve() will be called // @@ -201,31 +230,56 @@ func NewCredentials(provider Provider) *Credentials { // // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - // Check the cached credentials first with just the read lock. - c.m.RLock() - if !c.isExpired() { - creds := c.creds - c.m.RUnlock() - return creds, nil +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is no direct + // association of items in the group.
+ resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() } - c.m.RUnlock() - - // Credentials are expired need to retrieve the credentials taking the full - // lock. - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false + if err == nil { + c.creds.Store(creds) } - return c.creds, nil + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) } // Expire expires the credentials and forces them to be retrieved on the @@ -234,10 +288,7 @@ func (c *Credentials) Get() (Value, error) { // This will override the Provider's expired state, and force Credentials // to call the Provider's Retrieve(). func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true + c.creds.Store(Value{}) } // IsExpired returns if the credentials are no longer valid, and need @@ -246,13 +297,43 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpired() + return c.isExpired(c.creds.Load()) } // isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() +func (c *Credentials) isExpired(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
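The rework above swaps the RWMutex for an atomic.Value plus an internal singleflight group, so concurrent callers share a single in-flight Retrieve. A minimal sketch of the new context-aware accessor, assuming Go 1.9+ (where credentials.Context is an alias for context.Context) and using the static provider as a stand-in for any Provider:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        // GetWithContext returns early if the context is canceled while
        // a provider retrieval is still in flight.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        v, err := creds.GetWithContext(ctx)
        if err != nil {
            panic(err)
        }
        fmt.Println(v.ProviderName, v.HasKeys())

        // Expire forces the next Get to call the provider's Retrieve again.
        creds.Expire()
    }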
+func (c *Credentials) ExpiresAt() (time.Time, error) { + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), + nil) + } + if c.creds.Load().(Value) == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 0ed791be6..92af5b725 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -7,10 +7,12 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) @@ -86,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(* // Error will be returned if the request fails, or unable to extract // the desired credentials. func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -96,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { } credsName := credsList[0] - roleCreds, err := requestCred(m.Client, credsName) + roleCreds, err := requestCred(ctx, m.Client, credsName) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -129,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. 
// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) if err != nil { return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) } @@ -142,7 +151,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -152,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // // If the credentials cannot be found, or there is an error reading the response // and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", @@ -164,7 +174,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index a4cec5c55..785f30d8e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -39,6 +39,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. @@ -65,6 +66,10 @@ type Provider struct { // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -93,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. 
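The new AuthorizationToken option on the endpoint credentials Provider is sent as the Authorization header of each credential request. A sketch of wiring it through NewCredentialsClient; the endpoint URL and token value below are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        cfg := defaults.Config()
        handlers := defaults.Handlers()

        // http://127.0.0.1:8080/creds is a hypothetical local endpoint.
        creds := endpointcreds.NewCredentialsClient(*cfg, handlers,
            "http://127.0.0.1:8080/creds",
            func(p *endpointcreds.Provider) {
                // Sent as the Authorization header of the request.
                p.AuthorizationToken = "example-token"
            })

        v, err := creds.Get()
        fmt.Println(v.ProviderName, err)
    }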
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } @@ -111,7 +116,13 @@ func (p *Provider) IsExpired() bool { // Retrieve will attempt to request the credentials from the endpoint the Provider // was configured for. And error will be returned if the retrieval fails. func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) if err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New("CredentialsEndpointError", "failed to load credentials", err) @@ -143,7 +154,7 @@ type errorOutput struct { Message string `json:"message"` } -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { op := &request.Operation{ Name: "GetCredentials", HTTPMethod: "GET", @@ -151,7 +162,11 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } return out, req.Send() } @@ -167,7 +182,7 @@ func unmarshalHandler(r *request.Request) { out := r.Data.(*getCredentialsOutput) if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode endpoint credentials", err, ) @@ -178,11 +193,15 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, ) + return } // Response body format is not consistent between metadata endpoints. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index c14231a16..54c5cf733 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider" var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. - // - // @readonly ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key // can't be found in the process's environment. 
- // - // @readonly ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 000000000..e62483600 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,426 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to set up your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`processcreds.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(1) * time.Minute) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k.
+ + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret access key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause requests to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // The command to be executed, which should return JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. +func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. +func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index 51e21e0f3..22b5c5d9f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -4,9 +4,8 @@ import ( "fmt" "os" - "github.com/go-ini/ini" - "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" "github.com/aws/aws-sdk-go/internal/shareddefaults" ) @@ -18,8 +17,9 @@ var ( ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { @@ -77,36 +77,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool { // The credentials retrieved from the profile will be returned or error. 
Error will be // returned if it fails to read from the file, or the data is invalid. func loadProfile(filename, profile string) (Value, error) { - config, err := ini.Load(filename) + config, err := ini.OpenFile(filename) if err != nil { return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) } - iniProfile, err := config.GetSection(profile) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) } - id, err := iniProfile.GetKey("aws_access_key_id") - if err != nil { + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - err) + nil) } - secret, err := iniProfile.GetKey("aws_secret_access_key") - if err != nil { + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), nil) } // Default to empty string if not found - token := iniProfile.Key("aws_session_token") + token := iniProfile.String("aws_session_token") return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, ProviderName: SharedCredsProviderName, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 4f5dab3fc..cbba1e3d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider" var ( // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - // - // @readonly ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) ) @@ -21,7 +19,9 @@ type StaticProvider struct { } // NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. +// wrapping a static credentials value provider. Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. 
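As the constructor comment above notes, the token argument is only needed for temporary STS credentials. A minimal sketch with placeholder key material:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Long-term credentials: pass an empty token.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        // Temporary STS credentials would pass the session token instead:
        // credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")

        v, err := creds.Get()
        fmt.Println(v.HasKeys(), err) // true <nil>
    }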
func NewStaticCredentials(id, secret, token string) *Credentials { return NewCredentials(&StaticProvider{Value: Value{ AccessKeyID: id, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 4108e433e..6846ef6f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -80,16 +80,19 @@ package stscreds import ( "fmt" + "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. // An error is returned if reading from stdin fails. // // Use this function go read MFA tokens from stdin. The function makes no attempt @@ -102,7 +105,7 @@ import ( // Will wait forever until something is provided on the stdin. func StdinTokenProvider() (string, error) { var v string - fmt.Printf("Assume Role MFA token code: ") + fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") _, err := fmt.Scanln(&v) return v, err @@ -116,6 +119,10 @@ type AssumeRoler interface { AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) } +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + // DefaultDuration is the default amount of time in minutes that the credentials // will be valid for. var DefaultDuration = time.Duration(15) * time.Minute @@ -142,6 +149,13 @@ type AssumeRoleProvider struct { // Session name, if you wish to reuse the credentials elsewhere. RoleSessionName string + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. Duration time.Duration @@ -155,6 +169,29 @@ type AssumeRoleProvider struct { // size. Policy *string + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. 
You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + // The identification number of the MFA device that is associated with the user // who is making the AssumeRole call. Specify this value if the trust policy // of the role being assumed includes a condition that requires MFA authentication. @@ -193,6 +230,18 @@ type AssumeRoleProvider struct { // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration + + // MaxJitterFrac reduces the effective Duration of each credential requested + // by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must + // have a value between 0 and 1. Any other value may lead to unexpected behavior. + // With a MaxJitterFrac value of 0 (the default), no jitter will be used. + // + // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the + // AssumeRole call will be made with an arbitrary Duration between 27m and + // 30m. + // + // MaxJitterFrac should not be negative. + MaxJitterFrac float64 } // NewCredentials returns a pointer to a new Credentials object wrapping the @@ -244,7 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(* // Retrieve generates a new set of temporary credentials using STS. func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { // Apply defaults where parameters are not set. if p.RoleSessionName == "" { // Try to work out a role name that will hopefully end up unique. @@ -254,11 +307,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { // Expire as often as AWS permits.
p.Duration = DefaultDuration } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, } if p.Policy != nil { input.Policy = p.Policy @@ -281,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } } - roleOutput, err := p.Client.AssumeRole(input) + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 000000000..6feb262b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,135 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. +var now = time.Now + +// TokenFetcher should return WebIdentity token bytes or an error +type TokenFetcher interface { + FetchToken(credentials.Context) ([]byte, error) +} + +// FetchTokenPath is a path to a WebIdentity token file +type FetchTokenPath string + +// FetchToken returns a token by reading from the filesystem +func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { + data, err := ioutil.ReadFile(string(f)) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", f) + return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + return data, nil +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + credentials.Expiry + PolicyArns []*sts.PolicyDescriptorType + + client stsiface.STSAPI + ExpiryWindow time.Duration + + tokenFetcher TokenFetcher + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role arn, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFetcher: tokenFetcher, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role from a token located at the +// destination specified by 'WebIdentityTokenFilePath'; if that is empty, an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token located at the +// destination specified by 'WebIdentityTokenFilePath'; if that is empty, an +// error will be returned. +func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) + if err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + }) + + req.SetContext(ctx) + + // InvalidIdentityToken error is a temporary error that can occur + // when assuming a Role with a JWT web identity token. + req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go index 152d785b3..25a66d1dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -1,30 +1,61 @@ -// Package csm provides Client Side Monitoring (CSM) which enables sending metrics -// via UDP connection. Using the Start function will enable the reporting of -// metrics on a given port.
If Start is called, with different parameters, again, -// a panic will occur. +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options and configuration for the CSM client. The client can be +// controlled manually or automatically via the SDK's Session configuration. // -// Pause can be called to pause any metrics publishing on a given port. Sessions -// that have had their handlers modified via InjectHandlers may still be used. -// However, the handlers will act as a no-op meaning no metrics will be published. +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. // -// Example: // r, err := csm.Start("clientID", ":31000") // if err != nil { // panic(fmt.Errorf("failed starting CSM: %v", err)) // } // +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// // sess, err := session.NewSession(&aws.Config{}) // if err != nil { // panic(fmt.Errorf("failed loading session: %v", err)) // } // +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. // r.InjectHandlers(&sess.Handlers) // -// client := s3.New(sess) -// resp, err := client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) +// Controlling CSM client +// +// Once the CSM client has been enabled, the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If the Get function is called before the reporter is enabled with the +// Start function or via the SDK's Session configuration, nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. +// r := csm.Get() // // // Will pause monitoring // r.Pause()
-// -// Example: -// r := csm.Get() -// r.Continue() package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go index 2f0c6eac9..4b19e2800 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -2,6 +2,7 @@ package csm import ( "fmt" + "strings" "sync" ) @@ -9,19 +10,40 @@ var ( lock sync.Mutex ) -// Client side metric handler names const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" + // DefaultPort is used when no port is specified. + DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" ) -// Start will start the a long running go routine to capture +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only an IPv6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long-running goroutine to capture // client side metrics. Calling Start multiple times will only // start the metric listener once and will panic if a different // client ID or port is passed in. // -// Example: -// r, err := csm.Start("clientID", "127.0.0.1:8094") +// r, err := csm.Start("clientID", "127.0.0.1:31000") // if err != nil { // panic(fmt.Errorf("expected no error, but received %v", err)) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go index 4b0d630e4..5bacc791a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -3,6 +3,8 @@ package csm import ( "strconv" "time" + + "github.com/aws/aws-sdk-go/aws" ) type metricTime time.Time @@ -39,6 +41,12 @@ type metric struct { SDKException *string `json:"SdkException,omitempty"` SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + DestinationIP *string `json:"DestinationIp,omitempty"` ConnectionReused *int `json:"ConnectionReused,omitempty"` @@ -48,4 +56,54 @@ type metric struct { DNSLatency *int `json:"DnsLatency,omitempty"` TCPLatency *int `json:"TcpLatency,omitempty"` SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) +
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go index 514fc3739..82a3e345e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -16,25 +16,26 @@ var ( type metricChan struct { ch chan metric - paused int64 + paused *int64 } func newMetricChan(size int) metricChan { return metricChan{ - ch: make(chan metric, size), + ch: make(chan metric, size), + paused: new(int64), } } func (ch *metricChan) Pause() { - atomic.StoreInt64(&ch.paused, pausedEnum) + atomic.StoreInt64(ch.paused, pausedEnum) } func (ch *metricChan) Continue() { - atomic.StoreInt64(&ch.paused, runningEnum) + atomic.StoreInt64(ch.paused, runningEnum) } func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(&ch.paused) + v := atomic.LoadInt64(ch.paused) return v == pausedEnum } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 000000000..54a99280c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index 11082e5ed..835bcd49c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -10,11 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -const ( - // DefaultPort is used when no port is specified - DefaultPort = "31000" -) - // Reporter will gather metrics of API requests made and // send those metrics to the CSM endpoint. 
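//
// A Reporter is obtained from Start, or later from Get; for example:
//
//	r, err := csm.Start("clientID", csm.AddressWithDefaults("", ""))
//	if err != nil {
//		panic(fmt.Errorf("failed starting CSM: %v", err))
//	}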
type Reporter struct { @@ -71,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { XAmzRequestID: aws.String(r.RequestID), - AttemptCount: aws.Int(r.RetryCount + 1), AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), AccessKey: aws.String(creds.AccessKeyID), } @@ -82,26 +76,29 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { if r.Error != nil { if awserr, ok := r.Error.(awserr.Error); ok { - setError(&m, awserr) + m.SetException(getMetricException(awserr)) } } + m.TruncateFields() rep.metricsCh.Push(m) } -func setError(m *metric, err awserr.Error) { +func getMetricException(err awserr.Error) metricException { msg := err.Error() code := err.Code() switch code { - case "RequestError", - "SerializationError", + case request.ErrCodeRequestError, + request.ErrCodeSerialization, request.CanceledErrorCode: - m.SDKException = &code - m.SDKExceptionMessage = &msg + return sdkException{ + requestException{exception: code, message: msg}, + } default: - m.AWSException = &code - m.AWSExceptionMessage = &msg + return awsException{ + requestException{exception: code, message: msg}, + } } } @@ -112,16 +109,31 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) { now := time.Now() m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } } + m.TruncateFields() + // TODO: Probably want to figure something out for logging dropped // metrics rep.metricsCh.Push(m) @@ -172,8 +184,9 @@ func (rep *Reporter) start() { } } -// Pause will pause the metric channel preventing any new metrics from -// being added. +// Pause will pause the metric channel preventing any new metrics from being +// added. It is safe to call concurrently with other calls to Pause, but +// calling it concurrently with Continue can lead to unexpected state. func (rep *Reporter) Pause() { lock.Lock() defer lock.Unlock() @@ -185,8 +198,9 @@ func (rep *Reporter) Pause() { rep.close() } -// Continue will reopen the metric channel and allow for monitoring -// to be resumed. +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// calling it concurrently with Pause can lead to unexpected state.
func (rep *Reporter) Continue() { lock.Lock() defer lock.Unlock() @@ -201,10 +215,18 @@ func (rep *Reporter) Continue() { rep.metricsCh.Continue() } +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + // InjectHandlers will enable client side metrics and inject the proper // handlers to handle how metrics are sent. // -// Example: +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior (e.g. duplicate metrics). +// // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { @@ -221,11 +243,22 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { return } - apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} - apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) - handlers.Complete.PushFrontNamed(apiCallHandler) - handlers.Complete.PushFrontNamed(apiCallAttemptHandler) + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) +} + +// boolIntValue returns 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } - handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) + return 0 } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 5040a2f64..23bb639e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) // A Defaults provides a collection of default values for SDK clients.
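//
// For example, the default collection can be inspected and reused (a sketch):
//
//	d := defaults.Get()
//	provider := defaults.RemoteCredProvider(*d.Config, d.Handlers)
//	creds := credentials.NewCredentials(provider)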
@@ -112,8 +113,8 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro } const ( - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" ) // RemoteCredProvider returns a credentials provider for the default remote @@ -123,8 +124,8 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P return localHTTPCredProvider(cfg, handlers, u) } - if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("http://169.254.170.2%s", uri) + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) return httpCredProvider(cfg, handlers, u) } @@ -187,6 +188,7 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede return endpointcreds.NewProviderClient(cfg, handlers, u, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) }, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index c215cd3f5..a716c021c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,34 +4,87 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + // GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or +// instance metadata service. The content will be returned as a string, or // error if the request failed. func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. 
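+//
+// For example (a sketch; svc is an *EC2Metadata client created elsewhere):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	id, err := svc.GetMetadataWithContext(ctx, "instance-id")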
+func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", HTTPPath: sdkuri.PathJoin("/meta-data", p), } - output := &metadataOutput{} + req := c.NewRequest(op, nil, output) - return output.Content, req.Send() + req.SetContext(ctx) + + err := req.Send() + return output.Content, err } // GetUserData returns the userdata that was configured for the service. If // there is no user-data setup for the EC2 instance a "NotFoundError" error // code will be returned. func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", @@ -40,19 +93,23 @@ func (c *EC2Metadata) GetUserData() (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) + req.SetContext(ctx) - return output.Content, req.Send() + err := req.Send() + return output.Content, err } // GetDynamicData uses the path provided to request information from the EC2 // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", @@ -61,15 +118,24 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) - return output.Content, req.Send() + err := req.Send() + return output.Content, err } // GetInstanceIdentityDocument retrieves an identity document describing an // instance. Error is returned if the request fails or is unable to parse // the response. func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
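+//
+// A brief sketch, with ctx and svc as in the GetMetadataWithContext example:
+//
+//	doc, err := svc.GetInstanceIdentityDocumentWithContext(ctx)
+//	if err == nil {
+//		fmt.Println(doc.Region, doc.InstanceID)
+//	}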
+func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") if err != nil { return EC2InstanceIdentityDocument{}, awserr.New("EC2MetadataRequestError", @@ -79,7 +145,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 instance identity document", err) } @@ -88,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument // IAMInfo retrieves IAM info from the metadata API func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") if err != nil { return EC2IAMInfo{}, awserr.New("EC2MetadataRequestError", @@ -98,7 +169,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 IAM info", err) } @@ -113,20 +184,36 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) if err != nil { return "", err } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil } // Available returns if the application has access to the EC2 Metadata service. // Can be used to determine if application is running within an EC2 Instance and // the metadata service is available. func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { return false } @@ -145,18 +232,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index ef5f73292..dc7e051e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -4,7 +4,7 @@ // This package's client can be disabled completely by setting the environment // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to // true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environemnt variable is set to true, (case insensitive). +// be used while the environment variable is set to true, (case insensitive). package ec2metadata import ( @@ -13,6 +13,7 @@ import ( "io" "net/http" "os" + "strconv" "strings" "time" @@ -24,9 +25,25 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" -const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) // A EC2Metadata is an EC2 Metadata service Client. 
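//
// For example, a client can be created from a Session (a sketch):
//
//	sess := session.Must(session.NewSession())
//	svc := ec2metadata.New(sess)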
type EC2Metadata struct { @@ -63,8 +80,10 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio // use a shorter timeout than default because the metadata // service is local if it is running, and to fail faster // if not running on an ec2 instance. - Timeout: 5 * time.Second, + Timeout: 1 * time.Second, } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) } svc := &EC2Metadata{ @@ -72,6 +91,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceName, Endpoint: endpoint, APIVersion: "latest", }, @@ -79,18 +99,35 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ), } - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) svc.Handlers.UnmarshalError.PushBack(unmarshalError) svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) // Disable the EC2 Metadata service if the environment variable is set. - // This shortcirctes the service's functionality to always fail to send + // This short-circuits the service's functionality to always fail to send // requests. if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { svc.Handlers.Send.SwapNamed(request.NamedHandler{ Name: corehandlers.SendHandler.Name, Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } r.Error = awserr.New( request.CanceledErrorCode, "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", @@ -103,7 +140,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio for _, option := range opts { option(svc.Client) } - return svc } @@ -115,30 +151,74 @@ type metadataOutput struct { Content string } -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } +type tokenOutput struct { + Token string + TTL time.Duration +} - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), 
r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, } + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, } func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) return } // Response body format is not consistent between metadata endpoints. // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) } func validateEndpointHandler(r *request.Request) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 000000000..d0a3a020d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,92 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to the EC2Metadata client, +// an atomic instance of a token, and the configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// An ec2Token struct pairs a token with its expiry for use in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} } + +// fetchTokenHandler fetches a token for the EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // Disable the token provider when the token endpoint is not supported + // (403, 404, or 405), or when the token request times out.
+ if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. + if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 74f72de07..654fb1ad5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -83,18 +83,53 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol p := &ps[i] custAddEC2Metadata(p) custAddS3DualStack(p) + custRegionalS3(p) custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) } return ps, nil } func custAddS3DualStack(p *partition) { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custRegionalS3(p *partition) { if p.ID != "aws" { return } - s, ok := p.Services["s3"] + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. 
+ if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] if !ok { return } @@ -102,7 +137,7 @@ func custAddS3DualStack(p *partition) { s.Defaults.HasDualStack = boxedTrue s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - p.Services["s3"] = s + p.Services[svcName] = s } func custAddEC2Metadata(p *partition) { @@ -122,6 +157,54 @@ func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + type decodeModelError struct { awsError } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 8e823bec0..4f9de24fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -11,20 +11,27 @@ const ( AwsPartitionID = "aws" // AWS Standard partition. AwsCnPartitionID = "aws-cn" // AWS China partition. AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. ) // AWS Standard partition's regions. const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). 
+ EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -40,142 +47,22 @@ const ( // AWS GovCloud (US) partition's regions. const ( - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) -// Service identifiers +// AWS ISO (US) partition's regions. const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. 
- EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - SagemakerServiceID = "sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. 
+ UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -183,7 +70,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -197,6 +84,8 @@ var defaultPartitions = partitions{ awsPartition, awscnPartition, awsusgovPartition, + awsisoPartition, + awsisobPartition, } // AwsPartition returns the Resolver for AWS Standard. @@ -210,7 +99,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") return reg }(), }, @@ -220,6 +109,12 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, "ap-northeast-1": region{ Description: "Asia Pacific (Tokyo)", }, @@ -239,16 +134,25 @@ var awsPartition = partition{ Description: "Canada (Central)", }, "eu-central-1": region{ - Description: "EU (Frankfurt)", + Description: "Europe (Frankfurt)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", }, "eu-west-1": region{ - Description: "EU (Ireland)", + Description: "Europe (Ireland)", }, "eu-west-2": region{ - Description: "EU (London)", + Description: "Europe (London)", }, "eu-west-3": region{ - Description: "EU (Paris)", + Description: "Europe (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", }, "sa-east-1": region{ Description: "South America (Sao Paulo)", @@ -273,9 +177,11 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, - "acm": service{ + "access-analyzer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -283,82 +189,110 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - 
"api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", + "fips-ca-central-1": endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "apigateway": service{ + "acm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "application-autoscaling": service{ + "acm-pca": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, + Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -366,46 +300,51 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, 
"eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "autoscaling": service{ + "api.detective": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -415,6 +354,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -425,197 +365,205 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "autoscaling-plans", - }, - }, - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "api.ecr": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "af-south-1", }, }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - 
"aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-east-1", }, }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + 
Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "cloudhsm": service{ + "api.elastic-inference": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "cloudsearch": service{ + "api.mediatailor": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cloudtrail": service{ - + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-south-1": endpoint{}, + "us-east-1": 
endpoint{}, }, }, - "codebuild": service{ + "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -623,43 +571,47 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, "us-east-2": endpoint{}, "us-east-2-fips": endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, "us-west-1": endpoint{}, "us-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, "us-west-2": endpoint{}, "us-west-2-fips": endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, - "codecommit": service{ + "apigateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -667,9 +619,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -677,9 +632,13 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "codedeploy": service{ - + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -687,9 +646,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -697,9 +659,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "codepipeline": service{ + "appmesh": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -707,9 +670,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -717,24 +682,32 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "codestar": service{ - + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + 
CredentialScope: credentialScope{ + Service: "appstream", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "cognito-identity": service{ + "appsync": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -742,60 +715,76 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cognito-idp": service{ + "athena": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cognito-sync": service{ - + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "comprehend": service{ + "autoscaling-plans": service{ Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Protocols: []string{"http", "https"}, }, - }, - "config": service{ - Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -803,9 +792,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -813,30 +804,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - 
"ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dax": service{ + "backup": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -844,15 +827,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": service{ + "batch": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -860,25 +838,90 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "discovery": service{ + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-west-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, - "dms": service{ + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -886,9 +929,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + 
"me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -896,30 +942,25 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ds": service{ + "clouddirectory": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "cloudformation": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -927,59 +968,81 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, + }, + "cloudhsm": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", }, }, - }, - "ecr": service{ - Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -987,9 +1050,12 @@ var awsPartition = partition{ "ap-southeast-2": 
endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -997,29 +1063,26 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ecs": service{ + "cloudsearch": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticache": service{ + "cloudtrail": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1027,54 +1090,109 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ + "codeartifact": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticfilesystem": service{ + "codebuild": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": 
endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "codecommit": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1082,95 +1200,143 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, + "codedeploy": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "elastictranscoder": service{ + "codepipeline": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - 
}, - "email": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, }, - }, - Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "es": service{ + "codestar": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "events": service{ + "codestar-connections": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1180,6 +1346,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1190,33 +1357,77 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "firehose": service{ + "cognito-identity": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "cognito-idp": service{ + Endpoints: endpoints{ - "eu-west-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": 
endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "gamelift": service{ + "cognito-sync": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1224,22 +1435,85 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "glacier": service{ + "comprehend": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, + }, + "config": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1247,145 +1521,138 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "glue": service{ + "connect": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, 
- "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, }, + }, + "data.mediastore": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "dataexchange": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ + "datapipeline": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, + "datasync": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "kinesis": service{ + "dax": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -1397,27 +1664,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "kinesisanalytics": service{ + "devicefarm": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ + "directconnect": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1425,57 +1682,172 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "lambda": service{ + "discovery": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "lightsail": service{ + "dms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + 
"dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "logs": service{ + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1483,95 +1855,186 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconvert": service{ - Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, }, - }, - "mediapackage": service{ - Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + 
Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "mediastore": service{ + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, }, }, + }, + "ecs": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1579,45 +2042,50 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "monitoring": service{ + "eks": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1625,58 +2093,41 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, 
- "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "opsworks": service{ + "elasticache": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1684,56 +2135,30 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "polly": service{ + "elasticbeanstalk": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1741,19 +2166,48 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": 
endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1761,21 +2215,146 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + 
"fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "redshift": service{ - + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1783,178 +2362,274 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "rekognition": service{ - + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: 
"{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "resource-groups": service{ + "elastictranscoder": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ + "email": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "runtime.lex": service{ + "entitlement.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ - Service: "lex", + Service: "aws-marketplace", }, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "runtime.sagemaker": service{ + "es": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, + "events": 
service{ - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "sagemaker": service{ + "firehose": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + 
"fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "secretsmanager": service{ - + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1962,65 +2637,109 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, 
+ "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "servicecatalog": service{ + "forecast": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -2028,41 +2747,32 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "servicediscovery": service{ + "forecastquery": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "Shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "sms": service{ + "fsx": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2070,20 +2780,21 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "snowball": service{ + "gamelift": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2091,7 +2802,6 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2099,11 +2809,13 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "sns": service{ + "glacier": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + 
"ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2111,22 +2823,53 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, + "glue": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2134,341 +2877,5884 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + 
}, + }, + "fips-us-west-2": endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: 
credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + 
Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + 
"sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "macie2": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, 
+ "mgh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + 
"ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + 
"af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": 
endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + 
Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + 
}, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + 
SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: 
"snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "ssm-facade-fips-us-east-1": endpoint{ + Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ssm-facade-fips-us-east-2": endpoint{ + Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "ssm-facade-fips-us-west-1": endpoint{ + Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "ssm-facade-fips-us-west-2": endpoint{ + Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": 
endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": 
endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + 
"fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + 
"cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + 
"cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + "fips-aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + 
IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: 
credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + 
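+		// A minimal resolution sketch (assuming this package's EndpointFor API
+		// and options): the FIPS endpoint IDs above resolve like any other
+		// region key.
+		//
+		//	p := endpoints.AwsUsGovPartition()
+		//	ep, err := p.EndpointFor("elasticfilesystem", "fips-us-gov-west-1",
+		//		endpoints.StrictMatchingOption)
+		//	if err != nil {
+		//		log.Fatal(err)
+		//	}
+		//	fmt.Println(ep.URL)           // https://elasticfilesystem-fips.us-gov-west-1.amazonaws.com
+		//	fmt.Println(ep.SigningRegion) // us-gov-west-1, from the CredentialScope above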
"elasticloadbalancing": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: 
endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "kinesis-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "kinesis-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: 
"securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-gov-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-gov-west-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: 
"sqs-fips.us-west-1.amazonaws.com", + "ssm-facade-fips-us-gov-east-1": endpoint{ + Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-gov-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + "ssm-facade-fips-us-gov-west-1": endpoint{ + Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-gov-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, "states": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, "storagegateway": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, "streams.dynamodb": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: 
"dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "sts": service{ - PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-gov-east-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-gov-west-1", }, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-gov-west-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-gov-west-1", }, }, }, }, - "support": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, "swf": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "tagging": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-gov-east-1": 
endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "translate": service{ + "transcribe": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, }, - }, - "workdocs": service{ - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "waf-regional": service{ + Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "workspaces": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, "xray": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, }, } -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
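+//
+// A usage sketch (assuming this package's resolver API); the partition's
+// global route53 endpoint defined below resolves by its endpoint key:
+//
+//	ep, err := endpoints.AwsIsoPartition().EndpointFor("route53", "aws-iso-global")
+//	if err == nil {
+//		fmt.Println(ep.URL) // https://route53.c2s.ic.gov
+//	}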
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() } -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") return reg }(), }, @@ -2478,107 +8764,124 @@ var awscnPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", + "us-iso-east-1": region{ + Description: "US ISO East", }, }, Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "codedeploy": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "cognito-identity": service{ - + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "config": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, }, }, "ds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: 
endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "ec2metadata": service{ @@ -2592,194 +8895,177 @@ var awscnPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "elasticache": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, }, }, "es": service{ Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "events": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, + }, + "health": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "iam": service{ - PartitionEndpoint: "aws-cn-global", + PartitionEndpoint: "aws-iso-global", IsRegionalized: boxedFalse, Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-iso-east-1", }, }, }, }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, + "kinesis": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "kinesis": service{ + "kms": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, }, }, "lambda": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "logs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "rds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "redshift": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: 
[]string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "sms": service{ + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, }, }, - "snowball": service{ + "runtime.sagemaker": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "sns": service{ + "s3": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, }, }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, }, + }, + "sns": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "ssm": service{ + "sqs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "storagegateway": service{ + "states": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, "streams.dynamodb": service{ @@ -2790,46 +9076,70 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "sts": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, }, }, "swf": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "tagging": service{ + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, }, }, }, } -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). -func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() } -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") return reg }(), }, @@ -2839,99 +9149,77 @@ var awsusgovPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", }, }, Services: services{ - "acm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, }, - }, - "apigateway": service{ - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, + Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "cloudtrail": service{ + "cloudformation": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "codedeploy": service{ + "cloudtrail": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "config": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "dms": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, }, }, "dynamodb": service{ - + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, + "us-isob-east-1": endpoint{}, }, }, "ec2": service{ - + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "ec2metadata": service{ @@ -2945,247 +9233,188 @@ var awsusgovPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "elasticache": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "elasticloadbalancing": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + "us-isob-east-1": endpoint{ + Protocols: 
[]string{"https"}, }, }, }, "elasticmapreduce": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, - "es": service{ + "events": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "events": service{ + "glacier": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "glacier": service{ + "health": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, "iam": service{ - PartitionEndpoint: "aws-us-gov-global", + PartitionEndpoint: "aws-iso-b-global", IsRegionalized: boxedFalse, Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-isob-east-1", }, }, }, }, - "inspector": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "kinesis": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "kms": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, }, }, "lambda": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "logs": service{ + "license-manager": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "monitoring": service{ + "logs": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "polly": service{ + "monitoring": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "rds": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "redshift": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "s3": service{ Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, - }, - "sms": service{ - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "sns": service{ - + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, "sqs": service{ - + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: 
[]string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, "ssm": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "storagegateway": service{ + "states": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "streams.dynamodb": service{ Defaults: endpoint{ + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, + "us-isob-east-1": endpoint{}, }, }, "sts": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "swf": service{ + "support": service{ + PartitionEndpoint: "aws-iso-b-global", Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, }, }, - "tagging": service{ + "swf": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 000000000..ca8fc828e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. 
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. 
+ Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index e29c09512..ca956e5f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -3,6 +3,7 @@ package endpoints import ( "fmt" "regexp" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -35,7 +36,7 @@ type Options struct { // // If resolving an endpoint on the partition list the provided region will // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unkonwn and resolving + // endpoint ID with. If both the service and region are unknown and resolving // the endpoint on partition list an UnknownEndpointError error will be returned. // // If resolving and endpoint on a partition specific resolver that partition's @@ -46,6 +47,108 @@ type Options struct { // // This option is ignored if StrictMatching is enabled. ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. 
+ LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } } // Set combines all of the option functions together. @@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) { o.ResolveUnknownService = true } +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -170,10 +279,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id string - p *partition + id, dnsSuffix string + p *partition } +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + // ID returns the identifier of the partition. func (p Partition) ID() string { return p.id } @@ -191,7 +303,7 @@ func (p Partition) ID() string { return p.id } // require the provided service and region to be known by the partition. 
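These flags feed straight into endpoint resolution. A short sketch, assuming the default AWS partition model and the legacy region list added later in this diff (legacy_regions.go):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        r := endpoints.DefaultResolver()

        // Without the option, us-west-2 is on the STS legacy list, so the
        // resolver rewrites the region to the partition's global endpoint.
        legacy, _ := r.EndpointFor("sts", "us-west-2")
        fmt.Println(legacy.URL) // https://sts.amazonaws.com

        // Opting in keeps the caller's region.
        regional, _ := r.EndpointFor("sts", "us-west-2",
            endpoints.STSRegionalEndpointOption)
        fmt.Println(regional.URL) // https://sts.us-west-2.amazonaws.com
    }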
// If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned may look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage // of new regions and services expansions. // @@ -205,7 +317,7 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // Regions returns a map of Regions indexed by their ID. This is useful for // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} + rs := make(map[string]Region, len(p.p.Regions)) for id, r := range p.p.Regions { rs[id] = Region{ id: id, @@ -220,7 +332,7 @@ func (p Partition) Regions() map[string]Region { // Services returns a map of Service indexed by their ID. This is useful for // enumerating over the services in a partition. func (p Partition) Services() map[string]Service { - ss := map[string]Service{} + ss := make(map[string]Service, len(p.p.Services)) for id := range p.p.Services { ss[id] = Service{ id: id, @@ -307,7 +419,7 @@ func (s Service) Regions() map[string]Region { // A region is the AWS region the service exists in. Whereas a Endpoint is // an URL that can be resolved to a instance of a service. func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} + es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ id: id, @@ -347,6 +459,9 @@ type ResolvedEndpoint struct { // The endpoint URL URL string + // The endpoint partition + PartitionID string + // The region that should be used for signing requests. 
SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 000000000..df75e899a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index ff6f76db6..773613722 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -7,6 +7,8 @@ import ( "strings" ) +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + type partitions []partition func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -54,8 +56,9 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - id: p.ID, - p: &p, + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, } } @@ -74,24 +77,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool) return p.RegionRegex.MatchString(region) } +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { var opt Options opt.Set(opts...) s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. 
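// (So with the ResolveUnknownService option set, a service id that is
// missing from the model, e.g. the made-up "newservice" in "us-east-1",
// would resolve against the partition's default
// {service}.{region}.{dnsSuffix} pattern instead of hitting this error.)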
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) } defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) } func serviceList(ss services) []string { @@ -200,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -208,11 +243,27 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op merged.mergeIn(e) e = merged - hostname := e.Hostname + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname // Offset the hostname for dualstack if enabled if opts.UseDualStack && e.HasDualStack == boxedTrue { hostname = e.DualStackHostname + region = signingRegion + } + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") } u := strings.Replace(hostname, "{service}", service, 1) @@ -222,25 +273,14 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - return ResolvedEndpoint{ URL: u, + PartitionID: partitionID, SigningRegion: signingRegion, SigningName: signingName, SigningNameDerived: signingNameDerived, SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } + }, nil } func getEndpointScheme(protocols []string, disableSSL bool) string { @@ -305,3 +345,7 @@ const ( boxedFalse boxedTrue ) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 05e92df22..0fdfcc56e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -16,6 +16,10 @@ import ( type CodeGenOptions struct { // Options for how the model will be decoded. 
DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool } // Set combines all of the option functions together @@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe return err } + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { return fmt.Errorf("failed to execute template, %v", err) } @@ -166,15 +178,17 @@ import ( "regexp" ) - {{ template "partition consts" . }} + {{ template "partition consts" $.Resolver }} - {{ range $_, $partition := . }} + {{ range $_, $partition := $.Resolver }} {{ template "partition region consts" $partition }} {{ end }} - {{ template "service consts" . }} + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} - {{ template "endpoint resolvers" . }} + {{ template "endpoint resolvers" $.Resolver }} {{- end }} {{ define "partition consts" }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go index 576636168..fa06f7a8f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr" var ( // ErrMissingRegion is an error that is returned if region configuration is // not found. - // - // @readonly ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) // ErrMissingEndpoint is an error that is returned if an endpoint cannot be // resolved for a service. 
- // - // @readonly ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index 271da432c..d9b37f4d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -1,18 +1,17 @@ -// +build !appengine,!plan9 - package request import ( - "net" - "os" - "syscall" + "strings" ) func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true } return false diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca43..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 605a72d3c..e819ab6c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -10,6 +10,7 @@ import ( type Handlers struct { Validate HandlerList Build HandlerList + BuildStream HandlerList Sign HandlerList Send HandlerList ValidateResponse HandlerList @@ -19,14 +20,16 @@ type Handlers struct { UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + CompleteAttempt HandlerList Complete HandlerList } -// Copy returns of this handler's lists. +// Copy returns a copy of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), Sign: h.Sign.copy(), Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), @@ -36,14 +39,16 @@ func (h *Handlers) Copy() Handlers { UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), Complete: h.Complete.copy(), } } -// Clear removes callback functions for all handlers +// Clear removes callback functions for all handlers. func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() + h.BuildStream.Clear() h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() @@ -53,9 +58,58 @@ func (h *Handlers) Clear() { h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.CompleteAttempt.Clear() h.Complete.Clear() } +// IsEmpty returns if there are no handlers in any of the handlerlists. 
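// A hedged usage sketch (Copy and Clear are defined above; svc stands in
// for any service client):
//
//	h := svc.Handlers.Copy()
//	h.Clear()
//	fmt.Println(h.IsEmpty()) // true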
+func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + // A HandlerListRunItem represents an entry in the HandlerList which // is being run. type HandlerListRunItem struct { @@ -272,3 +326,18 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { AddToUserAgent(r, s) } } + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index b0c2ef4fe..9370fa50c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -15,12 +15,15 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } reader.buf = buf - return reader + return reader, nil } // Close will close the instance of the offset reader's access to @@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } return newOffsetReader(o.buf, offset) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 75f0fe077..d597c6ead 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "net" "net/http" "net/url" "reflect" @@ -37,6 +36,10 @@ const ( // API request that was canceled. Requests given a aws.Context may // return this error when canceled. CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" ) // A Request is the service request to be made. 
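The WithSetRequestHeaders option added above composes with any operation's request before it is sent. A sketch, assuming an S3 client; the bucket, key, header, and owner value are illustrative:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
            Bucket: aws.String("my-bucket"), // illustrative
            Key:    aws.String("my-key"),    // illustrative
        })
        req.ApplyOptions(request.WithSetRequestHeaders(map[string]string{
            "X-Amz-Expected-Bucket-Owner": "123456789012", // illustrative
        }))
        if err := req.Send(); err != nil {
            panic(err)
        }
    }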
@@ -52,6 +55,7 @@ type Request struct { HTTPRequest *http.Request HTTPResponse *http.Response Body io.ReadSeeker + streamingBody io.ReadCloser BodyStart int64 // offset from beginning of Body that the request body starts Params interface{} Error error @@ -65,6 +69,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -91,8 +104,12 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// New returns a new Request pointer for the service API -// operation and parameters. +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. // // Params is any value of input parameters to be the request payload. // Data is pointer value to an object which the request's response @@ -100,6 +117,10 @@ type Operation struct { func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + if retryer == nil { + retryer = noOpRetryer{} + } + method := operation.HTTPMethod if method == "" { method = "POST" @@ -114,15 +135,12 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } - SanitizeHostForHeader(httpReq) - r := &Request{ Config: cfg, ClientInfo: clientInfo, Handlers: handlers.Copy(), Retryer: retryer, - AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -233,6 +251,10 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -261,12 +283,32 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } r.ResetBody() } +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. 
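// A hedged sketch (body is any io.ReadCloser; because the stream is
// consumed as it is sent, such a request cannot rewind its body on retry):
//
//	req.SetStreamingBody(body)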
+func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + // Presign returns the request's signed URL. Error will be returned -// if the signing fails. +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. // // It is invalid to create a presigned URL with a expire duration 0 or less. An // error is returned if expire duration is 0 or less. @@ -283,7 +325,9 @@ func (r *Request) Presign(expire time.Duration) (string, error) { } // PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. // // It is invalid to create a presigned URL with a expire duration 0 or less. An // error is returned if expire duration is 0 or less. @@ -328,16 +372,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -356,12 +399,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -377,20 +420,30 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } + SanitizeHostForHeader(r.HTTPRequest) + r.Handlers.Sign.Run(r) return r.Error } -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + if r.safeBody != nil { r.safeBody.Close() } - r.safeBody = newOffsetReader(r.Body, r.BodyStart) + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. 
Go 1.8 removed the automatic detection @@ -407,10 +460,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) } - var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -462,80 +515,90 @@ func (r *Request) Send() error { r.Handlers.Complete.Run(r) }() + if err := r.Error; err != nil { + return err + } + for { + r.Error = nil r.AttemptTime = time.Now() - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - } - r.Sign() - if r.Error != nil { - return r.Error + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err } - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, err) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue + if err := r.sendRequest(); err == nil { + return nil } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - err := r.Error - - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, err) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error } - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, err) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue + if err := r.prepareRetry(); err != nil { + r.Error = err + return err } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
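// (Ordering note: sendRequest runs the new CompleteAttempt handlers via a
// defer, so they observe each attempt's outcome before this cleanup runs
// on the next loop iteration.)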
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } - break + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error } return nil @@ -561,32 +624,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - if awsErr.Code() == CanceledErrorCode { - return false - } - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() - } - } - - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. - return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") - -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) @@ -602,6 +639,10 @@ func getHost(r *http.Request) string { return r.Host } + if r.URL == nil { + return "" + } + return r.URL.Host } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 7c6a8000f..de1292f45 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -4,6 +4,8 @@ package request import ( "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -24,7 +26,8 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = err + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index a633ed5ac..64784e16f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -17,11 +17,13 @@ import ( // does the pagination between API operations, and Paginator defines the // configuration that will be used per page request. // -// cont := true -// for p.Next() && cont { +// for p.Next() { // data := p.Page().(*s3.ListObjectsOutput) // // process the page's data +// // ... 
+// // break out of loop to stop fetching additional pages // } + // return p.Err() // // See service client API operation Pages methods for examples how the SDK will @@ -146,7 +148,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if v == false { + if !v { return nil } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 7d5270298..752ae47f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,32 +1,81 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface to drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and for determining if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules returns the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns whether the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts that are made. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } cfg.Retryer = retryer return cfg + +} + +// noOpRetryer is an internal no-op retryer used when a request is created +// without a retryer. +// +// It provides a retryer that performs no retries, for use when retries should +// not be performed. +type noOpRetryer struct{} + +// MaxRetries returns the maximum number of retries the SDK will make for an +// individual API request; for noOpRetryer, MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for noOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since noOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 } // retryableCodes is a collection of service response codes which are retry-able // without any further action.
var retryableCodes = map[string]struct{}{ - "RequestError": {}, + ErrCodeRequestError: {}, "RequestTimeout": {}, ErrCodeResponseTimeout: {}, "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout @@ -34,12 +83,16 @@ var retryableCodes = map[string]struct{}{ var throttleCodes = map[string]struct{}{ "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API "Throttling": {}, "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, + "RequestThrottledException": {}, "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -74,10 +127,6 @@ var validParentCodes = map[string]struct{}{ ErrCodeRead: {}, } -type temporaryError interface { - Temporary() bool -} - func isNestedErrorRetryable(parentErr awserr.Error) bool { if parentErr == nil { return false @@ -96,7 +145,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { return isCodeRetryable(aerr.Code()) } - if t, ok := err.(temporaryError); ok { + if t, ok := err.(temporary); ok { return t.Temporary() || isErrConnectionReset(err) } @@ -106,32 +155,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true } - return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. 
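Request.IsErrorRetryable below consults the new per-request RetryErrorCodes list before these package-level tables, which lets a caller widen the retryable set without writing a custom Retryer. A sketch, assuming any generated client and an illustrative error code:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Mark an extra, illustrative error code as retryable for every
        // request this client builds.
        svc.Handlers.Build.PushBack(func(r *request.Request) {
            r.RetryErrorCodes = append(r.RetryErrorCodes, "CustomRetryableError")
        })
    }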
func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) } return false } @@ -141,17 +248,58 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. // // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 401246228..8630683f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -17,6 +17,12 @@ const ( ParamMinValueErrCode = "ParamMinValueError" // ParamMinLenErrCode is the error code for fields without enough elements. ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" ) // Validator provides a way for types to perform validation logic on their @@ -232,3 +238,49 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen { func (e *ErrParamMinLen) MinLen() int { return e.min } + +// An ErrParamMaxLen represents a maximum length parameter error. 
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+	errInvalidParam
+	format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+	return &ErrParamFormat{
+		errInvalidParam: errInvalidParam{
+			code:  ParamFormatErrCode,
+			field: field,
+			msg:   fmt.Sprintf("format %v, %v", format, value),
+		},
+		format: format,
+	}
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+	return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
new file mode 100644
index 000000000..ea9ebb6f6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
@@ -0,0 +1,26 @@
+// +build go1.7
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
new file mode 100644
index 000000000..fec39dfc1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.6,go1.5
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+}
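(Editorial aside: these per-Go-version transports, including the go1.6 variant that follows, back the Session's CustomCABundle option. An illustrative sketch of the feature they support; the PEM path is hypothetical.)

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Hypothetical path to a PEM bundle; the SDK consumes it as an io.Reader.
        f, err := os.Open("/etc/ssl/private-ca-bundle.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        sess, err := session.NewSessionWithOptions(session.Options{
            CustomCABundle: f,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }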
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
new file mode 100644
index 000000000..1c5a5391e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
@@ -0,0 +1,23 @@
+// +build !go1.7,go1.6
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
new file mode 100644
index 000000000..fe6dac1f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -0,0 +1,267 @@
+package session
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+func resolveCredentials(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	switch {
+	case len(sessOpts.Profile) != 0:
+		// User explicitly provided a Profile in the session's configuration
+		// so load that profile from shared config first.
+		// Github(aws/aws-sdk-go#2727)
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+
+	case envCfg.Creds.HasKeys():
+		// Environment credentials
+		return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
+
+	case len(envCfg.WebIdentityTokenFilePath) != 0:
+		// Web identity token from environment, RoleARN required to also be
+		// set.
+		return assumeWebIdentity(cfg, handlers,
+			envCfg.WebIdentityTokenFilePath,
+			envCfg.RoleARN,
+			envCfg.RoleSessionName,
+		)
+
+	default:
+		// Fallback to the "default" credential resolution chain.
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+	}
+}
+
+// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
+// 'AWS_ROLE_ARN' was not set.
+var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
+
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
+// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+	filepath string,
+	roleARN, sessionName string,
+) (*credentials.Credentials, error) {
+
+	if len(filepath) == 0 {
+		return nil, WebIdentityEmptyTokenFilePathErr
+	}
+
+	if len(roleARN) == 0 {
+		return nil, WebIdentityEmptyRoleARNErr
+	}
+
+	creds := stscreds.NewWebIdentityCredentials(
+		&Session{
+			Config:   cfg,
+			Handlers: handlers.Copy(),
+		},
+		roleARN,
+		sessionName,
+		filepath,
+	)
+
+	return creds, nil
+}
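(Editorial aside: assumeWebIdentity requires both the token path and the role ARN, mirroring the WebIdentityEmpty* errors above. A hedged sketch of the environment-driven path; the values are placeholders, and resolveCredentials only takes this branch when no static environment credentials and no explicit Options.Profile are set.)

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Placeholder values; both variables must be set together, otherwise
        // WebIdentityEmptyRoleARNErr or WebIdentityEmptyTokenFilePathErr is returned.
        os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/token")
        os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/example-role")

        sess, err := session.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }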
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch {
+	case sharedCfg.SourceProfile != nil:
+		// Assume IAM role with credentials source from a different profile.
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+		)
+
+	default:
+		// Fallback to default credentials provider, include mock errors for
+		// the credential chain so the user can identify why credentials
+		// failed to be retrieved.
+		creds = credentials.NewCredentials(&credentials.ChainProvider{
+			VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+			Providers: []credentials.Provider{
+				&credProviderError{
+					Err: awserr.New("EnvAccessKeyNotFound",
+						"failed to find credentials in the environment.", nil),
+				},
+				&credProviderError{
+					Err: awserr.New("SharedCredsLoad",
+						fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+				},
+				defaults.RemoteCredProvider(*cfg, handlers),
+			},
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if len(sharedCfg.RoleARN) > 0 {
+		cfgCp := *cfg
+		cfgCp.Credentials = creds
+		return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+	}
+
+	return creds, nil
+}
+
+// valid credential source values
+const (
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+func resolveCredsFromSource(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	case credSourceEnvironment:
+		creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+	case credSourceECSContainer:
+		if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+			return nil, ErrSharedConfigECSContainerEnvVarEmpty
+		}
+
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	default:
+		return nil, ErrSharedConfigInvalidCredSource
+	}
+
+	return creds, nil
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+	handlers request.Handlers,
+	sharedCfg sharedConfig,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
+		// AssumeRole Token provider is required if doing Assume Role
+		// with MFA.
+ return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 98d420fd6..7ec66e7e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,97 +1,93 @@ /* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. -Alternatively you can explicitly create a Session with shared config enabled. 
-To do this you can use NewSessionWithOptions to configure how the Session will
-be created. Using the NewSessionWithOptions with SharedConfigState set to
-SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
-environment variable was set.
+modified. Sessions should be cached when possible, because creating a new
+Session will load all configuration values from the environment, and config
+files each time the Session is created. Sharing the Session value across all of
+your service clients will ensure the configuration is loaded the fewest number
+of times possible.

-Creating Sessions
-
-When creating Sessions optional aws.Config values can be passed in that will
-override the default, or loaded config values the Session is being created
-with. This allows you to provide additional, or case based, configuration
-as needed.
+Session options from Shared Config

 By default NewSession will only load credentials from the shared credentials
 file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
 set to a truthy value the Session will be created from the configuration
 values from the shared config (~/.aws/config) and shared credentials
-(~/.aws/credentials) files. See the section Sessions from Shared Config for
-more information.
+(~/.aws/credentials) files. Using the NewSessionWithOptions with
+SharedConfigState set to SharedConfigEnable will create the session as if the
+AWS_SDK_LOAD_CONFIG environment variable was set.

-Create a Session with the default config and request handlers. With credentials
-region, and profile loaded from the environment and shared config automatically.
-Requires the AWS_PROFILE to be set, or "default" is used.
+Credential and config loading order

-	// Create Session
-	sess := session.Must(session.NewSession())
+The Session will attempt to load configuration and credentials from the
+environment, configuration files, and other credential sources. The order
+configuration is loaded in is:

-	// Create a Session with a custom region
-	sess := session.Must(session.NewSession(&aws.Config{
-		Region: aws.String("us-east-1"),
-	}))
+  * Environment Variables
+  * Shared Credentials file
+  * Shared Configuration file (if SharedConfig is enabled)
+  * EC2 Instance Metadata (credentials only)

-	// Create a S3 client instance from a session
-	sess := session.Must(session.NewSession())
+The Environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior, and use
+shared config credentials instead, specify session.Options.Profile (e.g. when
+using credential_source=Environment to assume a role).
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: "myProfile",
+	})

-	svc := s3.New(sess)
+Creating Sessions

-Create Session With Option Overrides
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on environment variables
+used by Session.

-In addition to NewSession, Sessions can be created using NewSessionWithOptions.
-This func allows you to control and override how the Session will be created
-through code instead of being driven by environment variables only.
+	// Create Session
+	sess, err := session.NewSession()

-Use NewSessionWithOptions when you want to provide the config profile, or
-override the shared config state (AWS_SDK_LOAD_CONFIG).
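(Editorial aside: with the deprecated New constructor dropped from these docs, the error-returning constructors are the recommended path. A minimal sketch of checking the error directly instead of using session.Must.)

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess, err := session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"),
        })
        if err != nil {
            log.Fatalf("failed to create session, %v", err)
        }
        _ = sess
    }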
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{
+		Region: aws.String("us-west-2"),
+	})
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded, such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).

 	// Equivalent to session.NewSession()
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
+	sess, err := session.NewSessionWithOptions(session.Options{
 		// Options
-	}))
+	})

-	// Specify profile to load for the session's config
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
-		Profile: "profile_name",
-	}))
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Specify profile to load for the session's config
+		Profile: "profile_name",

-	// Specify profile for config and region for requests
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
-		Config: aws.Config{Region: aws.String("us-east-1")},
-		Profile: "profile_name",
-	}))
+		// Provide SDK Config options, such as Region.
+		Config: aws.Config{
+			Region: aws.String("us-west-2"),
+		},

-	// Force enable Shared Config support
-	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		// Force enable Shared Config support
 		SharedConfigState: session.SharedConfigEnable,
-	}))
+	})

 Adding Handlers

-You can add handlers to a session for processing HTTP requests. All service
-clients that use the session inherit the handlers. For example, the following
-handler logs every request and its payload made by a service client:
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made:

 	// Create a session, and add additional handlers for all service
 	// clients created with the Session to inherit. Adds logging handler.
@@ -99,22 +95,15 @@ handler logs every request and its payload made by a service client:
 	sess.Handlers.Send.PushFront(func(r *request.Request) {
 		// Log every request made and its payload
-		logger.Println("Request: %s/%s, Payload: %s",
+		logger.Printf("Request: %s/%s, Params: %s",
 			r.ClientInfo.ServiceName, r.Operation, r.Params)
 	})

-Deprecated "New" function
-
-The New session function has been deprecated because it does not provide good
-way to return errors that occur when loading the configuration files and values.
-Because of this, NewSession was created so errors can be retrieved when
-creating a session fails.
-
 Shared Config Fields

-By default the SDK will only load the shared credentials file's (~/.aws/credentials)
-credentials values, and all other config is provided by the environment variables,
-SDK defaults, and user provided aws.Config values.
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.

 If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
 option is used to create the Session the full shared config values will be
@@ -125,24 +114,31 @@ files have the same format.
 If both config files are present the configuration from both files will be
 read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).

-Credentials are the values the SDK should use for authenticating requests with
-AWS Services. They are from a configuration file will need to include both
-aws_access_key_id and aws_secret_access_key must be provided together in the
-same file to be considered valid. The values will be ignored if not a complete
-group. aws_session_token is an optional field that can be provided if both of
-the other two fields are also provided.
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.

 	aws_access_key_id = AKID
 	aws_secret_access_key = SECRET
 	aws_session_token = TOKEN

-Assume Role values allow you to configure the SDK to assume an IAM role using
-a set of credentials provided in a config file via the source_profile field.
-Both "role_arn" and "source_profile" are required. The SDK supports assuming
-a role with MFA token if the session option AssumeRoleTokenProvider
-is set.
+	; region only supported if SharedConfigEnabled.
+	region = us-east-1
+
+Assume Role configuration
+
+The role_arn field allows you to configure the SDK to assume an IAM role using
+a set of credentials from another source, such as when paired with static
+credentials via the "source_profile" field, or with the "credential_process"
+or "credential_source" fields. If "role_arn" is provided, a source of
+credentials must also be specified, such as "source_profile",
+"credential_source", or "credential_process".

 	role_arn = arn:aws:iam:::role/
 	source_profile = profile_with_creds
@@ -150,40 +146,16 @@ is set.
 	mfa_serial = 
 	role_session_name = session_name

-Region is the region the SDK should use for looking up AWS service endpoints
-and signing requests.
-
-	region = us-east-1
-
-Assume Role with MFA token
-To create a session with support for assuming an IAM role with MFA set the
-session option AssumeRoleTokenProvider to a function that will prompt for the
-MFA token code when the SDK assumes the role and refreshes the role's credentials.
-This allows you to configure the SDK via the shared config to assumea role
-with MFA tokens.
-
-In order for the SDK to assume a role with MFA the SharedConfigState
-session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
-environment variable set.
-
-The shared configuration instructs the SDK to assume an IAM role with MFA
-when the mfa_serial configuration field is set in the shared config
-(~/.aws/config) or shared credentials (~/.aws/credentials) file.
-
-If mfa_serial is set in the configuration, the SDK will assume the role, and
-the AssumeRoleTokenProvider session option is not set an an error will
-be returned when creating the session.
+The SDK supports assuming a role with an MFA token. If "mfa_serial" is set,
+you must also set the session Options.AssumeRoleTokenProvider. The Session
+will fail to load if the AssumeRoleTokenProvider is not specified.
sess := session.Must(session.NewSessionWithOptions(session.Options{ AssumeRoleTokenProvider: stscreds.StdinTokenProvider, })) - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess) - -To setup assume role outside of a session see the stscrds.AssumeRoleProvider +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider documentation. Environment Variables diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 82e04d76c..c1e0e9c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -1,11 +1,15 @@ package session import ( + "fmt" "os" "strconv" + "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // EnvProviderName provides a name of the provider when config is loaded from environment. @@ -79,7 +83,7 @@ type envConfig struct { // AWS_CONFIG_FILE=$HOME/my_shared_config SharedConfigFile string - // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file // that the SDK will use instead of the system's root CA bundle. // Only use this if you want to configure the SDK to use a custom set // of CAs. @@ -98,15 +102,61 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string + CSMHost string CSMClientID string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. 
+ // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool } var ( csmEnabledEnvKey = []string{ "AWS_CSM_ENABLED", } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } csmPortEnvKey = []string{ "AWS_CSM_PORT", } @@ -125,6 +175,10 @@ var ( "AWS_SESSION_TOKEN", } + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + regionEnvKeys = []string{ "AWS_REGION", "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set @@ -139,6 +193,24 @@ var ( sharedConfigFileEnvKey = []string{ "AWS_CONFIG_FILE", } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -147,7 +219,7 @@ var ( // If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value // the shared SDK config will be loaded in addition to the SDK's specific // configuration values. -func loadEnvConfig() envConfig { +func loadEnvConfig() (envConfig, error) { enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) return envConfigLoad(enableSharedConfig) } @@ -158,30 +230,42 @@ func loadEnvConfig() envConfig { // Loads the shared configuration in addition to the SDK's specific configuration. // This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` // environment variable is set. -func loadSharedEnvConfig() envConfig { +func loadSharedEnvConfig() (envConfig, error) { return envConfigLoad(true) } -func envConfigLoad(enableSharedConfig bool) envConfig { +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg := envConfig{} cfg.EnableSharedConfig = enableSharedConfig - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) // CSM environment variables setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 - // Require logical grouping of credentials - if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { - cfg.Creds = credentials.Value{} - } else { - cfg.Creds.ProviderName = EnvProviderName + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v } regionKeys := regionEnvKeys @@ -194,6 +278,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Region, regionKeys) setFromEnvVal(&cfg.Profile, profileKeys) + // endpoint 
discovery is in reference to it being enabled. + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) @@ -206,12 +296,48 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - return cfg + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + return cfg, nil } func setFromEnvVal(dst *string, keys []string) { for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 51f305563..0ff499605 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,19 +8,36 @@ import ( "io/ioutil" "net/http" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" ) +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, 
or Environment", nil) + // A Session provides a central location to create service clients from and // store configurations and request handlers for those services. // @@ -56,7 +73,7 @@ type Session struct { // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment - envCfg := loadEnvConfig() + envCfg, envErr := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config @@ -76,19 +93,28 @@ func New(cfgs ...*aws.Config) *Session { // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + s.logDeprecatedNewSessionError(msg, err, cfgs) } return s } s := deprecatedNewSession(cfgs...) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } } return s @@ -107,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session { // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created. Such as specifying the +// control through code how the Session will be created, such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { opts := Options{} @@ -191,6 +217,12 @@ type Options struct { // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. @@ -205,6 +237,12 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. 
+ Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -238,13 +276,20 @@ type Options struct { // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig + var err error if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } } else { - envCfg = loadEnvConfig() + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } } - if len(opts.Profile) > 0 { + if len(opts.Profile) != 0 { envCfg.Profile = opts.Profile } @@ -310,27 +355,33 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { - logger.Log("Enabling CSM") - if len(port) == 0 { - port = csm.DefaultPort +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, "127.0.0.1:"+port) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { - return + return err } r.InjectHandlers(handlers) + + return nil } func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() - handlers := defaults.Handlers() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } // Get a merged version of the user provided config to determine if // credentials were. userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. @@ -347,9 +398,17 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - return nil, err + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. 
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -362,8 +421,16 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } } // Setup HTTP client with custom cert bundle if enabled @@ -376,6 +443,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { @@ -388,7 +495,10 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error { } } if t == nil { - t = &http.Transport{} + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. 
+ t = getCABundleTransport() } p, err := loadCertPool(bundle) @@ -421,9 +531,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { - // Merge in user provided configuration - cfg.MergeIn(userCfg) +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { @@ -434,101 +546,67 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - cfg.Credentials = stscreds.NewCredentials( - &Session{ - Config: &cfgCp, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. - cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) } } - return nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. 
-func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { return nil } -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } } -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } } func initHandlers(s *Session) { @@ -539,7 +617,7 @@ func initHandlers(s *Session) { } } -// Copy creates and returns a copy of the current Session, coping the config +// Copy creates and returns a copy of the current Session, copying the config // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. // @@ -559,47 +637,67 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) 
- - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) - var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) } return client.Config{ Config: s.Config, Handlers: s.Handlers, + PartitionID: resolved.PartitionID, Endpoint: resolved.URL, SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -609,12 +707,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) 
var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -626,3 +721,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 09c8e5bc7..680805a38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,11 +2,12 @@ package session import ( "fmt" - "io/ioutil" + "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/go-ini/ini" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" ) const ( @@ -16,68 +17,127 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` // Additional Config fields regionKey = `region` + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. DefaultSharedConfigProfile = `default` -) -type assumeRoleConfig struct { - RoleARN string - SourceProfile string - ExternalID string - MFASerial string - RoleSessionName string -} + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) // sharedConfig represents the configuration fields of the SDK config files. 
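(Editorial aside, before the sharedConfig type itself: the sts_regional_endpoints key defined above has an aws.Config counterpart that wins the precedence merge shown earlier in mergeSTSRegionalEndpointConfig. A sketch, assuming the endpoints package exposes RegionalSTSEndpoint alongside the LegacySTSEndpoint constant used in this diff.)

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/endpoints"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Force the regional STS endpoint rather than the legacy global one.
        sess, err := session.NewSession(&aws.Config{
            Region:              aws.String("us-west-2"),
            STSRegionalEndpoint: endpoints.RegionalSTSEndpoint,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }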
type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. // // aws_access_key_id // aws_secret_access_key // aws_session_token Creds credentials.Value - AssumeRole assumeRoleConfig - AssumeRoleSource *sharedConfig + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. // // region Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool } type sharedConfigFile struct { Filename string - IniData *ini.File + IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in // earlier files. // // For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. +// of the files are A then B, B's credential values will be used instead of +// A's. // // See sharedConfig.setFromFile for information how the config files // will be loaded. 
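(Editorial aside: the struct's S3UseARNRegion field maps the s3_use_arn_region key onto aws.Config.S3UseARNRegion, which the session merge treats as a *bool. A small sketch of setting it in code rather than in the shared config file.)

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Let S3 access point ARNs direct which region requests are sent to.
        sess, err := session.NewSession(&aws.Config{
            S3UseARNRegion: aws.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }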
-func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { if len(profile) == 0 { profile = DefaultSharedConfigProfile } @@ -88,16 +148,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) } cfg := sharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { return sharedConfig{}, err } - if len(cfg.AssumeRole.SourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return sharedConfig{}, err - } - } - return cfg, nil } @@ -105,114 +160,283 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { files := make([]sharedConfigFile, 0, len(filenames)) for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { // Skip files which can't be opened and read for whatever reason continue - } - - f, err := ini.Load(b) - if err != nil { + } else if err != nil { return nil, SharedConfigLoadError{Filename: filename, Err: err} } files = append(files, sharedConfigFile{ - Filename: filename, IniData: f, + Filename: filename, IniData: sections, }) } return files, nil } -func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc sharedConfig +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Apply the profile from each file, skipping files in which the profile + // is not defined. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr + } - // Multiple level assume role chains are not support - if cfg.AssumeRole.SourceProfile == origProfile { - assumeRoleSrc = *cfg - assumeRoleSrc.AssumeRole = assumeRoleConfig{} + if _, ok := profiles[profile]; ok { + // If this is the second instance of the profile, the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self-linked instance of the + // profile only has credential provider options. + cfg.clearAssumeRoleOptions() } else { - err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) - if err != nil { + // The first time a profile is seen it must be either an assume role + // or a credentials profile. Assert that if the credential type + // requires a role ARN, the ARN is also set.
+ if err := cfg.validateCredentialsRequireARN(profile); err != nil { return err } } + profiles[profile] = struct{}{} - if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + if err := cfg.validateCredentialType(); err != nil { + return err } - cfg.AssumeRoleSource = &assumeRoleSrc + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // A profile linked via source_profile ignores credential provider + // options; the source profile must provide the credentials. + cfg.clearCredentialOptions() - return nil -} - -func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - // Trim files from the list that don't exist. - for _, f := range files { - if err := cfg.setFromIniFile(profile, f); err != nil { + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // A SourceProfile that doesn't exist is a configuration error. if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore proviles missings - continue + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } } return err } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg } return nil } -// setFromFile loads the configuration from the file using -// the profile provided. A sharedConfig pointer type value is used so that -// multiple config file loadings can be chained. +// setFromIniFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. // // Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { - section, err := file.IniData.GetSection(profile) - if err != nil { +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key, the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { // Fall back to the alternate profile name: profile - section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if err != nil { - return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} } } - // Shared Credentials - akid := section.Key(accessKeyIDKey).String() - secret := section.Key(secretAccessKey).String() - if len(akid) > 0 && len(secret) > 0 { - cfg.Creds = credentials.Value{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.Key(sessionTokenKey).String(), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre } } - // Assume Role - roleArn := section.Key(roleArnKey).String() - srcProfile := section.Key(sourceProfileKey).String() - if len(roleArn) > 0 && len(srcProfile) > 0 { - cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - ExternalID: section.Key(externalIDKey).String(), - MFASerial: section.Key(mfaSerialKey).String(), - RoleSessionName: section.Key(roleSessionNameKey).String(), + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { +
case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, } } - // Region - if v := section.Key(regionKey).String(); len(v) > 0 { - cfg.Region = v + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision } return nil } +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section, if the key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section, if the key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section, if the +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for when the shared config file failed to load. type SharedConfigLoadError struct { Filename string @@ -270,7 +494,8 @@ func (e SharedConfigProfileNotExistsError) Error() string { // profile contains assume role information, but that information is invalid // or not complete. type SharedConfigAssumeRoleError struct { - RoleARN string + RoleARN string + SourceProfile string } // Code is the short id of the error. @@ -280,8 +505,10 @@ func (e SharedConfigAssumeRoleError) Code() string { // Message is the description of the error func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", - e.RoleARN) + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) } // OrigErr is the underlying error that caused the failure.
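The helpers above read every profile field through the SDK's internal ini package, which this change swaps in for the old go-ini dependency. A minimal sketch of how that API is consumed, assuming a hypothetical config path and profile name (not values from the source):

	sections, err := ini.OpenFile("/home/user/.aws/config")
	if err != nil {
		return err
	}
	// Sections.GetSection reports presence with a bool rather than an error.
	if section, ok := sections.GetSection("profile dev"); ok && section.Has("role_arn") {
		fmt.Println("role_arn =", section.String("role_arn"))
	}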
@@ -293,3 +520,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error { func (e SharedConfigAssumeRoleError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go index 244c86da0..07ea799fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -1,8 +1,7 @@ package v4 import ( - "net/http" - "strings" + "github.com/aws/aws-sdk-go/internal/strings" ) // validator houses a set of rule needed for validation of a @@ -61,7 +60,7 @@ type patterns []string // been found func (p patterns) IsValid(value string) bool { for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + if strings.HasPrefixFold(value, pattern) { return true } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 000000000..f35fc860b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 000000000..fed5c859c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 000000000..02cbd97e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// 
NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes the event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 8aa0681d3..d71f7b3f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -76,9 +76,14 @@ import ( ) const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + authHeaderPrefix = "AWS4-HMAC-SHA256" timeFormat = "20060102T150405Z" shortTimeFormat = "20060102" + awsV4Request = "aws4_request" // emptyStringSHA256 is a SHA256 of an empty string emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` @@ -87,9 +92,9 @@ const ( var ignoredHeaders = rules{ blacklist{ mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, }, }, } @@ -134,6 +139,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, "X-Amz-Content-Sha256": struct{}{}, }, @@ -181,7 +187,7 @@ type Signer struct { // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html DisableURIPathEscaping bool - // Disales the automatical setting of the HTTP request's Body field with the + // Disables the automatic setting of the HTTP request's Body field with the // io.ReadSeeker passed in to the signer. This is useful if you're using a // custom wrapper around the body for the io.ReadSeeker and want to preserve // the Body value on the Request.Body.
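The StreamSigner above signs event stream messages by chaining each signature off the previous one, starting from the seed signature passed to NewStreamSigner. A hedged usage sketch; the region, service, seed signature, credentials, and message bytes are placeholders rather than values from the source:

	signer := v4.NewStreamSigner("us-west-2", "transcribe", seedSig, creds)
	// Each call advances the chain: the returned signature becomes the
	// previous signature for the next message.
	sig, err := signer.GetSignature(headerBytes, payloadBytes, time.Now().UTC())
	if err != nil {
		return err
	}
	fmt.Printf("chunk signature: %x\n", sig)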
@@ -228,11 +234,9 @@ type signingCtx struct { DisableURIPathEscaping bool - credValues credentials.Value - isPresign bool - formattedTime string - formattedShortTime string - unsignedPayload bool + credValues credentials.Value + isPresign bool + unsignedPayload bool bodyDigest string signedHeaders string @@ -336,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi } var err error - ctx.credValues, err = v4.Credentials.Get() + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) if err != nil { return http.Header{}, err } @@ -421,7 +425,7 @@ var SignRequestHandler = request.NamedHandler{ // If the credentials of the request's config are set to // credentials.AnonymousCredentials the request will not be signed. func SignSDKRequest(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now) + SignSDKRequestWithCurrentTime(req, time.Now) } // BuildNamedHandler will build a generic handler for signing. @@ -429,12 +433,15 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler return request.NamedHandler{ Name: name, Fn: func(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now, opts...) + SignSDKRequestWithCurrentTime(req, time.Now, opts...) }, } } -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception that +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed, ignore the signing of the // request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials { @@ -470,13 +477,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time opt(v4) } - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt - } - + curTime := curTimeFn() signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, ) if err != nil { req.Error = err @@ -485,7 +488,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time } req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTimeFn() + req.LastSignedAt = curTime } const logSignInfoMsg = `DEBUG: Request Signature: @@ -532,39 +535,56 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildSignature() // depends on string to sign if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature } else { parts := []string{ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, + authHeaderSignatureElem + ctx.signature, } - ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) } return nil } -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
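+// +// An illustrative (hypothetical) use, decoding the signature of a request +// that has already been signed: +// +// sig, err := v4.GetSignedRequestSignature(req) +// if err == nil { +// fmt.Println(hex.EncodeToString(sig)) +// }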
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { if ctx.isPresign { duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) } } func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) if ctx.isPresign { ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) @@ -588,8 +608,7 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { var headers []string headers = append(headers, "host") for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { + if !r.IsValid(k) { continue // ignored header } if ctx.SignedHeaderVals == nil { @@ -653,19 +672,15 @@ func (ctx *signingCtx) buildCanonicalString() { func (ctx *signingCtx) buildStringToSign() { ctx.stringToSign = strings.Join([]string{ authHeaderPrefix, - ctx.formattedTime, + formatTime(ctx.Time), ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), }, "\n") } func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) ctx.signature = hex.EncodeToString(signature) } @@ -687,7 +702,11 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) } if includeSHA256Header { @@ -722,31 +741,45 @@ func (ctx *signingCtx) removePresign() { ctx.Query.Del("X-Amz-SignedHeaders") } -func makeHmac(key []byte, data []byte) []byte { +func hmacSHA256(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) hash.Write(data) return hash.Sum(nil) } -func makeSha256(data []byte) []byte { +func hashSHA256(data []byte) []byte { hash := sha256.New() hash.Write(data) return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) 
[]byte { +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() - start, _ := reader.Seek(0, sdkio.SeekCurrent) - defer reader.Seek(start, sdkio.SeekStart) + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure the error is returned if unable to seek back to the start of the payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() - io.Copy(hash, reader) - return hash.Sum(nil) + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil } const doubleSpace = " " // stripExcessSpaces will rewrite the passed in slice's string values to not -// contain muliple side-by-side spaces. +// contain multiple side-by-side spaces. func stripExcessSpaces(vals []string) { var j, k, l, m, spaces int for i, str := range vals { @@ -786,3 +819,28 @@ func stripExcessSpaces(vals []string) { vals[i] = string(buf[:m]) } } + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 8b6f23425..98751ee84 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -2,18 +2,24 @@ package aws import ( "io" + "strings" "sync" "github.com/aws/aws-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. +// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. // -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. +// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API +// operation's input will prevent that operation from being retried in the case +// of network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using with S3 PutObject to stream an object upload, the SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -43,7 +49,8 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned.
// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. // // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { @@ -199,3 +206,59 @@ func (b *WriteAtBuffer) Bytes() []byte { defer b.m.Unlock() return b.buf } + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index c4d155c8e..08127d60d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.14.31" +const SDKVersion = "1.34.0" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go similarity index 86% rename from vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go rename to vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go index 8fdda5303..876dcb3fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -1,6 +1,6 @@ // +build !go1.7 -package aws +package context import "time" @@ -30,12 +30,11 @@ func (*emptyCtx) Value(key interface{}) interface{} { func (e *emptyCtx) String() string { switch e { - case backgroundCtx: + case BackgroundCtx: return "aws.BackgroundContext" } return "unknown empty Context" } -var ( - backgroundCtx = new(emptyCtx) -) +// BackgroundCtx is the common base context. 
+var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 000000000..25ce0fe13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 000000000..8d462f77e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 000000000..3b0ca7afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 000000000..582c024ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
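+// +// For example (an illustrative input, not taken from the source), the line +// `region = us-west-2` lexes to literal("region"), ws, op("="), ws, +// literal("us-west-2").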
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token holds metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw returns the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 000000000..cf9fad81e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,356 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move them + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above.
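+// +// For example (illustrative), from ASTKindStart a TokenLit transitions the +// parser to StatementState, while a TokenSep ('[') moves it to OpenScopeState +// to begin a section statement.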
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. 
+ if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 000000000..24df543d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether or not the leading characters in +// a byte slice form a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation.
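+// +// For example (illustrative values, not from the source): 0b101, 0o17, 0x1A, +// 42, -3.5, and 1e-4 would all be accepted as numbers by this check.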
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type 
numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 000000000..457287019 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 000000000..7f01cf7c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
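+//
+// A minimal push/pop sketch (illustrative only, assuming the unexported
+// newAST constructor takes a kind and a root AST):
+//
+//	s := newParseStack(4, 4)
+//	s.Push(newAST(ASTKindStart, AST{}))
+//	ast := s.Pop() // the AST pushed above; s.Len() is 0 again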
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 000000000..da7a4049c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 000000000..18f3fe893 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 000000000..305999d29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
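+//
+// For example (an illustrative note, not original source): for input
+// "-42x" it returns 3, the length of the leading "-42" run, rather than
+// a parsed numeric value.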
+func getNegativeNumber(b []rune) int {
+	if b[0] != '-' {
+		return 0
+	}
+
+	i := 1
+	for ; i < len(b); i++ {
+		if !isDigit(b[i]) {
+			return i
+		}
+	}
+
+	return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+	if len(value) == 0 {
+		return false
+	}
+
+	switch b {
+	case '\'': // single quote
+	case '"': // quote
+	case 'n': // newline
+	case 't': // tab
+	case '\\': // backslash
+	default:
+		return false
+	}
+
+	return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+	switch b {
+	case '\'': // single quote
+		return '\'', nil
+	case '"': // quote
+		return '"', nil
+	case 'n': // newline
+		return '\n', nil
+	case 't': // tab
+		return '\t', nil
+	case '\\': // backslash
+		return '\\', nil
+	default:
+		return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+	}
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+	for i := 0; i < len(b); i++ {
+		if isEscaped(b[:i], b[i]) {
+			c, err := getEscapedByte(b[i])
+			if err != nil {
+				return b
+			}
+
+			b[i-1] = c
+			b = append(b[:i], b[i+1:]...)
+			i--
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 000000000..94841c324
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,166 @@
+package ini
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+	VisitExpr(AST) error
+	VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+	scope    string
+	Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor.
+func NewDefaultVisitor() *DefaultVisitor {
+	return &DefaultVisitor{
+		Sections: Sections{
+			container: map[string]Section{},
+		},
+	}
+}
+
+// VisitExpr visits expressions and stores their values in the section
+// currently in scope.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			if rhs.Root.Type() != TokenLit {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			t.values[key] = v
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statements, creating a new section for each
+// completed section statement.
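+//
+// For example (illustrative), walking the statements parsed from:
+//
+//	[default]
+//	region = us-west-2
+//
+// first visits the completed section statement, which sets the scope to
+// "default", and then visits the expression statement that stores the
+// "region" value in that section.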
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+		v.Sections.container[name] = Section{}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+	Name   string
+	values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 000000000..99915f7f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 000000000..7ffb4ae06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as a space or tab.
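+//
+// For example (illustrative): isWhitespace(' ') and isWhitespace('\t') are
+// true, while isWhitespace('\n') is false, since newlines are tokenized
+// separately.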
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
new file mode 100644
index 000000000..0b9b0dfce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 (host ID) from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 000000000..6c443988b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 000000000..44898eed0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,15 @@
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 000000000..810ec7f08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,56 @@
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go versions prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
+		bits &= signMask // +-0
+		if e == bias-1 {
+			bits |= uvone // +-1
+		}
+	} else if e < bias+shift {
+		// Round any abs(x) >= 1 containing a fractional component [0,1).
+		//
+		// Numbers with larger exponents are returned unchanged since they
+		// must be either an integer, infinity, or NaN.
+		const half = 1 << (shift - 1)
+		e -= bias
+		bits += half >> e
+		bits &^= fracMask >> e
+	}
+	return math.Float64frombits(bits)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
new file mode 100644
index 000000000..f4651da2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
@@ -0,0 +1,11 @@
+// +build go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read provides the stub for math.Rand.Read method support for Go versions
+// 1.6 and greater.
+func Read(r *rand.Rand, p []byte) (int, error) {
+	return r.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
new file mode 100644
index 000000000..b1d93a33d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
@@ -0,0 +1,24 @@
+// +build !go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read backfills Go 1.6's math.Rand.Read for Go 1.5
+func Read(r *rand.Rand, p []byte) (n int, err error) {
+	// Copy of the Go standard library's math/rand read function, not added
+	// to the standard library until Go 1.6.
+	var pos int8
+	var val int64
+	for n = 0; n < len(p); n++ {
+		if pos == 0 {
+			val = r.Int63()
+			pos = 7
+		}
+		p[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+
+	return n, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 000000000..7da8a49ce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+	// ECSCredsProviderEnvVar is an environmental variable key used to
+	// determine which path needs to be hit.
+	ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden in tests to ensure the credential
+// process is behaving correctly.
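+//
+// The full credentials endpoint is formed by appending the relative URI
+// from the ECSCredsProviderEnvVar environment variable to this base URI
+// (a sketch of the composition performed elsewhere in the SDK):
+//
+//	uri := ECSContainerCredentialsURI + os.Getenv(ECSCredsProviderEnvVar)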
+var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 000000000..d008ae27c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE similarity index 100% rename from vendor/golang.org/x/tools/LICENSE rename to vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000..14ad0c589 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. 
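+//
+// A minimal usage sketch (illustrative; fetch is a hypothetical helper):
+//
+//	var g Group
+//	ch := g.DoChan("key", func() (interface{}, error) {
+//		return fetch("key") // runs once per in-flight key
+//	})
+//	res := <-ch // res.Val, res.Err, res.Shared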
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 000000000..e045f38d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
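+	// (For example, a body of "hello world" would produce
+	// "XrY7u+Ae7tCTyyK7j1rNww==", the base64 of the raw 16-byte digest,
+	// rather than its 32-character hex form.)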
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go index ecc7bf82f..151054971 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -101,7 +101,7 @@ func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { } headers.Set(h.Name, value) } - (*hs) = decodedHeaders(headers) + *hs = decodedHeaders(headers) return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go index 4b972b2d6..474339391 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -21,10 +21,24 @@ type Decoder struct { // NewDecoder initializes and returns a Decoder for decoding event // stream messages from the reader provided. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ r: r, } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } } // Decode attempts to decode a single message from the event stream reader. @@ -40,6 +54,15 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { }() } + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. +func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { crc := crc32.New(crc32IEEETable) hashReader := io.TeeReader(reader, crc) @@ -72,12 +95,6 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { return m, nil } -// UseLogger specifies the Logger that that the decoder should use to log the -// message decode to. -func (d *Decoder) UseLogger(logger aws.Logger) { - d.logger = logger -} - func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { w := bytes.NewBuffer(nil) defer func() { logger.Log(w.String()) }() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go index 150a60981..ffade3bc0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -3,61 +3,107 @@ package eventstream import ( "bytes" "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" "hash" "hash/crc32" "io" + + "github.com/aws/aws-sdk-go/aws" ) // Encoder provides EventStream message encoding. type Encoder struct { - w io.Writer + w io.Writer + logger aws.Logger headersBuf *bytes.Buffer } // NewEncoder initializes and returns an Encoder to encode Event Stream // messages to an io.Writer. 
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
+func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder {
+	e := &Encoder{
 		w:          w,
 		headersBuf: bytes.NewBuffer(nil),
 	}
+
+	for _, opt := range opts {
+		opt(e)
+	}
+
+	return e
+}
+
+// EncodeWithLogger adds a logger to be used by the encoder when encoding
+// stream events.
+func EncodeWithLogger(logger aws.Logger) func(*Encoder) {
+	return func(d *Encoder) {
+		d.logger = logger
+	}
 }
 
 // Encode encodes a single EventStream message to the io.Writer the Encoder
 // was created with. An error is returned if writing the message fails.
-func (e *Encoder) Encode(msg Message) error {
+func (e *Encoder) Encode(msg Message) (err error) {
 	e.headersBuf.Reset()
 
-	err := encodeHeaders(e.headersBuf, msg.Headers)
-	if err != nil {
+	writer := e.w
+	if e.logger != nil {
+		encodeMsgBuf := bytes.NewBuffer(nil)
+		writer = io.MultiWriter(writer, encodeMsgBuf)
+		defer func() {
+			logMessageEncode(e.logger, encodeMsgBuf, msg, err)
+		}()
+	}
+
+	if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil {
 		return err
 	}
 
 	crc := crc32.New(crc32IEEETable)
-	hashWriter := io.MultiWriter(e.w, crc)
+	hashWriter := io.MultiWriter(writer, crc)
 
 	headersLen := uint32(e.headersBuf.Len())
 	payloadLen := uint32(len(msg.Payload))
 
-	if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
+	if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
 		return err
 	}
 
 	if headersLen > 0 {
-		if _, err := io.Copy(hashWriter, e.headersBuf); err != nil {
+		if _, err = io.Copy(hashWriter, e.headersBuf); err != nil {
 			return err
 		}
 	}
 
 	if payloadLen > 0 {
-		if _, err := hashWriter.Write(msg.Payload); err != nil {
+		if _, err = hashWriter.Write(msg.Payload); err != nil {
 			return err
 		}
 	}
 
 	msgCRC := crc.Sum32()
-	return binary.Write(e.w, binary.BigEndian, msgCRC)
+	return binary.Write(writer, binary.BigEndian, msgCRC)
+}
+
+func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
+	w := bytes.NewBuffer(nil)
+	defer func() { logger.Log(w.String()) }()
+
+	fmt.Fprintf(w, "Message to encode:\n")
+	encoder := json.NewEncoder(w)
+	if err := encoder.Encode(msg); err != nil {
+		fmt.Fprintf(w, "Failed to get encoded message, %v\n", err)
+	}
+
+	if encodeErr != nil {
+		fmt.Fprintf(w, "Encode error: %v\n", encodeErr)
+		return
+	}
+
+	fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes()))
 }
 
 func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
@@ -86,7 +132,9 @@ func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32)
 	return nil
 }
 
-func encodeHeaders(w io.Writer, headers Headers) error {
+// EncodeHeaders writes the header values to the writer encoded in the event
+// stream format. Returns an error if a header fails to encode.
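+//
+// Each header is written as a length-prefixed name followed by a typed
+// value, roughly (a sketch of the layout, not a byte-exact spec):
+//
+//	| name len (1 byte) | name | value type (1 byte) | value |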
+func EncodeHeaders(w io.Writer, headers Headers) error { for _, h := range headers { hn := headerName{ Len: uint8(len(h.Name)), diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go index 5ea5a988b..34c2e89d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -1,6 +1,9 @@ package eventstreamapi -import "fmt" +import ( + "fmt" + "sync" +) type messageError struct { code string @@ -22,3 +25,53 @@ func (e messageError) Error() string { func (e messageError) OrigErr() error { return nil } + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. +func (e *OnceError) ErrorSet() <-chan struct{} { + return e.ch +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go similarity index 76% rename from vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go index 97937c8e5..0e4aa42f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -2,9 +2,7 @@ package eventstreamapi import ( "fmt" - "io" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" ) @@ -15,27 +13,8 @@ type Unmarshaler interface { UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error } -// EventStream headers with specific meaning to async API functionality. -const ( - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". - - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) - // EventReader provides reading from the EventStream of an reader. 
type EventReader struct { - reader io.ReadCloser decoder *eventstream.Decoder unmarshalerForEventType func(string) (Unmarshaler, error) @@ -47,27 +26,18 @@ type EventReader struct { // NewEventReader returns a EventReader built from the reader and unmarshaler // provided. Use ReadStream method to start reading from the EventStream. func NewEventReader( - reader io.ReadCloser, + decoder *eventstream.Decoder, payloadUnmarshaler protocol.PayloadUnmarshaler, unmarshalerForEventType func(string) (Unmarshaler, error), ) *EventReader { return &EventReader{ - reader: reader, - decoder: eventstream.NewDecoder(reader), + decoder: decoder, payloadUnmarshaler: payloadUnmarshaler, unmarshalerForEventType: unmarshalerForEventType, payloadBuf: make([]byte, 10*1024), } } -// UseLogger instructs the EventReader to use the logger and log level -// specified. -func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { - if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { - r.decoder.UseLogger(logger) - } -} - // ReadEvent attempts to read a message from the EventStream and return the // unmarshaled event value that the message is for. // @@ -95,15 +65,27 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) { case EventMessageType: return r.unmarshalEventMessage(msg) case ExceptionMessageType: - err = r.unmarshalEventException(msg) - return nil, err + return nil, r.unmarshalEventException(msg) case ErrorMessageType: return nil, r.unmarshalErrorMessage(msg) default: - return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + return nil, &UnknownMessageTypeError{ + Type: typ, Message: msg.Clone(), + } } } +// UnknownMessageTypeError provides an error when a message is received from +// the stream, but the reader is unable to determine what kind of message it is. +type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + func (r *EventReader) unmarshalEventMessage( msg eventstream.Message, ) (event interface{}, err error) { @@ -174,11 +156,6 @@ func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) return msgErr } -// Close closes the EventReader's EventStream reader. -func (r *EventReader) Close() error { - return r.reader.Close() -} - // GetHeaderString returns the value of the header as a string. If the header // is not set or the value is not a string an error will be returned. func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 000000000..e46b8acc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+
+	// Message Error
+	ErrorCodeHeader    = `:error-code`
+	ErrorMessageHeader = `:error-message`
+
+	// Message Exception
+	ExceptionTypeHeader = `:exception-type`
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
new file mode 100644
index 000000000..3a7ba5cd5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
@@ -0,0 +1,123 @@
+package eventstreamapi
+
+import (
+	"bytes"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+var timeNow = time.Now
+
+// StreamSigner defines an interface for the implementation of signing of event stream payloads
+type StreamSigner interface {
+	GetSignature(headers, payload []byte, date time.Time) ([]byte, error)
+}
+
+// SignEncoder envelopes event stream messages
+// into an event stream message payload with included
+// signature headers using the provided signer and encoder.
+type SignEncoder struct {
+	signer     StreamSigner
+	encoder    Encoder
+	bufEncoder *BufferEncoder
+
+	closeErr error
+	closed   bool
+}
+
+// NewSignEncoder returns a new SignEncoder using the provided stream signer and
+// event stream encoder.
+func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder {
+	// TODO: Need to pass down logging
+
+	return &SignEncoder{
+		signer:     signer,
+		encoder:    encoder,
+		bufEncoder: NewBufferEncoder(),
+	}
+}
+
+// Close encodes a final event stream signing envelope with an empty event stream
+// payload. This final end-frame is used to mark the conclusion of the stream.
+func (s *SignEncoder) Close() error {
+	if s.closed {
+		return s.closeErr
+	}
+
+	if err := s.encode([]byte{}); err != nil {
+		if strings.Contains(err.Error(), "on closed pipe") {
+			return nil
+		}
+
+		s.closeErr = err
+		s.closed = true
+		return s.closeErr
+	}
+
+	return nil
+}
+
+// Encode takes the provided message and envelopes it with the required
+// signature.
+func (s *SignEncoder) Encode(msg eventstream.Message) error {
+	payload, err := s.bufEncoder.Encode(msg)
+	if err != nil {
+		return err
+	}
+
+	return s.encode(payload)
+}
+
+func (s SignEncoder) encode(payload []byte) error {
+	date := timeNow()
+
+	var msg eventstream.Message
+	msg.Headers.Set(DateHeader, eventstream.TimestampValue(date))
+	msg.Payload = payload
+
+	var headers bytes.Buffer
+	if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil {
+		return err
+	}
+
+	sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date)
+	if err != nil {
+		return err
+	}
+
+	msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig))
+
+	return s.encoder.Encode(msg)
+}
+
+// BufferEncoder is a utility that provides a buffered
+// event stream encoder
+type BufferEncoder struct {
+	encoder Encoder
+	buffer  *bytes.Buffer
+}
+
+// NewBufferEncoder returns a new BufferEncoder initialized
+// with a 1024 byte buffer.
+func NewBufferEncoder() *BufferEncoder {
+	buf := bytes.NewBuffer(make([]byte, 1024))
+	return &BufferEncoder{
+		encoder: eventstream.NewEncoder(buf),
+		buffer:  buf,
+	}
+}
+
+// Encode returns the encoded message as a byte slice.
+// The returned byte slice will be modified on the next encode call
+// and should not be held onto.
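+//
+// A caller that needs to retain the bytes should copy them first
+// (illustrative):
+//
+//	p, err := e.Encode(msg)
+//	if err == nil {
+//		kept := append([]byte(nil), p...) // copy before the next Encode
+//	}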
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 000000000..433bb1630 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. 
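+//
+// A minimal caller sketch (illustrative; evt is a hypothetical Marshaler
+// and ctx an aws.Context):
+//
+//	if err := w.Send(ctx, evt); err != nil {
+//		// the stream closed, the context was canceled, or the write failed
+//	}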
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 000000000..10a3823df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,109 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling interface for event types to event stream +// messages. +type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. 
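+//
+// WriteEvent itself is not synchronized; StreamWriter serializes calls to
+// it on a single goroutine (an observation on the surrounding code, not
+// new behavior).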
+func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go index 3b44dde2f..f6f8c5674 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -52,6 +52,15 @@ func (hs *Headers) Del(name string) { } } +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + func decodeHeaders(r io.Reader) (Headers, error) { hs := Headers{} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go index e3fc0766a..9f509d8f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -461,6 +461,11 @@ func (v *TimestampValue) decode(r io.Reader) error { return nil } +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + func timeFromEpochMilli(t int64) time.Time { secs := t / 1e3 msec := t % 1e3 diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go index 2dc012a66..f7427da03 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -27,7 +27,7 @@ func (m *Message) rawMessage() (rawMessage, error) { if len(m.Headers) > 0 { var headers bytes.Buffer - if err :=
encodeHeaders(&headers, m.Headers); err != nil { + if err := EncodeHeaders(&headers, m.Headers); err != nil { return rawMessage{}, err } raw.Headers = headers.Bytes() @@ -57,6 +57,20 @@ func (m *Message) rawMessage() (rawMessage, error) { return raw, nil } +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + type messagePrelude struct { Length uint32 HeadersLen uint32 diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..d7d42db0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's host is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns an error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel reports whether the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler for the given host prefix. +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host.
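+// +// For example (hypothetical values), a modeled prefix of "{Bucket}." with the +// label Bucket="amzn-bucket" turns an endpoint host of +// "s3.us-west-2.amazonaws.com" into "amzn-bucket.s3.us-west-2.amazonaws.com".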
+type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 000000000..864fb6704 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
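+ // (A nil pointer member can still carry an idempotencyToken tag; in that + // case CanSetIdempotencyToken below auto-fills it, so nil members are + // inspected here rather than skipped up front.)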
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 000000000..5e9499699 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,282 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshals the reader's JSON document into the passed in +// type. The value to unmarshal the JSON document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the result into object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members.
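+// +// A hedged sketch of the behavior: a JSON document such as {"foo": 1} will +// still populate a member whose locationName is "Foo", because member names +// fall back to a strings.EqualFold comparison when no exact match exists.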
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
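+ // Note: if several keys differ only by case, Go's unspecified map + // iteration order determines which value wins.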
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go index e21614a12..0ea0647a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -64,7 +64,7 @@ func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error metadata.ClientInfo{}, request.Handlers{}, nil, - &request.Operation{HTTPMethod: "GET"}, + &request.Operation{HTTPMethod: "PUT"}, v, nil, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 000000000..9d521dcb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. 
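+// +// A wiring sketch (hedged; assumes access to a request's handler list): +// +// r.Handlers.Unmarshal.PushBack(protocol.RequireHTTPMinProtocol{Major: 1, Minor: 1}.Handler)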
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New(ErrCodeMinimumHTTPProtocolError, + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 60e5b09d5..d40346a77 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,7 +1,7 @@ // Package query provides serialization of AWS query requests, and responses. package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go import ( "net/url" @@ -21,7 +21,7 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index e0f4d5a54..9231e95d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -1,6 +1,6 @@ package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go import ( "encoding/xml" @@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index f21429617..831b0110c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,65 +2,68 @@ package query import ( "encoding/xml" - "io/ioutil" + "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalErrorHandler is a named request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string
`xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` +type xmlResponseError struct { + xmlErrorResponse } -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} // UnmarshalError unmarshals an error response for an AWS Query service. func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) - return - } - - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, - reqID, + r.RequestID, ) return } - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID } - // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index b34f5258a..1301b149d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -25,6 +25,8 @@ var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") +var byteSliceType = reflect.TypeOf([]byte{}) + func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo continue } + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey, which is modeled as a string + // but must be base64 encoded in the request.
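+ // For example, a string member tagged marshal-as:"blob" is converted to + // []byte here so that its header value is emitted base64 encoded.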
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + var err error switch field.Tag.Get("location") { case "headers": // header maps @@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -152,9 +162,12 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect. if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + header.Add(name, str) return nil @@ -167,11 +180,13 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) - header.Add(prefix+key.String(), str) + header.Add(prefix+keyStr, str) } return nil } @@ -181,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -214,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } query.Set(name, str) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 33fd53b12..92f8b4d9a 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" "github.com/aws/aws-sdk-go/private/protocol" ) @@ -28,7 +29,9 @@ var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta func Unmarshal(r *request.Request) { if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } } } @@ -40,12 +43,21 @@ func UnmarshalMeta(r *request.Request) { r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") } if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } } } -func unmarshalBody(r *request.Request, v reflect.Value) { +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. 
The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target data type. +func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) @@ -57,35 +69,38 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + payload.Set(reflect.ValueOf(b)) + case *string: defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + default: switch payload.Type().String() { case "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", + return awserr.New(request.ErrCodeSerialization, "failed to read response body", err) - return } payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -94,9 +109,11 @@ func unmarshalBody(r *request.Request, v reflect.Value) { } } } + + return nil } -func unmarshalLocationElements(r *request.Request, v reflect.Value) { +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { for i := 0; i < v.NumField(); i++ { m, field := v.Field(i), v.Type().Field(i) if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { @@ -111,26 +128,25 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { switch field.Tag.Get("location") { case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + unmarshalStatusCode(m, resp.StatusCode) + case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + case "headers": prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } } } - if r.Error != nil { - return - } } + + 
return nil } func unmarshalStatusCode(v reflect.Value, statusCode int) { @@ -145,29 +161,45 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } } -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} for k, v := range headers { - k = http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +210,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index 7bdf4c853..b1ae36487 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -2,8 +2,8 @@ // requests and responses.
package restxml -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go import ( "bytes" @@ -36,7 +36,12 @@ func Build(r *request.Request) { var buf bytes.Buffer err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { - r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, + r.RequestID, + ) return } r.SetBufferBody(buf.Bytes()) @@ -50,7 +55,12 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } else { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f8..d2f6dae53 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@ const ( ) // Time formats supported by the SDK +// Output timestamps do not include sub-second precision const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output times without sub-second precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + + // RFC3339, a subset of the ISO8601 timestamp format.
e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output times without sub-second precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,11 +52,12 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) default: panic("unknown timestamp format name, " + name) } @@ -62,10 +73,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) if err != nil { return time.Time{}, err } + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123 - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go index da1a68111..f614ef898 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -19,3 +19,9 @@ func UnmarshalDiscardBody(r *request.Request) { io.Copy(ioutil.Discard, r.HTTPResponse.Body) r.HTTPResponse.Body.Close() } + +// ResponseMetadata provides the SDK response metadata attributes. +type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 000000000..cc857f136 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling of API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized with the error unmarshaler provided. +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler the handler +// was initialized with.
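+// +// A hedged wiring sketch (svc and unmarshaler are placeholders for a service +// client and a protocol-specific ErrorUnmarshaler): +// +// svc.Handlers.UnmarshalError.PushBackNamed(NewUnmarshalErrorHandler(unmarshaler).NamedHandler())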
+func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// error's exception name. +func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 07764c862..09ad95159 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -8,6 +8,7 @@ import ( "reflect" "sort" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/private/protocol" @@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle return nil } + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + t := tag.Get("type") if t == "" { switch value.Kind() { @@ -87,15 +96,13 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle } } -// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested // types are converted to XMLNodes also. func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { if !value.IsValid() { return nil } - fieldAdded := false - // unwrap payloads if payload := tag.Get("payload"); payload != "" { field, _ := value.Type().FieldByName(payload) @@ -123,6 +130,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl child.Attr = append(child.Attr, ns) } + var payloadFields, nonPayloadFields int + t := value.Type() for i := 0; i < value.NumField(); i++ { member := elemOf(value.Field(i)) @@ -137,8 +146,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ continue } + payloadFields++ if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() @@ -153,11 +164,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl if err := b.buildValue(member, child, mTag); err != nil { return err } - - fieldAdded = true } - if fieldAdded { // only append this child if we have one ore more valid members + // The only case where the child shape is not added is when the shape + // contains only non-payload fields, e.g. headers/query.
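+ // For example, a shape whose members are all tagged location:"header" or + // location:"querystring" produces no XML element at all.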
+ if !(payloadFields == 0 && nonPayloadFields > 0) { current.AddChild(child) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 000000000..c1a511851 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index ff1ef6830..107c053f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -1,6 +1,7 @@ package xmlutil import ( + "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -10,9 +11,27 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol" ) +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. @@ -45,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { // parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect // will be used to determine the type from r. func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + rtype := r.Type() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() // check kind of actual element type diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 515ce1521..42f71648e 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) { // StructToXML writes an XMLNode to a xml.Encoder as tokens. 
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, 0, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) if node.Text != "" { e.EncodeToken(xml.CharData([]byte(node.Text))) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 0e999ca33..8aadf3c56 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "sync" - "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" @@ -15,11 +14,13 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) const opAbortMultipartUpload = "AbortMultipartUpload" @@ -27,7 +28,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -66,11 +67,31 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// Aborts a multipart upload. +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. // // To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// operation and ensure that the parts list is empty. +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
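+// +// A minimal call sketch (hedged; bucket, key, and upload ID are placeholders): +// +// _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ +// Bucket: aws.String("myBucket"), +// Key: aws.String("myKey"), +// UploadId: aws.String("uploadID"), +// })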
+// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -110,7 +131,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -151,6 +172,65 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // Completes a multipart upload by assembling previously uploaded parts. // +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this operation to complete the upload. Upon receiving this request, +// Amazon S3 concatenates all the parts in ascending order by part number to +// create a new object. In the Complete Multipart Upload request, you must provide +// the parts list. You must ensure that the parts list is complete. This operation +// concatenates the parts that you provide in the list. For each part in the +// list, you must provide the part number and the ETag value, returned after +// that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
+//
+// CompleteMultipartUpload has the following special errors:
+//
+// * Error code: EntityTooSmall Description: Your proposed upload is smaller
+// than the minimum allowed object size. Each part must be at least 5 MB
+// in size, except the last part. 400 Bad Request
+//
+// * Error code: InvalidPart Description: One or more of the specified parts
+// could not be found. The part might not have been uploaded, or the specified
+// entity tag might not have matched the part's entity tag. 400 Bad Request
+//
+// * Error code: InvalidPartOrder Description: The list of parts was not
+// in ascending order. The parts list must be specified in order by part
+// number. 400 Bad Request
+//
+// * Error code: NoSuchUpload Description: The specified multipart upload
+// does not exist. The upload ID might be invalid, or the multipart upload
+// might have been aborted or completed. 404 Not Found
+//
+// The following operations are related to CompleteMultipartUpload:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -184,7 +264,7 @@ const opCopyObject = "CopyObject"
 
 // CopyObjectRequest generates a "aws/request.Request" representing the
 // client's request for the CopyObject operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -225,6 +305,153 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
 //
 // Creates a copy of an object that is already stored in Amazon S3.
 //
+// You can store individual objects of up to 5 TB in Amazon S3. You create a
+// copy of your object up to 5 GB in size in a single atomic operation using
+// this API. However, to copy an object greater than 5 GB, you must use the
+// multipart upload Upload Part - Copy API. For more information, see Copy Object
+// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
+//
+// All copy requests must be authenticated. Additionally, you must have read
+// access to the source object and write access to the destination bucket. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+// Both the Region that you want to copy the object from and the Region that
+// you want to copy the object to must be enabled for your account.
+//
+// A copy request might return an error when Amazon S3 receives the copy request
+// or while Amazon S3 is copying the files. If the error occurs before the copy
+// operation starts, you receive a standard Amazon S3 error. If the error occurs
+// during the copy operation, the error response is embedded in the 200 OK response.
+// This means that a 200 OK response can contain either a success or an error.
+// Design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// If the request is an HTTP 1.1 request, the response is chunk encoded. If
+// it were not, it would not contain the content-length, and you would need
+// to read the entire body.
+//
+// The copy request charge is based on the storage class and Region that you
+// specify for the destination object. For pricing information, see Amazon S3
+// pricing (https://aws.amazon.com/s3/pricing/).
+//
+// Amazon S3 transfer acceleration does not support cross-Region copies. If
+// you request a cross-Region copy using a transfer acceleration endpoint, you
+// get a 400 Bad Request error. For more information, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+//
+// Metadata
+//
+// When copying an object, you can preserve all metadata (default) or specify
+// new metadata. However, the ACL is not preserved and is set to private for
+// the user making the request. To override the default ACL setting, specify
+// a new ACL when generating a copy request. For more information, see Using
+// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+//
+// To specify whether you want the object metadata copied from the source object
+// or replaced with metadata provided in the request, you can optionally add
+// the x-amz-metadata-directive header. When you grant permissions, you can
+// use the s3:x-amz-metadata-directive condition key to enforce certain metadata
+// behavior when objects are uploaded. For more information, see Specifying
+// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
+// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific
+// condition keys, see Actions, Resources, and Condition Keys for Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
+//
+// x-amz-copy-source-if Headers
+//
+// To only copy an object under certain conditions, such as whether the Etag
+// matches or whether the object was modified before or after a specified date,
+// use the following request parameters:
+//
+// * x-amz-copy-source-if-match
+//
+// * x-amz-copy-source-if-none-match
+//
+// * x-amz-copy-source-if-unmodified-since
+//
+// * x-amz-copy-source-if-modified-since
+//
+// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// 200 OK and copies the data:
+//
+// * x-amz-copy-source-if-match condition evaluates to true
+//
+// * x-amz-copy-source-if-unmodified-since condition evaluates to false
+//
+// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// the 412 Precondition Failed response code:
+//
+// * x-amz-copy-source-if-none-match condition evaluates to false
+//
+// * x-amz-copy-source-if-modified-since condition evaluates to true
+//
+// All headers with the x-amz- prefix, including x-amz-copy-source, must be
+// signed.
+//
+// Encryption
+//
+// The source object that you are copying can be encrypted or unencrypted. The
+// source object can be encrypted with server-side encryption using AWS managed
+// encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption
+// key. With server-side encryption, Amazon S3 encrypts your data as it writes
+// it to disks in its data centers and decrypts the data when you access it.
+//
+// You can optionally use the appropriate encryption-related headers to request
+// server-side encryption for the target object. You have the option to provide
+// your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form
+// of server-side encryption that was used to encrypt the source object. You
+// can even request encryption if the source object was not encrypted. For more
+// information about server-side encryption, see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// When copying an object, you can optionally use headers to grant ACL-based
+// permissions. By default, all objects are private. Only the owner has full
+// access control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the ACL on the object. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+//
+// Storage Class Options
+//
+// You can use the CopyObject operation to change the storage class of an object
+// that is already stored in Amazon S3 using the StorageClass parameter. For
+// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+// in the Amazon S3 Service Developer Guide.
+//
+// Versioning
+//
+// By default, x-amz-copy-source identifies the current version of an object
+// to copy. If the current version is a delete marker, Amazon S3 behaves as
+// if the object was deleted. To copy a different version, use the versionId
+// subresource.
+//
+// If you enable versioning on the target bucket, Amazon S3 generates a unique
+// version ID for the object being copied. This version ID is different from
+// the version ID of the source object. Amazon S3 returns the version ID of
+// the copied object in the x-amz-version-id response header in the response.
+//
+// If you do not enable versioning or suspend it on the target bucket, the version
+// ID that Amazon S3 generates is always null.
+//
+// If the source object's storage class is GLACIER, you must restore a copy
+// of this object before you can use it as a source object for the copy operation.
+// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+//
+// The following operations are related to CopyObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
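
Pulling the conditional-copy, storage-class, and embedded-error notes above together, here is a speculative sketch; the bucket, key, and ETag values are placeholders, and it assumes the client and imports from the first sketch plus net/url, fmt, and aws/awserr. Even an apparent 200 OK copy can carry an embedded failure, so the error return is always checked.

func conditionalCopy(svc *s3.S3) error {
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket: aws.String("dest-bucket"),
		Key:    aws.String("dest-key"),
		// CopySource is "bucket/key", URL-encoded; a ?versionId=... suffix
		// selects a specific version instead of the current one.
		CopySource:        aws.String(url.QueryEscape("source-bucket/source-key")),
		CopySourceIfMatch: aws.String(`"expected-source-etag"`), // hypothetical ETag
		StorageClass:      aws.String(s3.StorageClassStandardIa),
	})
	if err != nil {
		// Errors embedded in a 200 OK body are expected to surface here too.
		if aerr, ok := err.(awserr.Error); ok {
			return fmt.Errorf("copy failed: %s: %s", aerr.Code(), aerr.Message())
		}
		return err
	}
	fmt.Println("copied, new ETag:", aws.StringValue(out.CopyObjectResult.ETag))
	return nil
}
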
@@ -235,7 +462,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
 // Returned Error Codes:
 // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
 // The source object of the COPY operation is not in the active tier and is
-// only stored in Amazon Glacier.
+// only stored in Amazon S3 Glacier.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
 func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
@@ -264,7 +491,7 @@ const opCreateBucket = "CreateBucket"
 
 // CreateBucketRequest generates a "aws/request.Request" representing the
 // client's request for the CreateBucket operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -303,7 +530,67 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
 
 // CreateBucket API operation for Amazon Simple Storage Service.
 //
-// Creates a new bucket.
+// Creates a new bucket. To create a bucket, you must register with Amazon S3
+// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests
+// are never allowed to create buckets. By creating the bucket, you become the
+// bucket owner.
+//
+// Not every string is an acceptable bucket name. For information on bucket
+// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html).
+//
+// By default, the bucket is created in the US East (N. Virginia) Region. You
+// can optionally specify a Region in the request body. You might choose a Region
+// to optimize latency, minimize costs, or address regulatory requirements.
+// For example, if you reside in Europe, you will probably find it advantageous
+// to create buckets in the Europe (Ireland) Region. For more information, see
+// How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+//
+// If you send your create bucket request to the s3.amazonaws.com endpoint,
+// the request goes to the us-east-1 Region. Accordingly, the signature calculations
+// in Signature Version 4 must use us-east-1 as the Region, even if the location
+// constraint in the request specifies another Region where the bucket is to
+// be created. If you create a bucket in a Region other than US East (N. Virginia),
+// your application must be able to handle a 307 redirect. For more information,
+// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html).
+//
+// When creating a bucket using this operation, you can optionally specify the
+// accounts or groups that should be granted specific permissions on the bucket.
+// There are two ways to grant the appropriate permissions using the request
+// headers.
+//
+// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. For more information, see
+// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. These headers map to the set of permissions Amazon S3 supports
+// in an ACL. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: id – if the value specified is the canonical user ID of an
+// AWS account uri – if you are granting permissions to a predefined group
+// emailAddress – if the value specified is the email address of an AWS
+// account Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants the AWS accounts identified by account IDs permissions to
+// read object data and its metadata: x-amz-grant-read: id="11112222333",
+// id="444455556666"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// The following operations are related to CreateBucket:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -318,6 +605,11 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
 // by all users of the system. Please select a different name and try again.
 //
 // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+// The bucket you tried to create already exists, and you own it. Amazon S3
+// returns this error in all AWS Regions except in the North Virginia Region.
+// For legacy compatibility, if you re-create an existing bucket that you already
+// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+// bucket access control lists (ACLs).
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
 func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
@@ -346,7 +638,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
 
 // CreateMultipartUploadRequest generates a "aws/request.Request" representing the
 // client's request for the CreateMultipartUpload operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -385,13 +677,155 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
 
 // CreateMultipartUpload API operation for Amazon Simple Storage Service.
 //
-// Initiates a multipart upload and returns an upload ID.
-//
-// Note: After you initiate multipart upload and upload one or more parts, you
-// must either complete or abort multipart upload in order to stop getting charged
-// for storage of the uploaded parts. Only after you either complete or abort
-// multipart upload, Amazon S3 frees up the parts storage and stops charging
-// you for the parts storage.
+// This operation initiates a multipart upload and returns an upload ID. This
+// upload ID is used to associate all of the parts in the specific multipart
+// upload. You specify this upload ID in each of your subsequent upload part
+// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)).
+// You also include this upload ID in the final request to either complete or
+// abort the multipart upload request.
+//
+// For more information about multipart uploads, see Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html).
+//
+// If you have configured a lifecycle rule to abort incomplete multipart uploads,
+// the upload must complete within the number of days specified in the bucket
+// lifecycle configuration. Otherwise, the incomplete multipart upload becomes
+// eligible for an abort operation and Amazon S3 aborts the multipart upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+//
+// For information about the permissions required to use the multipart upload
+// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// For request signing, multipart upload is just a series of regular requests.
+// You initiate a multipart upload, send one or more requests to upload parts,
+// and then complete the multipart upload process. You sign each request individually.
+// There is nothing special about signing multipart upload requests. For more
+// information about signing, see Authenticating Requests (AWS Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or
+// abort the multipart upload. Amazon S3 frees up the space used to store the
+// parts and stops charging you for storing them only after you either complete
+// or abort a multipart upload.
+//
+// You can optionally request server-side encryption. For server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. You can provide your own encryption key,
+// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
+// Amazon S3-managed encryption keys. If you choose to provide your own encryption
+// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the request to initiate the upload
+// by using CreateMultipartUpload.
+//
+// To perform a multipart upload with encryption using an AWS KMS CMK, the requester
+// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*,
+// and kms:DescribeKey actions on the key. These permissions are required because
+// Amazon S3 must decrypt and read data from the encrypted file parts before
+// it completes the multipart upload.
+//
+// If your AWS Identity and Access Management (IAM) user or role is in the same
+// AWS account as the AWS KMS CMK, then you must have these permissions on the
+// key policy. If your IAM user or role belongs to a different account than
+// the key, then you must have the permissions on both the key policy and your
+// IAM user or role.
+//
+// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Permissions
+//
+// When copying an object, you can optionally specify the accounts or groups
+// that should be granted specific permissions on the new object. There are
+// two ways to grant the permissions using the request headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
+// map to the set of permissions that Amazon S3 supports in an ACL. For more
+// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Server-Side-Encryption-Specific Request Headers
+//
+// You can optionally tell Amazon S3 to encrypt data at rest using server-side
+// encryption. Server-side encryption is for data encryption at rest. Amazon
+// S3 encrypts your data as it writes it to disks in its data centers and decrypts
+// it when you access it. The option you use depends on whether you want to
+// use AWS managed encryption keys or provide your own encryption key.
+//
+// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs)
+// stored in AWS Key Management Service (AWS KMS) – If you want AWS to
+// manage the keys used to encrypt data, specify the following headers in
+// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id
+// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms,
+// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon
+// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and
+// PUT requests for an object protected by AWS KMS fail if you don't make
+// them with SSL or by using SigV4. For more information about server-side
+// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data
+// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// * Use customer-provided encryption keys – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key
+// x-amz-server-side-encryption-customer-key-MD5 For more information about
+// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting
+// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
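
A sketch of initiating an upload with SSE-KMS as described in the bullets above, assuming the client and imports from the first sketch; the key ID is a placeholder, and omitting SSEKMSKeyId is expected to fall back to the AWS managed CMK.

func startEncryptedUpload(svc *s3.S3, bucket, key string) (string, error) {
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		// Hypothetical key ID; omit SSEKMSKeyId to use the AWS managed CMK.
		SSEKMSKeyId: aws.String("my-kms-key-id"),
	})
	if err != nil {
		return "", err
	}
	// Every subsequent UploadPart call must carry this upload ID.
	return aws.StringValue(out.UploadId), nil
}
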
+//
+// Access-Control-List (ACL)-Specific Request Headers
+//
+// You also can use the following access control–related headers with this
+// operation. By default, all objects are private. Only the owner has full access
+// control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the access control list (ACL) on the object. For more information,
+// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// With this operation, you can grant access permissions using one of the following
+// two methods:
+//
+// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined
+// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
+// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly — To explicitly grant access
+// permissions to specific AWS accounts or groups, use the following headers.
+// Each header maps to specific permissions that Amazon S3 supports in an
+// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// In the header, you specify a list of grantees who get the specific permission.
+// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write
+// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: id – if the value specified is the canonical user ID of an
+// AWS account uri – if you are granting permissions to a predefined group
+// emailAddress – if the value specified is the email address of an AWS
+// account Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants the AWS accounts identified by account IDs permissions to
+// read object data and its metadata: x-amz-grant-read: id="11112222333",
+// id="444455556666"
+//
+// The following operations are related to CreateMultipartUpload:
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -426,7 +860,7 @@ const opDeleteBucket = "DeleteBucket"
 
 // DeleteBucketRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucket operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -460,15 +894,20 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
 	output = &DeleteBucketOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // DeleteBucket API operation for Amazon Simple Storage Service.
 //
-// Deletes the bucket. All objects (including all object versions and Delete
-// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+// Deletes the bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -503,7 +942,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
 
 // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -537,8 +976,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 	output = &DeleteBucketAnalyticsConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -547,6 +985,23 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 // Deletes an analytics configuration for the bucket (specified by the analytics
 // configuration ID).
 //
+// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
+// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// The following operations are related to DeleteBucketAnalyticsConfiguration:
+//
+// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+//
+// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -580,7 +1035,7 @@ const opDeleteBucketCors = "DeleteBucketCors"
 
 // DeleteBucketCorsRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketCors operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -614,8 +1069,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
 	output = &DeleteBucketCorsOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -623,6 +1077,19 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
 //
 // Deletes the cors configuration information set for the bucket.
 //
+// To use this operation, you must have permission to perform the s3:PutBucketCORS
+// action. The bucket owner has this permission by default and can grant this
+// permission to others.
+//
+// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources:
+//
+// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -656,7 +1123,7 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption"
 
 // DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketEncryption operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
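
Removing a bucket's cors configuration is a single call; a sketch under the same assumptions as the earlier examples (placeholder bucket name):

_, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
	Bucket: aws.String("my-bucket"), // placeholder
})
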
@@ -690,14 +1157,29 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (
 	output = &DeleteBucketEncryptionOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // DeleteBucketEncryption API operation for Amazon Simple Storage Service.
 //
-// Deletes the server-side encryption configuration from the bucket.
+// This implementation of the DELETE operation removes default encryption from
+// the bucket. For information about the Amazon S3 default encryption feature,
+// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
+//
+// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -732,7 +1214,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration
 
 // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -766,8 +1248,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
 	output = &DeleteBucketInventoryConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -776,6 +1257,23 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
 // Deletes an inventory configuration (identified by the inventory ID) from
 // the bucket.
 //
+// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
+//
+// Operations related to DeleteBucketInventoryConfiguration include:
+//
+// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+//
+// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -809,7 +1307,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
 
 // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketLifecycle operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -843,14 +1341,33 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
 	output = &DeleteBucketLifecycleOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
 //
-// Deletes the lifecycle configuration from the bucket.
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3
+// removes all the lifecycle configuration rules in the lifecycle subresource
+// associated with the bucket. Your objects never expire, and Amazon S3 no longer
+// automatically deletes any objects on the basis of rules contained in the
+// deleted lifecycle configuration.
+//
+// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration
+// action. By default, the bucket owner has this permission and the bucket owner
+// can grant this permission to others.
+//
+// There is usually some time lag before lifecycle configuration deletion is
+// fully propagated to all the Amazon S3 systems.
+//
+// For more information about the object expiration, see Elements to Describe
+// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
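
A sketch of removing a bucket's lifecycle rules, under the same assumptions as the earlier examples; per the propagation note above, a read immediately afterward may still see the old rules.

_, err := svc.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
	Bucket: aws.String("my-bucket"), // placeholder
})
// Deletion propagates asynchronously; an immediate
// GetBucketLifecycleConfiguration call may still return the deleted rules.
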
+//
+// Related actions include:
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -885,7 +1402,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
 
 // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -919,15 +1436,34 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
 	output = &DeleteBucketMetricsConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
 //
-// Deletes a metrics configuration (specified by the metrics configuration ID)
-// from the bucket.
+// Deletes a metrics configuration for the Amazon CloudWatch request metrics
+// (specified by the metrics configuration ID) from the bucket. Note that this
+// doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to DeleteBucketMetricsConfiguration:
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -962,7 +1498,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
 
 // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketPolicy operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
@@ -996,14 +1532,35 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
 	output = &DeleteBucketPolicyOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // DeleteBucketPolicy API operation for Amazon Simple Storage Service.
 //
-// Deletes the policy from the bucket.
+// This implementation of the DELETE operation uses the policy subresource to
+// delete the policy of a specified bucket. If you are using an identity other
+// than the root user of the AWS account that owns the bucket, the calling identity
+// must have the DeleteBucketPolicy permissions on the specified bucket and
+// belong to the bucket owner's account to use this operation.
+//
+// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
+// Access Denied error. If you have the correct permissions, but you're not
+// using an identity that belongs to the bucket owner's account, Amazon S3 returns
+// a 405 Method Not Allowed error.
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operations are related to DeleteBucketPolicy:
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1038,7 +1595,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
 
 // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketReplication operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
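
The 403-versus-405 distinction in the DeleteBucketPolicy doc above can be observed through awserr.RequestFailure; a hedged sketch under the same assumptions as the earlier examples (placeholder bucket, aws/awserr imported):

_, err := svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
	Bucket: aws.String("my-bucket"), // placeholder
})
if reqErr, ok := err.(awserr.RequestFailure); ok {
	switch reqErr.StatusCode() {
	case 403:
		// caller lacks the DeleteBucketPolicy permission
	case 405:
		// caller has permission but is not in the bucket owner's account
	}
}
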
@@ -1072,8 +1629,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) output = &DeleteBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1081,6 +1637,24 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // // Deletes the replication configuration from the bucket. // +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 Developer Guide. +// +// The following operations are related to DeleteBucketReplication: +// +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1114,7 +1688,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1148,8 +1722,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1157,6 +1730,16 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // Deletes the tags from the bucket. // +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1190,7 +1773,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketWebsite operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1224,14 +1807,32 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r output = &DeleteBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration from the bucket. +// This operation removes the website configuration for a bucket. Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1266,7 +1867,7 @@ const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the // client's request for the DeleteObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1309,6 +1910,30 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // marker, which becomes the latest version of the object. If there isn't a // null version, Amazon S3 does not remove any objects. // +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. 
If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). +// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// You can delete objects by explicitly calling the DELETE Object API or configure +// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny +// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. +// +// The following operation is related to DeleteObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1342,7 +1967,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjectTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1381,7 +2006,21 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // DeleteObjectTagging API operation for Amazon Simple Storage Service. // -// Removes the tag-set from an existing object. +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// To use this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1416,7 +2055,7 @@ const opDeleteObjects = "DeleteObjects" // DeleteObjectsRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjects operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1450,13 +2089,57 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque output = &DeleteObjectsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // DeleteObjects API operation for Amazon Simple Storage Service. // // This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success, or failure, in the response. Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion, the operation does not return any information about +// the delete in the response body. +// +// When performing this operation on an MFA Delete enabled bucket, that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. +// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1486,12 +2169,101 @@ func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput return out, req.Send() } +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. +// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAccelerateConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1530,7 +2302,33 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// Returns the accelerate configuration of a bucket. +// This implementation of the GET operation uses the accelerate subresource +// to return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. 
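A sketch of the DeletePublicAccessBlock call added above, also showing the runtime type assertion on awserr.Error that the doc comments recommend; bucket name is a placeholder:

package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// removePublicAccessBlock deletes the bucket's PublicAccessBlock configuration.
func removePublicAccessBlock(svc *s3.S3) {
	_, err := svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, per the doc comments.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Printf("S3 error %s: %s\n", aerr.Code(), aerr.Message())
			return
		}
		fmt.Println("non-API error:", err)
	}
}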
+// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1565,7 +2363,7 @@ const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1604,7 +2402,15 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// Gets the access control policy for the bucket. +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1639,7 +2445,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1678,8 +2484,27 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// Gets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1714,7 +2539,7 @@ const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the GetBucketCors operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1753,7 +2578,20 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the cors configuration for the bucket. +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1788,7 +2626,7 @@ const opGetBucketEncryption = "GetBucketEncryption" // GetBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the GetBucketEncryption operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1827,7 +2665,21 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service.
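A sketch reading the cors configuration described in the GetBucketCors doc above; bucket name is a placeholder:

package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printCORSRules lists the bucket's CORS rules.
func printCORSRules(svc *s3.S3) error {
	out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	for _, rule := range out.CORSRules {
		fmt.Println("allowed methods:", aws.StringValueSlice(rule.AllowedMethods))
		fmt.Println("allowed origins:", aws.StringValueSlice(rule.AllowedOrigins))
	}
	return nil
}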
// -// Returns the server-side encryption configuration of a bucket. +// Returns the default encryption configuration for an Amazon S3 bucket. For +// information about the Amazon S3 default encryption feature, see Amazon S3 +// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1862,7 +2714,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1901,8 +2753,25 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Returns an inventory configuration (identified by the inventory ID) from -// the bucket. +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). 
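A sketch reading the default encryption configuration per the GetBucketEncryption doc above; bucket name is a placeholder:

package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printDefaultEncryption shows the bucket's default SSE algorithm.
func printDefaultEncryption(svc *s3.S3) error {
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			// SSEAlgorithm is AES256 or aws:kms.
			fmt.Println("default algorithm:", aws.StringValue(def.SSEAlgorithm))
		}
	}
	return nil
}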
+// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1937,7 +2806,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1958,6 +2827,8 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -1979,7 +2850,34 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deprecated, see the GetBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. 
HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1988,6 +2886,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycle for usage and error information. // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) return out, req.Send() @@ -2002,6 +2902,8 @@ func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifec // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) req.SetContext(ctx) @@ -2014,7 +2916,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2053,11 +2955,41 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Returns the lifecycle configuration information set on the bucket. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are still using the previous version +// of the lifecycle configuration, it still works. For the earlier API description, +// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// +// Returns the lifecycle configuration information set on the bucket.
For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission, by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycleConfiguration for usage and error information. @@ -2088,7 +3020,7 @@ const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2127,7 +3059,17 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // GetBucketLocation API operation for Amazon Simple Storage Service. // -// Returns the region the bucket resides in. +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// To use this implementation of the operation, you must be the bucket owner. +// +// The following operations are related to GetBucketLocation: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2162,7 +3104,7 @@ const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. 
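A sketch of the GetBucketLocation call documented above; bucket name is a placeholder. The SDK's s3.NormalizeBucketLocation helper maps the empty LocationConstraint that S3 returns for us-east-1 buckets to a usable region ID:

package s3sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// bucketRegion returns the Region the bucket resides in.
func bucketRegion(svc *s3.S3) (string, error) {
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return "", err
	}
	// An empty LocationConstraint denotes us-east-1.
	return s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)), nil
}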
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2204,6 +3146,12 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // Returns the logging status of a bucket and the permissions users have to // view and modify that status. To use GET, you must be the bucket owner. // +// The following operations are related to GetBucketLogging: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2237,7 +3185,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2277,7 +3225,26 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // // Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2312,7 +3279,7 @@ const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2333,6 +3300,8 @@ const opGetBucketNotification = "GetBucketNotification" // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -2354,7 +3323,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// Deprecated, see the GetBucketNotificationConfiguration operation. +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2363,6 +3332,8 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotification for usage and error information. // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) return out, req.Send() @@ -2377,6 +3348,8 @@ func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequ // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) req.SetContext(ctx) @@ -2389,7 +3362,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
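The Request/Send two-step that the boilerplate above keeps describing ("inject custom logic ... such as custom headers") looks like this in practice; a sketch using GetBucketMetricsConfigurationRequest, where the bucket, configuration ID, and header are placeholders:

package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// metricsWithCustomHeader builds the request, mutates it, then sends it.
func metricsWithCustomHeader(svc *s3.S3) error {
	req, out := svc.GetBucketMetricsConfigurationRequest(&s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Id:     aws.String("EntireBucket"),   // placeholder configuration ID
	})
	// Inject a custom header before sending, the use case the
	// boilerplate describes for this pattern.
	req.HTTPRequest.Header.Set("X-Example-Trace", "sketch") // placeholder header
	if err := req.Send(); err != nil {
		return err
	}
	// out is populated only after Send returns without error.
	fmt.Println("metrics configuration:", out.MetricsConfiguration)
	return nil
}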
@@ -2430,6 +3403,22 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. // +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2463,7 +3452,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2502,7 +3491,26 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // GetBucketPolicy API operation for Amazon Simple Storage Service. // -// Returns the policy of a specified bucket. +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // Returns awserr.Error for service API and SDK errors. 
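A sketch of the GetBucketPolicy call documented above; bucket name is a placeholder:

package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printBucketPolicy prints the bucket's policy document.
func printBucketPolicy(svc *s3.S3) error {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	// Policy is the raw JSON policy document.
	fmt.Println(aws.StringValue(out.Policy))
	return nil
}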
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2532,12 +3540,102 @@ func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyI return out, req.Send() } +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2578,6 +3676,30 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // // Returns the replication configuration of a bucket. // +// It can take a while to propagate a put or delete of a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after a put or delete +// can return an outdated result. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires permissions for the s3:GetReplicationConfiguration +// action. For more information about permissions, see Using Bucket Policies +// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. +// +// For information about GetBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList). +// +// The following operations are related to GetBucketReplication: +// +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2611,7 +3733,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation.
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2650,7 +3772,13 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // GetBucketRequestPayment API operation for Amazon Simple Storage Service. // -// Returns the request payment configuration of a bucket. +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2685,7 +3813,7 @@ const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2726,6 +3854,21 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // Returns the tag set associated with the bucket. // +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2759,7 +3902,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2800,6 +3943,20 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // Returns the versioning state of a bucket. // +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. 
If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2833,7 +3990,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2872,7 +4029,21 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // GetBucketWebsite API operation for Amazon Simple Storage Service. // -// Returns the website configuration for a bucket. +// Returns the website configuration for a bucket. To host a website on Amazon +// S3, you can configure a bucket as a website by adding a website configuration. +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This GET operation requires the S3:GetBucketWebsite permission. By default, +// only the bucket owner can read the bucket website configuration. However, +// bucket owners can allow other users to read the website configuration by +// writing a bucket policy granting them the S3:GetBucketWebsite permission. +// +// The following operations are related to GetBucketWebsite: +// +// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2907,7 +4078,7 @@ const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2946,7 +4117,132 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // GetObject API operation for Amazon Simple Storage Service. // -// Retrieves objects from Amazon S3. +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system.
You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For +// more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// +// To distribute large files to many people, you can save bandwidth costs by +// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// For more information about returning the ACL of an object, see GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). +// +// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE +// storage classes, before you can retrieve the object you must first restore +// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectStateError error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 Bad Request error. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you GET the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging +// action), the response also returns the x-amz-tagging-count header that provides +// the number of tags associated with the object. You can use GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// to retrieve the tag set associated with an object. +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission.
+// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will +// return an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 will return +// an HTTP status code 403 ("access denied") error. +// +// Versioning +// +// By default, the GET operation returns the current version of an object. To +// return a different version, use the versionId subresource. +// +// If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. +// +// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). +// +// Overriding Response Header Values +// +// There are times when you want to override certain response header values +// in a GET response. For example, you might override the Content-Disposition +// response header value in your GET request. +// +// You can override values for a set of response headers using the following +// query parameters. These response header values are sent only on a successful +// request, that is, when status code 200 OK is returned. The set of headers +// you can override using these parameters is a subset of the headers that Amazon +// S3 accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values +// in the GET response, you use the following request parameters. +// +// You must sign the request, either using an Authorization header or a presigned +// URL, when using these parameters. They cannot be used with an unsigned (anonymous) +// request. +// +// * response-content-type +// +// * response-content-language +// +// * response-expires +// +// * response-cache-control +// +// * response-content-disposition +// +// * response-content-encoding +// +// Additional Considerations about Request Headers +// +// If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// +// If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since +// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// The following operations are related to GetObject: +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2986,7 +4282,7 @@ const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service.
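As a minimal sketch of the GetObject call documented above (not part of the vendored diff; the session setup, bucket, and key are placeholder assumptions, and the response-* query parameters surface as the Response* fields on GetObjectInput):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder session; real code would configure region and credentials.
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("examplebucket"),                   // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"), // placeholder
		// Overrides take effect only on signed, successful (200 OK) requests.
		ResponseContentType:        aws.String("application/octet-stream"),
		ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
	})
	if err != nil {
		// Runtime type assertion described in the generated docs.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Fprintf(os.Stderr, "GetObject: %s: %s\n", aerr.Code(), aerr.Message())
			os.Exit(1)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer out.Body.Close()
	io.Copy(os.Stdout, out.Body) // stream the object body to stdout
}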
// the "output" return value is not valid until after Send returns without error. @@ -3025,7 +4321,21 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // GetObjectAcl API operation for Amazon Simple Storage Service. // -// Returns the access control list (ACL) of an object. +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3060,3248 +4370,5781 @@ func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, return out, req.Send() } -const opGetObjectTagging = "GetObjectTagging" +const opGetObjectLegalHold = "GetObjectLegalHold" -// GetObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTagging operation. The "output" return +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetObjectTagging for more information on using the GetObjectTagging +// See GetObjectLegalHold for more information on using the GetObjectLegalHold // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetObjectTaggingRequest method. -// req, resp := client.GetObjectTaggingRequest(params) +// // Example sending a request using the GetObjectLegalHoldRequest method. 
+// req, resp := client.GetObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { op := &request.Operation{ - Name: opGetObjectTagging, + Name: opGetObjectLegalHold, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &GetObjectTaggingInput{} + input = &GetObjectLegalHoldInput{} } - output = &GetObjectTaggingOutput{} + output = &GetObjectLegalHoldOutput{} req = c.newRequest(op, input, output) return } -// GetObjectTagging API operation for Amazon Simple Storage Service. +// GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) return out, req.Send() } -// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of // the ability to pass a context and additional request options. // -// See GetObjectTagging for details on how to use this API operation. +// See GetObjectLegalHold for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetObjectTorrent = "GetObjectTorrent" +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. The "output" return +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetObjectTorrent for more information on using the GetObjectTorrent +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetObjectTorrentRequest method. -// req, resp := client.GetObjectTorrentRequest(params) +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opGetObjectTorrent, + Name: opGetObjectLockConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &GetObjectTorrentInput{} + input = &GetObjectLockConfigurationInput{} } - output = &GetObjectTorrentOutput{} + output = &GetObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) return } -// GetObjectTorrent API operation for Amazon Simple Storage Service. +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Return torrent files from a bucket. +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTorrent for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) +// API operation GetObjectLockConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) return out, req.Send() } -// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of // the ability to pass a context and additional request options. // -// See GetObjectTorrent for details on how to use this API operation. +// See GetObjectLockConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opHeadBucket = "HeadBucket" +const opGetObjectRetention = "GetObjectRetention" -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See HeadBucket for more information on using the HeadBucket +// See GetObjectRetention for more information on using the GetObjectRetention // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the HeadBucketRequest method. -// req, resp := client.HeadBucketRequest(params) +// // Example sending a request using the GetObjectRetentionRequest method. 
+// req, resp := client.GetObjectRetentionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", } if input == nil { - input = &HeadBucketInput{} + input = &GetObjectRetentionInput{} } - output = &HeadBucketOutput{} + output = &GetObjectRetentionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// HeadBucket API operation for Amazon Simple Storage Service. +// GetObjectRetention API operation for Amazon Simple Storage Service. // -// This operation is useful to determine if a bucket exists and you have permission -// to access it. +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadBucket for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) return out, req.Send() } -// HeadBucketWithContext is the same as HeadBucket with the addition of +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of // the ability to pass a context and additional request options. // -// See HeadBucket for details on how to use this API operation. +// See GetObjectRetention for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opHeadObject = "HeadObject" +const opGetObjectTagging = "GetObjectTagging" -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See HeadObject for more information on using the HeadObject +// See GetObjectTagging for more information on using the GetObjectTagging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the HeadObjectRequest method. -// req, resp := client.HeadObjectRequest(params) +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &HeadObjectInput{} + input = &GetObjectTaggingInput{} } - output = &HeadObjectOutput{} + output = &GetObjectTaggingOutput{} req = c.newRequest(op, input, output) return } -// HeadObject API operation for Amazon Simple Storage Service. +// GetObjectTagging API operation for Amazon Simple Storage Service. // -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. // -// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses -// for more information on returned errors. +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET operation returns information about the current +// version of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
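A minimal sketch of the GetObjectTagging call just described (illustrative only; the bucket, key, and version ID are placeholders, and VersionId is optional, exercising the s3:GetObjectVersionTagging permission mentioned above):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String("examplebucket"),                   // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"), // placeholder
		// Optional: tags of a specific version.
		VersionId: aws.String("null"), // placeholder version ID
	})
	if err != nil {
		fmt.Println("GetObjectTagging:", err)
		return
	}
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}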
+// +// The following operation is related to GetObjectTagging: +// +// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) +// API operation GetObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) return out, req.Send() } -// HeadObjectWithContext is the same as HeadObject with the addition of +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of // the ability to pass a context and additional request options. // -// See HeadObject for details on how to use this API operation. +// See GetObjectTagging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" +const opGetObjectTorrent = "GetObjectTorrent" -// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// See GetObjectTorrent for more information on using the GetObjectTorrent // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. -// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// // Example sending a request using the GetObjectTorrentRequest method. 
+// req, resp := client.GetObjectTorrentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ - Name: opListBucketAnalyticsConfigurations, + Name: opGetObjectTorrent, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", + HTTPPath: "/{Bucket}/{Key+}?torrent", } if input == nil { - input = &ListBucketAnalyticsConfigurationsInput{} + input = &GetObjectTorrentInput{} } - output = &ListBucketAnalyticsConfigurationsOutput{} + output = &GetObjectTorrentOutput{} req = c.newRequest(op, input, output) return } -// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get a torrent only for objects that are less than 5 GB in size and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// The following operation is related to GetObjectTorrent: // -// Lists the analytics configurations for the bucket. +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketAnalyticsConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) +// API operation GetObjectTorrent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) return out, req.Send() } -// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of // the ability to pass a context and additional request options. // -// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// See GetObjectTorrent for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests.
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" +const opGetPublicAccessBlock = "GetPublicAccessBlock" -// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. -// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { op := &request.Operation{ - Name: opListBucketInventoryConfigurations, + Name: opGetPublicAccessBlock, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", + HTTPPath: "/{Bucket}?publicAccessBlock", } if input == nil { - input = &ListBucketInventoryConfigurationsInput{} + input = &GetPublicAccessBlockInput{} } - output = &ListBucketInventoryConfigurationsOutput{} + output = &GetPublicAccessBlockOutput{} req = c.newRequest(op, input, output) return } -// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. 
+// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). // -// Returns a list of inventory configurations for the bucket. +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketInventoryConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) return out, req.Send() } -// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of // the ability to pass a context and additional request options. // -// See ListBucketInventoryConfigurations for details on how to use this API operation. +// See GetPublicAccessBlock for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" +const opHeadBucket = "HeadBucket" -// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// See HeadBucket for more information on using the HeadBucket // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. -// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// // Example sending a request using the HeadBucketRequest method. +// req, resp := client.HeadBucketRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ - Name: opListBucketMetricsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", } if input == nil { - input = &ListBucketMetricsConfigurationsInput{} + input = &HeadBucketInput{} } - output = &ListBucketMetricsConfigurationsOutput{} + output = &HeadBucketOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// HeadBucket API operation for Amazon Simple Storage Service. // -// Lists the metrics configurations for the bucket. +// This operation is useful to determine if a bucket exists and you have permission +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. Otherwise, the operation might return responses +// such as 404 Not Found and 403 Forbidden. 
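A small sketch of the existence check that HeadBucket enables, assuming a default session and a placeholder bucket name. Note one hedge: because HEAD responses carry no error body, the SDK may surface a bare "NotFound" code rather than ErrCodeNoSuchBucket, so the sketch checks both:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// bucketExists reports whether the bucket exists and the caller may access it.
func bucketExists(svc *s3.S3, name string) (bool, error) {
	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String(name)})
	if err == nil {
		return true, nil
	}
	if aerr, ok := err.(awserr.Error); ok {
		// "NotFound" covers HEAD responses mapped from the raw HTTP status.
		if aerr.Code() == s3.ErrCodeNoSuchBucket || aerr.Code() == "NotFound" {
			return false, nil
		}
	}
	// 403 Forbidden and transport errors surface here.
	return false, err
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	ok, err := bucketExists(svc, "examplebucket") // placeholder bucket name
	fmt.Println(ok, err)
}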
+// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketMetricsConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) return out, req.Send() } -// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// HeadBucketWithContext is the same as HeadBucket with the addition of // the ability to pass a context and additional request options. // -// See ListBucketMetricsConfigurations for details on how to use this API operation. +// See HeadBucket for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBuckets = "ListBuckets" +const opHeadObject = "HeadObject" -// ListBucketsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuckets operation. The "output" return +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See ListBuckets for more information on using the ListBuckets +// See HeadObject for more information on using the HeadObject // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBucketsRequest method. -// req, resp := client.ListBucketsRequest(params) +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ - Name: opListBuckets, - HTTPMethod: "GET", - HTTPPath: "/", + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", } if input == nil { - input = &ListBucketsInput{} + input = &HeadObjectInput{} } - output = &ListBucketsOutput{} + output = &HeadObjectOutput{} req = c.newRequest(op, input, output) return } -// ListBuckets API operation for Amazon Simple Storage Service. +// HeadObject API operation for Amazon Simple Storage Service. // -// Returns a list of all buckets owned by the authenticated sender of the request. +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. 
+// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. +// +// The following operation is related to HeadObject: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBuckets for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) return out, req.Send() } -// ListBucketsWithContext is the same as ListBuckets with the addition of +// HeadObjectWithContext is the same as HeadObject with the addition of // the ability to pass a context and additional request options. // -// See ListBuckets for details on how to use this API operation. +// See HeadObject for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListMultipartUploads = "ListMultipartUploads" +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" -// ListMultipartUploadsRequest generates a "aws/request.Request" representing the -// client's request for the ListMultipartUploads operation. 
The "output" return +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListMultipartUploads for more information on using the ListMultipartUploads +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListMultipartUploadsRequest method. -// req, resp := client.ListMultipartUploadsRequest(params) +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. +// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { op := &request.Operation{ - Name: opListMultipartUploads, + Name: opListBucketAnalyticsConfigurations, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, + HTTPPath: "/{Bucket}?analytics", } if input == nil { - input = &ListMultipartUploadsInput{} + input = &ListBucketAnalyticsConfigurationsInput{} } - output = &ListMultipartUploadsOutput{} + output = &ListBucketAnalyticsConfigurationsOutput{} req = c.newRequest(op, input, output) return } -// ListMultipartUploads API operation for Amazon Simple Storage Service. +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated +// is set to false. If there are more configurations to list, IsTruncated is +// set to true, and there will be a value in NextContinuationToken. You use +// the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). // -// This operation lists in-progress multipart uploads. +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListMultipartUploads for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) return out, req.Send() } -// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of // the ability to pass a context and additional request options. // -// See ListMultipartUploads for details on how to use this API operation. +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, -// calling the "fn" function with the response data for each page.
 
-// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListMultipartUploads method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-//    // Example iterating over at most 3 pages of a ListMultipartUploads operation.
-//    pageNum := 0
-//    err := client.ListMultipartUploadsPages(params,
-//        func(page *ListMultipartUploadsOutput, lastPage bool) bool {
-//            pageNum++
-//            fmt.Println(page)
-//            return pageNum <= 3
-//        })
-//
-func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
-	return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
-	p := request.Pagination{
-		NewRequest: func() (*request.Request, error) {
-			var inCpy *ListMultipartUploadsInput
-			if input != nil {
-				tmp := *input
-				inCpy = &tmp
-			}
-			req, _ := c.ListMultipartUploadsRequest(inCpy)
-			req.SetContext(ctx)
-			req.ApplyOptions(opts...)
-			return req, nil
-		},
-	}
-
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
-	}
-	return p.Err()
-}
-
-const opListObjectVersions = "ListObjectVersions"
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
 
-// ListObjectVersionsRequest generates a "aws/request.Request" representing the
-// client's request for the ListObjectVersions operation. The "output" return
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See ListObjectVersions for more information on using the ListObjectVersions
+// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the ListObjectVersionsRequest method.
-//    req, resp := client.ListObjectVersionsRequest(params)
+//    // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+//    req, resp := client.ListBucketInventoryConfigurationsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
-func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
 	op := &request.Operation{
-		Name:       opListObjectVersions,
+		Name:       opListBucketInventoryConfigurations,
 		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}?versions",
-		Paginator: &request.Paginator{
-			InputTokens:     []string{"KeyMarker", "VersionIdMarker"},
-			OutputTokens:    []string{"NextKeyMarker", "NextVersionIdMarker"},
-			LimitToken:      "MaxKeys",
-			TruncationToken: "IsTruncated",
-		},
+		HTTPPath:   "/{Bucket}?inventory",
 	}
 
 	if input == nil {
-		input = &ListObjectVersionsInput{}
+		input = &ListBucketInventoryConfigurationsInput{}
 	}
 
-	output = &ListObjectVersionsOutput{}
+	output = &ListBucketInventoryConfigurationsOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// ListObjectVersions API operation for Amazon Simple Storage Service.
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// Returns a list of inventory configurations for the bucket. You can have up
+// to 1,000 analytics configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false.
+// If there are more configurations to list, IsTruncated is set to true, and
+// there is a value in NextContinuationToken. You use the NextContinuationToken
+// value to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
+//
+// The following operations are related to ListBucketInventoryConfigurations:
+//
+//    * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+//    * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
 //
-// Returns metadata about all of the versions of objects in a bucket.
+//    * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
 //
 // Returns awserr.Error for service API and SDK errors.
 // Use runtime type assertions with awserr.Error's Code and Message methods
 // to get detailed information about the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation ListObjectVersions for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
-func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
-	req, out := c.ListObjectVersionsRequest(input)
+// API operation ListBucketInventoryConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
+	req, out := c.ListBucketInventoryConfigurationsRequest(input)
 	return out, req.Send()
 }
 
-// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
+// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of
 // the ability to pass a context and additional request options.
 //
-// See ListObjectVersions for details on how to use this API operation.
+// See ListBucketInventoryConfigurations for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
-	req, out := c.ListObjectVersionsRequest(input)
+func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) {
+	req, out := c.ListBucketInventoryConfigurationsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListObjectVersions method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-//    // Example iterating over at most 3 pages of a ListObjectVersions operation.
-//    pageNum := 0
-//    err := client.ListObjectVersionsPages(params,
-//        func(page *ListObjectVersionsOutput, lastPage bool) bool {
-//            pageNum++
-//            fmt.Println(page)
-//            return pageNum <= 3
-//        })
-//
-func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
-	return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
-	p := request.Pagination{
-		NewRequest: func() (*request.Request, error) {
-			var inCpy *ListObjectVersionsInput
-			if input != nil {
-				tmp := *input
-				inCpy = &tmp
-			}
-			req, _ := c.ListObjectVersionsRequest(inCpy)
-			req.SetContext(ctx)
-			req.ApplyOptions(opts...)
-			return req, nil
-		},
-	}
-
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
-	}
-	return p.Err()
-}
-
-const opListObjects = "ListObjects"
+const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
 
-// ListObjectsRequest generates a "aws/request.Request" representing the
-// client's request for the ListObjects operation. The "output" return
+// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketMetricsConfigurations operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See ListObjects for more information on using the ListObjects
+// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the ListObjectsRequest method.
-//    req, resp := client.ListObjectsRequest(params)
+//    // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
+//    req, resp := client.ListBucketMetricsConfigurationsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
-func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
 	op := &request.Operation{
-		Name:       opListObjects,
+		Name:       opListBucketMetricsConfigurations,
 		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}",
-		Paginator: &request.Paginator{
-			InputTokens:     []string{"Marker"},
-			OutputTokens:    []string{"NextMarker || Contents[-1].Key"},
-			LimitToken:      "MaxKeys",
-			TruncationToken: "IsTruncated",
-		},
+		HTTPPath:   "/{Bucket}?metrics",
 	}
 
 	if input == nil {
-		input = &ListObjectsInput{}
+		input = &ListBucketMetricsConfigurationsInput{}
 	}
 
-	output = &ListObjectsOutput{}
+	output = &ListBucketMetricsConfigurationsOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// ListObjects API operation for Amazon Simple Storage Service.
+// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
 //
-// Returns some or all (up to 1000) of the objects in a bucket. You can use
-// the request parameters as selection criteria to return a subset of the objects
-// in a bucket.
+// Lists the metrics configurations for the bucket.
+// The metrics configurations are only for the request metrics of the bucket
+// and do not provide information on daily storage metrics. You can have up
+// to 1,000 configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false.
+// If there are more configurations to list, IsTruncated is set to true, and
+// there is a value in NextContinuationToken. You use the NextContinuationToken
+// value to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For more information about metrics configurations and CloudWatch request
+// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to ListBucketMetricsConfigurations:
+//
+//    * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+//
+//    * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+//    * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation ListObjects for usage and error information.
-//
-// Returned Error Codes:
-//   * ErrCodeNoSuchBucket "NoSuchBucket"
-//   The specified bucket does not exist.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
-func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
-	req, out := c.ListObjectsRequest(input)
+// API operation ListBucketMetricsConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
+	req, out := c.ListBucketMetricsConfigurationsRequest(input)
 	return out, req.Send()
 }
 
-// ListObjectsWithContext is the same as ListObjects with the addition of
+// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of
 // the ability to pass a context and additional request options.
 //
-// See ListObjects for details on how to use this API operation.
+// See ListBucketMetricsConfigurations for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation.
 // If the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
-	req, out := c.ListObjectsRequest(input)
+func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) {
+	req, out := c.ListBucketMetricsConfigurationsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-// ListObjectsPages iterates over the pages of a ListObjects operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBuckets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See ListObjects method for more information on how to use this operation.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
 //
-// Note: This operation can generate multiple requests to a service.
+// See ListBuckets for more information on using the ListBuckets
+// API call, and error handling.
 //
-//    // Example iterating over at most 3 pages of a ListObjects operation.
-//    pageNum := 0
-//    err := client.ListObjectsPages(params,
-//        func(page *ListObjectsOutput, lastPage bool) bool {
-//            pageNum++
-//            fmt.Println(page)
-//            return pageNum <= 3
-//        })
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
-func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
-	return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+//
+//    // Example sending a request using the ListBucketsRequest method.
+//    req, resp := client.ListBucketsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
+	op := &request.Operation{
+		Name:       opListBuckets,
+		HTTPMethod: "GET",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ListBucketsInput{}
+	}
+
+	output = &ListBucketsOutput{}
+	req = c.newRequest(op, input, output)
+	return
 }
 
-// ListObjectsPagesWithContext same as ListObjectsPages except
-// it takes a Context and allows setting request options on the pages.
+// ListBuckets API operation for Amazon Simple Storage Service.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBuckets for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+	req, out := c.ListBucketsRequest(input)
+	return out, req.Send()
+}
+
+// ListBucketsWithContext is the same as ListBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBuckets for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
-	p := request.Pagination{
-		NewRequest: func() (*request.Request, error) {
-			var inCpy *ListObjectsInput
-			if input != nil {
-				tmp := *input
-				inCpy = &tmp
-			}
-			req, _ := c.ListObjectsRequest(inCpy)
-			req.SetContext(ctx)
-			req.ApplyOptions(opts...)
-			return req, nil
-		},
-	}
-
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
-	}
-	return p.Err()
+func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
+	req, out := c.ListBucketsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }
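ListBuckets is new in this vendored copy as a plain, unpaginated call. A minimal sketch of invoking it under a deadline through the WithContext variant; the session setup and timeout are illustrative, not part of this diff:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bound the call with a deadline; the WithContext variant panics on a
	// nil context, so always pass a real one.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
}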
 
-const opListObjectsV2 = "ListObjectsV2"
+const opListMultipartUploads = "ListMultipartUploads"
 
-// ListObjectsV2Request generates a "aws/request.Request" representing the
-// client's request for the ListObjectsV2 operation. The "output" return
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See ListObjectsV2 for more information on using the ListObjectsV2
+// See ListMultipartUploads for more information on using the ListMultipartUploads
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the ListObjectsV2Request method.
-//    req, resp := client.ListObjectsV2Request(params)
+//    // Example sending a request using the ListMultipartUploadsRequest method.
+//    req, resp := client.ListMultipartUploadsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
-func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
 	op := &request.Operation{
-		Name:       opListObjectsV2,
+		Name:       opListMultipartUploads,
 		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}?list-type=2",
+		HTTPPath:   "/{Bucket}?uploads",
 		Paginator: &request.Paginator{
-			InputTokens:     []string{"ContinuationToken"},
-			OutputTokens:    []string{"NextContinuationToken"},
-			LimitToken:      "MaxKeys",
-			TruncationToken: "",
+			InputTokens:     []string{"KeyMarker", "UploadIdMarker"},
+			OutputTokens:    []string{"NextKeyMarker", "NextUploadIdMarker"},
+			LimitToken:      "MaxUploads",
+			TruncationToken: "IsTruncated",
 		},
 	}
 
 	if input == nil {
-		input = &ListObjectsV2Input{}
+		input = &ListMultipartUploadsInput{}
 	}
 
-	output = &ListObjectsV2Output{}
+	output = &ListMultipartUploadsOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// ListObjectsV2 API operation for Amazon Simple Storage Service.
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
 //
-// Returns some or all (up to 1000) of the objects in a bucket. You can use
-// the request parameters as selection criteria to return a subset of the objects
-// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend
-// you use this revised API for new application development.
+// This operation lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted.
+//
+// This operation returns at most 1,000 multipart uploads in the response. 1,000
+// multipart uploads is the maximum number of uploads a response can include,
+// which is also the default value. You can further limit the number of uploads
+// in a response by specifying the max-uploads parameter in the response. If
+// additional multipart uploads satisfy the list criteria, the response will
+// contain an IsTruncated element with the value true. To list the additional
+// multipart uploads, use the key-marker and upload-id-marker request parameters.
+//
+// In the response, the uploads are sorted by key. If your application has initiated
+// more than one multipart upload using the same object key, then uploads in
+// the response are first sorted by key. Additionally, uploads are sorted in
+// ascending order within each key by the upload initiation time.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListMultipartUploads:
+//
+//    * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+//    * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+//    * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+//    * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+//    * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation ListObjectsV2 for usage and error information.
-//
-// Returned Error Codes:
-//   * ErrCodeNoSuchBucket "NoSuchBucket"
-//   The specified bucket does not exist.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
-func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
-	req, out := c.ListObjectsV2Request(input)
+// API operation ListMultipartUploads for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+	req, out := c.ListMultipartUploadsRequest(input)
 	return out, req.Send()
 }
 
-// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
 // the ability to pass a context and additional request options.
 //
-// See ListObjectsV2 for details on how to use this API operation.
+// See ListMultipartUploads for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
-	req, out := c.ListObjectsV2Request(input)
+func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
+	req, out := c.ListMultipartUploadsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
 // calling the "fn" function with the response data for each page. To stop
 // iterating, return false from the fn function.
 //
-// See ListObjectsV2 method for more information on how to use this operation.
+// See ListMultipartUploads method for more information on how to use this operation.
 //
 // Note: This operation can generate multiple requests to a service.
 //
-//    // Example iterating over at most 3 pages of a ListObjectsV2 operation.
+//    // Example iterating over at most 3 pages of a ListMultipartUploads operation.
 //    pageNum := 0
-//    err := client.ListObjectsV2Pages(params,
-//        func(page *ListObjectsV2Output, lastPage bool) bool {
+//    err := client.ListMultipartUploadsPages(params,
+//        func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
 //        })
 //
-func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
-	return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
+	return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
 }
 
-// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except
+// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
 // it takes a Context and allows setting request options on the pages.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
 	p := request.Pagination{
 		NewRequest: func() (*request.Request, error) {
-			var inCpy *ListObjectsV2Input
+			var inCpy *ListMultipartUploadsInput
 			if input != nil {
 				tmp := *input
 				inCpy = &tmp
 			}
-			req, _ := c.ListObjectsV2Request(inCpy)
+			req, _ := c.ListMultipartUploadsRequest(inCpy)
 			req.SetContext(ctx)
 			req.ApplyOptions(opts...)
 			return req, nil
 		},
 	}
 
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
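The generated paginator now drives the callback with an explicit break instead of the old cont flag; behavior is unchanged for callers. A short sketch of walking every page of in-progress uploads with the Pages helper (the bucket name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Print the key and upload ID of every in-progress multipart upload;
	// returning true asks the paginator for the next page.
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("my-bucket"),
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Printf("%s %s\n", aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paginating
	})
	if err != nil {
		log.Fatal(err)
	}
}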
 
-const opListParts = "ListParts"
+const opListObjectVersions = "ListObjectVersions"
 
-// ListPartsRequest generates a "aws/request.Request" representing the
-// client's request for the ListParts operation. The "output" return
+// ListObjectVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjectVersions operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See ListParts for more information on using the ListParts
+// See ListObjectVersions for more information on using the ListObjectVersions
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the ListPartsRequest method.
-//    req, resp := client.ListPartsRequest(params)
+//    // Example sending a request using the ListObjectVersionsRequest method.
+//    req, resp := client.ListObjectVersionsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
-func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
 	op := &request.Operation{
-		Name:       opListParts,
+		Name:       opListObjectVersions,
 		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}/{Key+}",
+		HTTPPath:   "/{Bucket}?versions",
 		Paginator: &request.Paginator{
-			InputTokens:     []string{"PartNumberMarker"},
-			OutputTokens:    []string{"NextPartNumberMarker"},
-			LimitToken:      "MaxParts",
+			InputTokens:     []string{"KeyMarker", "VersionIdMarker"},
+			OutputTokens:    []string{"NextKeyMarker", "NextVersionIdMarker"},
+			LimitToken:      "MaxKeys",
 			TruncationToken: "IsTruncated",
 		},
 	}
 
 	if input == nil {
-		input = &ListPartsInput{}
+		input = &ListObjectVersionsInput{}
 	}
 
-	output = &ListPartsOutput{}
+	output = &ListObjectVersionsOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// ListParts API operation for Amazon Simple Storage Service.
+// ListObjectVersions API operation for Amazon Simple Storage Service.
 //
-// Lists the parts that have been uploaded for a specific multipart upload.
+// Returns metadata about all of the versions of objects in a bucket. You can
+// also use request parameters as selection criteria to return metadata about
+// a subset of all the object versions.
+//
+// A 200 OK response can contain valid or invalid XML. Make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// The following operations are related to ListObjectVersions:
+//
+//    * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+//    * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+//    * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+//    * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation ListParts for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
-func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
-	req, out := c.ListPartsRequest(input)
+// API operation ListObjectVersions for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+	req, out := c.ListObjectVersionsRequest(input)
 	return out, req.Send()
 }
 
-// ListPartsWithContext is the same as ListParts with the addition of
+// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
 // the ability to pass a context and additional request options.
 //
-// See ListParts for details on how to use this API operation.
+// See ListObjectVersions for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
-	req, out := c.ListPartsRequest(input)
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
+	req, out := c.ListObjectVersionsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-// ListPartsPages iterates over the pages of a ListParts operation,
+// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
 // calling the "fn" function with the response data for each page. To stop
 // iterating, return false from the fn function.
 //
-// See ListParts method for more information on how to use this operation.
+// See ListObjectVersions method for more information on how to use this operation.
 //
 // Note: This operation can generate multiple requests to a service.
 //
-//    // Example iterating over at most 3 pages of a ListParts operation.
+//    // Example iterating over at most 3 pages of a ListObjectVersions operation.
 //    pageNum := 0
-//    err := client.ListPartsPages(params,
-//        func(page *ListPartsOutput, lastPage bool) bool {
+//    err := client.ListObjectVersionsPages(params,
+//        func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
 //        })
 //
-func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
-	return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
+	return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
}
 
-// ListPartsPagesWithContext same as ListPartsPages except
+// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except
 // it takes a Context and allows setting request options on the pages.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
 	p := request.Pagination{
 		NewRequest: func() (*request.Request, error) {
-			var inCpy *ListPartsInput
+			var inCpy *ListObjectVersionsInput
 			if input != nil {
 				tmp := *input
 				inCpy = &tmp
 			}
-			req, _ := c.ListPartsRequest(inCpy)
+			req, _ := c.ListObjectVersionsRequest(inCpy)
 			req.SetContext(ctx)
 			req.ApplyOptions(opts...)
 			return req, nil
 		},
 	}
 
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
 
-const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
+const opListObjects = "ListObjects"
 
-// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
+// ListObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjects operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration
+// See ListObjects for more information on using the ListObjects
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
-//    req, resp := client.PutBucketAccelerateConfigurationRequest(params)
+//    // Example sending a request using the ListObjectsRequest method.
+//    req, resp := client.ListObjectsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
-func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
 	op := &request.Operation{
-		Name:       opPutBucketAccelerateConfiguration,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?accelerate",
+		Name:       opListObjects,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"NextMarker || Contents[-1].Key"},
+			LimitToken:      "MaxKeys",
+			TruncationToken: "IsTruncated",
+		},
 	}
 
 	if input == nil {
-		input = &PutBucketAccelerateConfigurationInput{}
+		input = &ListObjectsInput{}
 	}
 
-	output = &PutBucketAccelerateConfigurationOutput{}
+	output = &ListObjectsOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
-// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+// ListObjects API operation for Amazon Simple Storage Service.
 //
-// Sets the accelerate configuration of an existing bucket.
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure
+// to design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// This API has been revised.
+// We recommend that you use the newer version, ListObjectsV2
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html),
+// when developing applications. For backward compatibility, Amazon S3 continues
+// to support ListObjects.
+//
+// The following operations are related to ListObjects:
+//
+//    * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+//    * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+//    * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+//    * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+//    * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketAccelerateConfiguration for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
-func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
-	req, out := c.PutBucketAccelerateConfigurationRequest(input)
+// API operation ListObjects for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
+//   The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+	req, out := c.ListObjectsRequest(input)
 	return out, req.Send()
 }
 
-// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
+// ListObjectsWithContext is the same as ListObjects with the addition of
 // the ability to pass a context and additional request options.
 //
-// See PutBucketAccelerateConfiguration for details on how to use this API operation.
+// See ListObjects for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
-	req, out := c.PutBucketAccelerateConfigurationRequest(input)
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+	req, out := c.ListObjectsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-const opPutBucketAcl = "PutBucketAcl"
+// ListObjectsPages iterates over the pages of a ListObjects operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjects method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListObjects operation.
+//    pageNum := 0
+//    err := client.ListObjectsPages(params,
+//        func(page *s3.ListObjectsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+	return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
 
-// PutBucketAclRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketAcl operation. The "output" return
+// ListObjectsPagesWithContext same as ListObjectsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListObjectsInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListObjectsRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
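ListObjects gains Pages/PagesWithContext helpers in this version, wrapping the Marker/NextMarker tokens declared in the Paginator above. A sketch that gathers keys under a prefix across pages; the bucket and prefix are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Accumulate every key under the prefix; the paginator follows
	// NextMarker (or the last Contents key) between requests.
	var keys []string
	err := svc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket: aws.String("my-bucket"),
		Prefix: aws.String("photos/"),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, obj := range page.Contents {
			keys = append(keys, aws.StringValue(obj.Key))
		}
		return !lastPage
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(keys), "objects")
}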
+
+const opListObjectsV2 = "ListObjectsV2"
+
+// ListObjectsV2Request generates a "aws/request.Request" representing the
+// client's request for the ListObjectsV2 operation. The "output" return
 // value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See PutBucketAcl for more information on using the PutBucketAcl
+// See ListObjectsV2 for more information on using the ListObjectsV2
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the PutBucketAclRequest method.
-//    req, resp := client.PutBucketAclRequest(params)
+//    // Example sending a request using the ListObjectsV2Request method.
+//    req, resp := client.ListObjectsV2Request(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
-func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
 	op := &request.Operation{
-		Name:       opPutBucketAcl,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?acl",
+		Name:       opListObjectsV2,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?list-type=2",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"ContinuationToken"},
+			OutputTokens:    []string{"NextContinuationToken"},
+			LimitToken:      "MaxKeys",
+			TruncationToken: "",
+		},
 	}
 
 	if input == nil {
-		input = &PutBucketAclInput{}
+		input = &ListObjectsV2Input{}
 	}
 
-	output = &PutBucketAclOutput{}
+	output = &ListObjectsV2Output{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
-// PutBucketAcl API operation for Amazon Simple Storage Service.
+// ListObjectsV2 API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure
+// to design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// To use this operation in an AWS Identity and Access Management (IAM) policy,
+// you must have permissions to perform the s3:ListBucket action. The bucket
+// owner has this permission by default and can grant this permission to others.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// This section describes the latest revision of the API. We recommend that
+// you use this revised API for application development. For backward compatibility,
+// Amazon S3 continues to support the prior version of this API, ListObjects
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html).
+//
+// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html).
+//
+// The following operations are related to ListObjectsV2:
+//
+//    * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+//    * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
 //
-// Sets the permissions on a bucket using access control lists (ACL).
+//    * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketAcl for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
-func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
-	req, out := c.PutBucketAclRequest(input)
+// API operation ListObjectsV2 for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
+//   The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
+	req, out := c.ListObjectsV2Request(input)
 	return out, req.Send()
 }
 
-// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
 // the ability to pass a context and additional request options.
 //
-// See PutBucketAcl for details on how to use this API operation.
+// See ListObjectsV2 for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
-	req, out := c.PutBucketAclRequest(input)
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+	req, out := c.ListObjectsV2Request(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
+// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectsV2 method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListObjectsV2 operation.
+//    pageNum := 0
+//    err := client.ListObjectsV2Pages(params,
+//        func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+	return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
 
-// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
+// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListObjectsV2Input
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListObjectsV2Request(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
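Because each page request inherits the caller's context, cancelling the context stops the iteration and, in my reading of the generated code, surfaces the failure through p.Err() as the helper's return value. A sketch using ListObjectsV2PagesWithContext under a timeout; the names are illustrative:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Abandon the listing if it takes longer than 30 seconds overall.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var count int64
	err := svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
		Bucket: aws.String("my-bucket"), // placeholder
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		count += aws.Int64Value(page.KeyCount)
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(count, "keys")
}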
+// This request returns a maximum of 1,000 uploaded parts. The default number +// of parts returned is 1,000 parts. You can restrict the number of parts returned +// by specifying the max-parts request parameter. If your multipart upload consists +// of more than 1,000 parts, the response returns an IsTruncated field with +// the value of true, and a NextPartNumberMarker element. In subsequent ListParts +// requests you can include the part-number-marker query string parameter and +// set its value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} -// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// ListPartsWithContext is the same as ListParts with the addition of // the ability to pass a context and additional request options. // -// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// See ListParts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
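+//
+// A minimal usage sketch (assuming a configured *s3.S3 client named client,
+// a context ctx, and placeholder bucket, key, and upload ID values):
+//
+//    out, err := client.ListPartsWithContext(ctx, &s3.ListPartsInput{
+//        Bucket:   aws.String("examplebucket"),
+//        Key:      aws.String("large-object"),
+//        UploadId: aws.String("example-upload-id"),
+//        MaxParts: aws.Int64(100),
+//    })
+//    if err == nil && aws.BoolValue(out.IsTruncated) {
+//        fmt.Println("more parts after", aws.Int64Value(out.NextPartNumberMarker))
+//    }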
-func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketCors = "PutBucketCors" +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// PutBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketCors operation. The "output" return +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketCors for more information on using the PutBucketCors +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketCorsRequest method. 
-// req, resp := client.PutBucketCorsRequest(params) +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ - Name: opPutBucketCors, + Name: opPutBucketAccelerateConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?cors", + HTTPPath: "/{Bucket}?accelerate", } if input == nil { - input = &PutBucketCorsInput{} + input = &PutBucketAccelerateConfigurationInput{} } - output = &PutBucketCorsOutput{} + output = &PutBucketAccelerateConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketCors API operation for Amazon Simple Storage Service. +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// operation returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). // -// Sets the cors configuration for a bucket. +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). 
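+//
+// A minimal usage sketch of enabling acceleration (assuming a configured
+// *s3.S3 client named client and a placeholder bucket name):
+//
+//    _, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//        AccelerateConfiguration: &s3.AccelerateConfiguration{
+//            Status: aws.String(s3.BucketAccelerateStatusEnabled),
+//        },
+//    })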
+// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketCors for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) return out, req.Send() } -// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutBucketCors for details on how to use this API operation. +// See PutBucketAccelerateConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketEncryption = "PutBucketEncryption" +const opPutBucketAcl = "PutBucketAcl" -// PutBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketEncryption operation. The "output" return +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketEncryption for more information on using the PutBucketEncryption +// See PutBucketAcl for more information on using the PutBucketAcl // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the PutBucketEncryptionRequest method. -// req, resp := client.PutBucketEncryptionRequest(params) +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption -func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ - Name: opPutBucketEncryption, + Name: opPutBucketAcl, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?encryption", + HTTPPath: "/{Bucket}?acl", } if input == nil { - input = &PutBucketEncryptionInput{} + input = &PutBucketAclInput{} } - output = &PutBucketEncryptionOutput{} + output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketEncryption API operation for Amazon Simple Storage Service. +// PutBucketAcl API operation for Amazon Simple Storage Service. // -// Creates a new server-side encryption configuration (or replaces an existing -// one, if present). +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. 
If you
+// use these ACL-specific headers, you cannot use the x-amz-acl header to
+// set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following:
+//
+// * id – if the value specified is the canonical user ID of an AWS account
+//
+// * uri – if you are granting permissions to a predefined group
+//
+// * emailAddress – if the value specified is the email address of an AWS
+// account. Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia), US West (N. California),
+// US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia
+// Pacific (Tokyo), Europe (Ireland), and South America (São Paulo). For a
+// list of all the Amazon S3 supported Regions and endpoints, see Regions
+// and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// For example, the following x-amz-grant-write header grants create, overwrite,
+// and delete objects permission to the LogDelivery group predefined by Amazon
+// S3 and two AWS accounts identified by their email addresses:
+//
+// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific
+// (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland),
+// and South America (São Paulo). For a list of all the Amazon S3 supported
+// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketEncryption for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
-func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) {
-	req, out := c.PutBucketEncryptionRequest(input)
+// API operation PutBucketAcl for usage and error information.
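+//
+// A minimal sketch of applying a canned ACL (assuming a configured *s3.S3
+// client named client and a placeholder bucket name):
+//
+//    _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
+//        Bucket: aws.String("examplebucket"),
+//        ACL:    aws.String(s3.BucketCannedACLPrivate),
+//    })
+//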
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) return out, req.Send() } -// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of // the ability to pass a context and additional request options. // -// See PutBucketEncryption for details on how to use this API operation. +// See PutBucketAcl for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { - req, out := c.PutBucketEncryptionRequest(input) +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" -// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketInventoryConfigurationRequest method. -// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { op := &request.Operation{ - Name: opPutBucketInventoryConfiguration, + Name: opPutBucketAnalyticsConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?inventory", + HTTPPath: "/{Bucket}?analytics", } if input == nil { - input = &PutBucketInventoryConfigurationInput{} + input = &PutBucketAnalyticsConfigurationInput{} } - output = &PutBucketInventoryConfigurationOutput{} + output = &PutBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// Adds an inventory configuration (identified by the inventory ID) from the +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per // bucket. // +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketInventoryConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) return out, req.Send() } -// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutBucketInventoryConfiguration for details on how to use this API operation. +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
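+//
+// A minimal usage sketch (assuming a configured *s3.S3 client named client
+// and placeholder bucket and configuration ID values):
+//
+//    _, err := client.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//        Id:     aws.String("report1"),
+//        AnalyticsConfiguration: &s3.AnalyticsConfiguration{
+//            Id:                   aws.String("report1"),
+//            StorageClassAnalysis: &s3.StorageClassAnalysis{},
+//        },
+//    })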
-func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketLifecycle = "PutBucketLifecycle" +const opPutBucketCors = "PutBucketCors" -// PutBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycle operation. The "output" return +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// See PutBucketCors for more information on using the PutBucketCors // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketLifecycleRequest method. -// req, resp := client.PutBucketLifecycleRequest(params) +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") - } +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ - Name: opPutBucketLifecycle, + Name: opPutBucketCors, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", + HTTPPath: "/{Bucket}?cors", } if input == nil { - input = &PutBucketLifecycleInput{} + input = &PutBucketCorsInput{} } - output = &PutBucketLifecycleOutput{} + output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// PutBucketCors API operation for Amazon Simple Storage Service. // -// Deprecated, see the PutBucketLifecycleConfiguration operation. +// Sets the cors configuration for your bucket. 
If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) return out, req.Send() } -// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of // the ability to pass a context and additional request options. // -// See PutBucketLifecycle for details on how to use this API operation. +// See PutBucketCors for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" +const opPutBucketEncryption = "PutBucketEncryption" -// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// See PutBucketEncryption for more information on using the PutBucketEncryption // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. -// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { op := &request.Operation{ - Name: opPutBucketLifecycleConfiguration, + Name: opPutBucketEncryption, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", + HTTPPath: "/{Bucket}?encryption", } if input == nil { - input = &PutBucketLifecycleConfigurationInput{} + input = &PutBucketEncryptionInput{} } - output = &PutBucketLifecycleConfigurationOutput{} + output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. 
+// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// This implementation of the PUT operation uses the encryption subresource +// to set the default encryption state of an existing bucket. +// +// This implementation of the PUT operation sets default encryption for a bucket +// using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS +// customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 +// default encryption feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). // -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. +// This operation requires AWS Signature Version 4. For more information, see +// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycleConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) +// API operation PutBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) return out, req.Send() } -// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of // the ability to pass a context and additional request options. // -// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// See PutBucketEncryption for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
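+//
+// A minimal sketch of enabling SSE-S3 default encryption (assuming a
+// configured *s3.S3 client named client and a placeholder bucket name):
+//
+//    _, err := client.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//        Bucket: aws.String("examplebucket"),
+//        ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//            Rules: []*s3.ServerSideEncryptionRule{{
+//                ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//                    SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
+//                },
+//            }},
+//        },
+//    })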
-func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketLogging = "PutBucketLogging" +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" -// PutBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLogging operation. The "output" return +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketLogging for more information on using the PutBucketLogging +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketLoggingRequest method. -// req, resp := client.PutBucketLoggingRequest(params) +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { op := &request.Operation{ - Name: opPutBucketLogging, + Name: opPutBucketInventoryConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?logging", + HTTPPath: "/{Bucket}?inventory", } if input == nil { - input = &PutBucketLoggingInput{} + input = &PutBucketInventoryConfigurationInput{} } - output = &PutBucketLoggingOutput{} + output = &PutBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketLogging API operation for Amazon Simple Storage Service. +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. To set the logging status of -// a bucket, you must be the bucket owner. 
+// This implementation of the PUT operation adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same AWS Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLogging for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) return out, req.Send() } -// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutBucketLogging for details on how to use this API operation. +// See PutBucketInventoryConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" +const opPutBucketLifecycle = "PutBucketLifecycle" -// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// See PutBucketLifecycle for more information on using the PutBucketLifecycle // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketMetricsConfigurationRequest method. -// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// // Example sending a request using the PutBucketLifecycleRequest method. 
+// req, resp := client.PutBucketLifecycleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration -func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } op := &request.Operation{ - Name: opPutBucketMetricsConfiguration, + Name: opPutBucketLifecycle, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?metrics", + HTTPPath: "/{Bucket}?lifecycle", } if input == nil { - input = &PutBucketMetricsConfigurationInput{} + input = &PutBucketLifecycleInput{} } - output = &PutBucketMetricsConfigurationOutput{} + output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. 
If you want to prevent users or accounts from removing
+// or deleting objects from your bucket, you must deny them permissions for
+// the following actions:
+//
+// * s3:DeleteObject
+//
+// * s3:DeleteObjectVersion
+//
+// * s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see Managing Access Permissions to
+// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// For more examples of transitioning objects to storage classes such as STANDARD_IA
+// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
+//
+// Related Resources
+//
+// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) (Deprecated)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+//
+// * By default, a resource owner, in this case a bucket owner, which is
+// the AWS account that created the bucket, can perform any of the operations.
+// A resource owner can also grant others permission to perform the operation.
+// For more information, see the following topics in the Amazon Simple Storage
+// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketMetricsConfiguration for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
-func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
-	req, out := c.PutBucketMetricsConfigurationRequest(input)
+// API operation PutBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+//
+// Deprecated: PutBucketLifecycle has been deprecated
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+	req, out := c.PutBucketLifecycleRequest(input)
 	return out, req.Send()
 }

-// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
 // the ability to pass a context and additional request options.
 //
-// See PutBucketMetricsConfiguration for details on how to use this API operation.
+// See PutBucketLifecycle for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
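A minimal sketch of calling the deprecated operation above with a cancellable context, as the doc comment requires, assuming aws-sdk-go v1 with region and credentials supplied by the environment; the bucket name, prefix, and expiration are placeholders, and the replacement PutBucketLifecycleConfigurationWithContext takes the same (ctx, input, opts...) shape:

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // The context must be non-nil; the request is abandoned if the
        // deadline expires before Send completes.
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        _, err := svc.PutBucketLifecycleWithContext(ctx, &s3.PutBucketLifecycleInput{
            Bucket: aws.String("example-bucket"), // placeholder
            LifecycleConfiguration: &s3.LifecycleConfiguration{
                // The deprecated API filters on a key prefix only.
                Rules: []*s3.Rule{{
                    Prefix:     aws.String("tmp/"),
                    Status:     aws.String("Enabled"),
                    Expiration: &s3.LifecycleExpiration{Days: aws.Int64(7)},
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }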
-func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { - req, out := c.PutBucketMetricsConfigurationRequest(input) +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketNotification = "PutBucketNotification" +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" -// PutBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotification operation. The "output" return +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketNotification for more information on using the PutBucketNotification +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketNotificationRequest method. -// req, resp := client.PutBucketNotificationRequest(params) +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
 //
 // err := req.Send()
 // if err == nil { // resp is now filled
 // fmt.Println(resp)
 // }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
-func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
-	if c.Client.Config.Logger != nil {
-		c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
-	}
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
 	op := &request.Operation{
-		Name:       opPutBucketNotification,
+		Name:       opPutBucketLifecycleConfiguration,
 		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?notification",
+		HTTPPath:   "/{Bucket}?lifecycle",
 	}

 	if input == nil {
-		input = &PutBucketNotificationInput{}
+		input = &PutBucketLifecycleConfigurationInput{}
 	}

-	output = &PutBucketNotificationOutput{}
+	output = &PutBucketLifecycleConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Build.PushBackNamed(request.NamedHandler{
+		Name: "contentMd5Handler",
+		Fn:   checksum.AddBodyContentMD5Handler,
+	})
 	return
 }

-// PutBucketNotification API operation for Amazon Simple Storage Service.
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The previous version
+// of the API supported filtering based only on an object key name prefix, which
+// is supported for backward compatibility. For the related API description,
+// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
+//
+// Rules
+//
+// You specify the lifecycle configuration in your request body. The lifecycle
+// configuration is specified as XML consisting of one or more rules. Each rule
+// consists of the following:
+//
+// * Filter identifying a subset of objects to which the rule applies. The
+// filter can be based on a key name prefix, object tags, or a combination
+// of both.
+//
+// * Status whether the rule is in effect.
+//
+// * One or more lifecycle transition and expiration actions that you want
+// Amazon S3 to perform on the objects identified by the filter. If the state
+// of your bucket is versioning-enabled or versioning-suspended, you can
+// have many versions of the same object (one current version and zero or
+// more noncurrent versions). Amazon S3 provides predefined actions that
+// you can specify for current and noncurrent object versions.
+// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the AWS account that created +// it) can access the resource. The resource owner can optionally grant access +// permissions to others by writing an access policy. For this operation, a +// user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject // -// Deprecated, see the PutBucketNotificationConfiguraiton operation. +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketNotification for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification -func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) return out, req.Send() } -// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutBucketNotification for details on how to use this API operation. +// See PutBucketLifecycleConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
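A minimal sketch of the current (non-deprecated) call documented above, putting one filter-based rule that transitions and then expires objects; aws-sdk-go v1 with environment-configured credentials is assumed, and the bucket name, rule ID, and prefix are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
            Bucket: aws.String("example-bucket"), // placeholder
            LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
                Rules: []*s3.LifecycleRule{{
                    ID:     aws.String("archive-then-expire"),
                    Status: aws.String("Enabled"),
                    // Filter on a key prefix; a Tag or an And combination also works.
                    Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
                    Transitions: []*s3.Transition{{
                        Days:         aws.Int64(30),
                        StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
                    }},
                    Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }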
-func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" +const opPutBucketLogging = "PutBucketLogging" -// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// See PutBucketLogging for more information on using the PutBucketLogging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketNotificationConfigurationRequest method. -// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ - Name: opPutBucketNotificationConfiguration, + Name: opPutBucketLogging, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", + HTTPPath: "/{Bucket}?logging", } if input == nil { - input = &PutBucketNotificationConfigurationInput{} + input = &PutBucketLoggingInput{} } - output = &PutBucketNotificationConfigurationOutput{} + output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// PutBucketLogging API operation for Amazon Simple Storage Service. 
+//
+// Set the logging parameters for a bucket and specify permissions for who
+// can view and modify the logging parameters. All logs are saved to buckets
+// in the same AWS Region as the source bucket. To set the logging status of
+// a bucket, you must be the bucket owner.
+//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use
+// the Grantee request element to grant access to other people. The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser.
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
+//
+// To enable logging, you use LoggingEnabled and its children request elements.
+// To disable logging, you use an empty BucketLoggingStatus request element:
+//
+// <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
+//
+// For more information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html).
+//
+// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+// For more information about returning the logging status of a bucket, see
+// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
+//
+// The following operations are related to PutBucketLogging:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
 //
-// Enables notifications of specified events for a bucket.
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketNotificationConfiguration for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
-func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
-	req, out := c.PutBucketNotificationConfigurationRequest(input)
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+	req, out := c.PutBucketLoggingRequest(input)
 	return out, req.Send()
 }

-// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
 // the ability to pass a context and additional request options.
 //
-// See PutBucketNotificationConfiguration for details on how to use this API operation.
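A minimal sketch of the logging setup described above, enabling access logs to a target bucket (sending an empty BucketLoggingStatus instead would disable logging); aws-sdk-go v1 with environment-configured credentials is assumed, and both bucket names and the prefix are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
            Bucket: aws.String("example-source-bucket"), // placeholder
            BucketLoggingStatus: &s3.BucketLoggingStatus{
                // Omitting LoggingEnabled (an empty BucketLoggingStatus)
                // turns server access logging off.
                LoggingEnabled: &s3.LoggingEnabled{
                    TargetBucket: aws.String("example-log-bucket"), // placeholder
                    TargetPrefix: aws.String("access-logs/"),
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }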
+// See PutBucketLogging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketPolicy = "PutBucketPolicy" +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" -// PutBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketPolicy operation. The "output" return +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketPolicy for more information on using the PutBucketPolicy +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketPolicyRequest method. -// req, resp := client.PutBucketPolicyRequest(params) +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { op := &request.Operation{ - Name: opPutBucketPolicy, + Name: opPutBucketMetricsConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", + HTTPPath: "/{Bucket}?metrics", } if input == nil { - input = &PutBucketPolicyInput{} + input = &PutBucketMetricsConfigurationInput{} } - output = &PutBucketPolicyOutput{} + output = &PutBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketPolicy API operation for Amazon Simple Storage Service. 
+// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID)
+// for the bucket. You can have up to 1,000 metrics configurations per bucket.
+// If you're updating an existing metrics configuration, note that this is a
+// full replacement of the existing metrics configuration. If you don't include
+// the elements you want to keep, they are erased.
+//
+// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to PutBucketMetricsConfiguration:
 //
-// Replaces a policy on a bucket. If the bucket already has a policy, the one
-// in this request completely replaces it.
+// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// PutBucketMetricsConfiguration has the following special error:
+//
+// * Error code: TooManyConfigurations Description: You are attempting to
+// create a new configuration but have already reached the 1,000-configuration
+// limit. HTTP Status Code: HTTP 400 Bad Request
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketPolicy for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
-func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
-	req, out := c.PutBucketPolicyRequest(input)
+// API operation PutBucketMetricsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+	req, out := c.PutBucketMetricsConfigurationRequest(input)
 	return out, req.Send()
 }

-// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
 // the ability to pass a context and additional request options.
 //
-// See PutBucketPolicy for details on how to use this API operation.
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation.
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketReplication = "PutBucketReplication" +const opPutBucketNotification = "PutBucketNotification" -// PutBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketReplication operation. The "output" return +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketReplication for more information on using the PutBucketReplication +// See PutBucketNotification for more information on using the PutBucketNotification // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketReplicationRequest method. -// req, resp := client.PutBucketReplicationRequest(params) +// // Example sending a request using the PutBucketNotificationRequest method. 
+// req, resp := client.PutBucketNotificationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } op := &request.Operation{ - Name: opPutBucketReplication, + Name: opPutBucketNotification, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?replication", + HTTPPath: "/{Bucket}?notification", } if input == nil { - input = &PutBucketReplicationInput{} + input = &PutBucketNotificationInput{} } - output = &PutBucketReplicationOutput{} + output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketReplication API operation for Amazon Simple Storage Service. +// PutBucketNotification API operation for Amazon Simple Storage Service. // -// Creates a new replication configuration (or replaces an existing one, if -// present). +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketReplication for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) +// API operation PutBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) return out, req.Send() } -// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of // the ability to pass a context and additional request options. // -// See PutBucketReplication for details on how to use this API operation. +// See PutBucketNotification for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketRequestPayment = "PutBucketRequestPayment" +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" -// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketRequestPayment operation. The "output" return +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketRequestPaymentRequest method. -// req, resp := client.PutBucketRequestPaymentRequest(params) +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. 
+// req, resp := client.PutBucketNotificationConfigurationRequest(params)
 //
 // err := req.Send()
 // if err == nil { // resp is now filled
 // fmt.Println(resp)
 // }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
-func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
 	op := &request.Operation{
-		Name:       opPutBucketRequestPayment,
+		Name:       opPutBucketNotificationConfiguration,
 		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?requestPayment",
+		HTTPPath:   "/{Bucket}?notification",
 	}

 	if input == nil {
-		input = &PutBucketRequestPaymentInput{}
+		input = &PutBucketNotificationConfigurationInput{}
 	}

-	output = &PutBucketRequestPaymentOutput{}
+	output = &PutBucketNotificationConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }

-// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
 //
-// Sets the request payment configuration for a bucket. By default, the bucket
-// owner pays for downloads from the bucket. This configuration parameter enables
-// the bucket owner (only) to specify that the person requesting the download
-// will be charged for the download. Documentation on requester pays buckets
-// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an
+// event notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration.
+//
+// <NotificationConfiguration>
+//
+// </NotificationConfiguration>
+//
+// This operation replaces the existing notification configuration with the
+// configuration you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon
+// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
+// SQS) destination exists, and that the bucket owner has permission to publish
+// to it by sending a test notification. In the case of AWS Lambda destinations,
+// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
+// to invoke the function from the Amazon S3 bucket. For more information, see
+// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
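A minimal sketch of the disable case just described: an empty NotificationConfiguration removes all event notifications from the bucket. It assumes aws-sdk-go v1 with environment-configured credentials; the bucket name is a placeholder:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // An empty NotificationConfiguration replaces (and so clears)
        // whatever notification configuration the bucket currently has.
        _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
            Bucket:                    aws.String("example-bucket"), // placeholder
            NotificationConfiguration: &s3.NotificationConfiguration{},
        })
        if err != nil {
            log.Fatal(err)
        }
    }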
+// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with s3:PutBucketNotification permission. +// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. +// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following operation is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketRequestPayment for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) return out, req.Send() } -// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutBucketRequestPayment for details on how to use this API operation. +// See PutBucketNotificationConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opPutBucketTagging = "PutBucketTagging" +const opPutBucketPolicy = "PutBucketPolicy" -// PutBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketTagging operation. The "output" return +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketTagging for more information on using the PutBucketTagging +// See PutBucketPolicy for more information on using the PutBucketPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketTaggingRequest method. -// req, resp := client.PutBucketTaggingRequest(params) +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ - Name: opPutBucketTagging, + Name: opPutBucketPolicy, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?tagging", + HTTPPath: "/{Bucket}?policy", } if input == nil { - input = &PutBucketTaggingInput{} + input = &PutBucketPolicyInput{} } - output = &PutBucketTaggingOutput{} + output = &PutBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketTagging API operation for Amazon Simple Storage Service. +// PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Sets the tags for a bucket. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. 
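A minimal sketch of attaching a policy with the operation described above, assuming aws-sdk-go v1 with environment-configured credentials; the bucket name and the policy document are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // A placeholder policy granting public read on the bucket's objects.
        policy := `{
          "Version": "2012-10-17",
          "Statement": [{
            "Sid": "AllowPublicRead",
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::example-bucket/*"
          }]
        }`

        _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
            Bucket: aws.String("example-bucket"), // placeholder
            Policy: aws.String(policy),
        })
        if err != nil {
            log.Fatal(err)
        }
    }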
+// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) return out, req.Send() } -// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of // the ability to pass a context and additional request options. // -// See PutBucketTagging for details on how to use this API operation. +// See PutBucketPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketVersioning = "PutBucketVersioning" +const opPutBucketReplication = "PutBucketReplication" -// PutBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketVersioning operation. The "output" return +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketVersioning for more information on using the PutBucketVersioning +// See PutBucketReplication for more information on using the PutBucketReplication // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketVersioningRequest method. -// req, resp := client.PutBucketVersioningRequest(params) +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ - Name: opPutBucketVersioning, + Name: opPutBucketReplication, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?versioning", + HTTPPath: "/{Bucket}?replication", } if input == nil { - input = &PutBucketVersioningInput{} + input = &PutBucketReplicationInput{} } - output = &PutBucketVersioningOutput{} + output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketVersioning API operation for Amazon Simple Storage Service. +// PutBucketReplication API operation for Amazon Simple Storage Service. // -// Sets the versioning state of an existing bucket. To set the versioning state, -// you must be the bucket owner. +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 Developer Guide. +// +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket where you want +// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to +// replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. All rules must specify +// the same destination bucket. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. 
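A minimal sketch of a Filter-based (XML V2) rule as described above, which therefore also carries DeleteMarkerReplication, Status, and Priority; aws-sdk-go v1 with environment-configured credentials is assumed, and the bucket names, role ARN, rule ID, and prefix are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
            Bucket: aws.String("example-source-bucket"), // placeholder
            ReplicationConfiguration: &s3.ReplicationConfiguration{
                // IAM role that Amazon S3 assumes to replicate objects (placeholder ARN).
                Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
                Rules: []*s3.ReplicationRule{{
                    ID:       aws.String("replicate-docs"),
                    Status:   aws.String("Enabled"),
                    Priority: aws.Int64(1),
                    // A Filter-based rule must also set DeleteMarkerReplication.
                    Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("docs/")},
                    DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
                    Destination: &s3.Destination{
                        Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
                    },
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }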
+// +// The latest version of the replication configuration XML is V2. XML V2 replication +// configurations are those that contain the Filter element for rules, and rules +// that specify S3 Replication Time Control (S3 RTC). In XML V2 replication +// configurations, Amazon S3 doesn't replicate delete markers. Therefore, you +// must set the DeleteMarkerReplication element to Disabled. For backward compatibility, +// Amazon S3 continues to support the XML V1 replication configuration. +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketVersioning for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) return out, req.Send() } -// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of // the ability to pass a context and additional request options. // -// See PutBucketVersioning for details on how to use this API operation. +// See PutBucketReplication for details on how to use this API operation. 
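A minimal sketch of the SSE-KMS additions listed above, grafted onto a single replication rule: SourceSelectionCriteria with SseKmsEncryptedObjects opts in to replicating KMS-encrypted objects, and EncryptionConfiguration's ReplicaKmsKeyID names the key used for the replicas. All ARNs and names are placeholders; aws-sdk-go v1 with environment-configured credentials is assumed:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
            Bucket: aws.String("example-source-bucket"), // placeholder
            ReplicationConfiguration: &s3.ReplicationConfiguration{
                Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
                Rules: []*s3.ReplicationRule{{
                    Status:                  aws.String("Enabled"),
                    Priority:                aws.Int64(1),
                    Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("secure/")},
                    DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
                    // Opt in to replicating objects encrypted with AWS KMS CMKs.
                    SourceSelectionCriteria: &s3.SourceSelectionCriteria{
                        SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{Status: aws.String("Enabled")},
                    },
                    Destination: &s3.Destination{
                        Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
                        // Replicas are re-encrypted with this key (placeholder ARN).
                        EncryptionConfiguration: &s3.EncryptionConfiguration{
                            ReplicaKmsKeyID: aws.String("arn:aws:kms:us-west-2:123456789012:key/example-key-id"),
                        },
                    },
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }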
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketWebsite = "PutBucketWebsite" +const opPutBucketRequestPayment = "PutBucketRequestPayment" -// PutBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketWebsite operation. The "output" return +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketWebsite for more information on using the PutBucketWebsite +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutBucketWebsiteRequest method. -// req, resp := client.PutBucketWebsiteRequest(params) +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ - Name: opPutBucketWebsite, + Name: opPutBucketRequestPayment, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?website", + HTTPPath: "/{Bucket}?requestPayment", } if input == nil { - input = &PutBucketWebsiteInput{} + input = &PutBucketRequestPaymentInput{} } - output = &PutBucketWebsiteOutput{} + output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutBucketWebsite API operation for Amazon Simple Storage Service. 
+// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// Set the website configuration for a bucket. +// * GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketWebsite for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) return out, req.Send() } -// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of // the ability to pass a context and additional request options. // -// See PutBucketWebsite for details on how to use this API operation. +// See PutBucketRequestPayment for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutObject = "PutObject" +const opPutBucketTagging = "PutBucketTagging" -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
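A sketch of exercising the new requestPayment surface, with a placeholder bucket name; `s3.PayerRequester` and `s3.PayerBucketOwner` are the two Payer values this SDK version defines:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableRequesterPays makes downloaders, not the bucket owner, pay for
// requests and data transfer out of the bucket, per the doc comment above.
func enableRequesterPays(svc *s3.S3) error {
	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("my-bucket"), // placeholder
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(s3.PayerRequester), // s3.PayerBucketOwner reverts to the default
		},
	})
	return err
}
```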
// the "output" return value is not valid until after Send returns without error. // -// See PutObject for more information on using the PutObject +// See PutBucketTagging for more information on using the PutBucketTagging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectRequest method. -// req, resp := client.PutObjectRequest(params) +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ - Name: opPutObject, + Name: opPutBucketTagging, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}?tagging", } if input == nil { - input = &PutObjectInput{} + input = &PutBucketTaggingInput{} } - output = &PutObjectOutput{} + output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutObject API operation for Amazon Simple Storage Service. +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: // -// Adds an object to a bucket. 
+// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) return out, req.Send() } -// PutObjectWithContext is the same as PutObject with the addition of +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of // the ability to pass a context and additional request options. // -// See PutObject for details on how to use this API operation. +// See PutBucketTagging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutObjectAcl = "PutObjectAcl" +const opPutBucketVersioning = "PutBucketVersioning" -// PutObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectAcl operation. The "output" return +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
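A sketch of calling PutBucketTagging, including the awserr type assertion the comment block recommends; the bucket name, tag values, and the `"InvalidTagError"` string literal (taken from the special-errors list above) are illustrative:

```go
package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagBucket replaces the bucket's tag set; a tag with an existing key
// overwrites the old value, as documented above.
func tagBucket(svc *s3.S3) error {
	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
			{Key: aws.String("project"), Value: aws.String("photos")}, // placeholders
			{Key: aws.String("env"), Value: aws.String("prod")},
		}},
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "InvalidTagError" {
		log.Printf("tag failed validation: %s", aerr.Message())
	}
	return err
}
```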
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectAcl for more information on using the PutObjectAcl +// See PutBucketVersioning for more information on using the PutBucketVersioning // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectAclRequest method. -// req, resp := client.PutObjectAclRequest(params) +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ - Name: opPutObjectAcl, + Name: opPutBucketVersioning, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?acl", + HTTPPath: "/{Bucket}?versioning", } if input == nil { - input = &PutObjectAclInput{} + input = &PutBucketVersioningInput{} } - output = &PutObjectAclOutput{} + output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutObjectAcl API operation for Amazon Simple Storage Service. +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. +// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. // -// uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. 
(A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectAcl for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) return out, req.Send() } -// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of // the ability to pass a context and additional request options. // -// See PutObjectAcl for details on how to use this API operation. +// See PutBucketVersioning for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutObjectTagging = "PutObjectTagging" +const opPutBucketWebsite = "PutBucketWebsite" -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectTagging for more information on using the PutObjectTagging +// See PutBucketWebsite for more information on using the PutBucketWebsite // API call, and error handling. 
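A minimal sketch of flipping a bucket to versioned, assuming a placeholder bucket name. If MFA Delete were enabled on the bucket, the input's MFA field would also need the device serial and token value described above:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableVersioning moves the bucket from unversioned (or Suspended) to
// Enabled; newly written objects then receive unique version IDs.
func enableVersioning(svc *s3.S3) error {
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("my-bucket"), // placeholder
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	return err
}
```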
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ - Name: opPutObjectTagging, + Name: opPutBucketWebsite, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}?website", } if input == nil { - input = &PutObjectTaggingInput{} + input = &PutBucketWebsiteInput{} } - output = &PutObjectTaggingOutput{} + output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// PutObjectTagging API operation for Amazon Simple Storage Service. +// PutBucketWebsite API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. // -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - return out, req.Send() -} - -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of -// the ability to pass a context and additional request options. +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. 
// -// See PutObjectTagging for details on how to use this API operation. +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRestoreObject = "RestoreObject" +const opPutObject = "PutObject" -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
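A sketch of the website configuration described above, using the IndexDocument/ErrorDocument/RoutingRules shape rather than RedirectAllRequestsTo; bucket and document names are placeholders:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// configureWebsite sets an index and error document plus one routing rule.
// Because RoutingRules is used instead of RedirectAllRequestsTo, the index
// document is required, as the doc comment explains.
func configureWebsite(svc *s3.S3) error {
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("my-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
			RoutingRules: []*s3.RoutingRule{{
				Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
				Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
			}},
		},
	})
	return err
}
```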
//
-// See RestoreObject for more information on using the RestoreObject
+// See PutObject for more information on using the PutObject
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
-// // Example sending a request using the RestoreObjectRequest method.
-// req, resp := client.RestoreObjectRequest(params)
+// // Example sending a request using the PutObjectRequest method.
+// req, resp := client.PutObjectRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
-func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
	op := &request.Operation{
-		Name:       opRestoreObject,
-		HTTPMethod: "POST",
-		HTTPPath:   "/{Bucket}/{Key+}?restore",
+		Name:       opPutObject,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
	}

	if input == nil {
-		input = &RestoreObjectInput{}
+		input = &PutObjectInput{}
	}

-	output = &RestoreObjectOutput{}
+	output = &PutObjectOutput{}
	req = c.newRequest(op, input, output)
	return
}

-// RestoreObject API operation for Amazon Simple Storage Service.
+// PutObject API operation for Amazon Simple Storage Service.
//
-// Restores an archived copy of an object back into Amazon S3
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to
+// add an object to it.
+//
+// Amazon S3 never adds partial objects; if you receive a success response,
+// Amazon S3 added the entire object to the bucket.
+//
+// Amazon S3 is a distributed system. If it receives multiple write requests
+// for the same object simultaneously, it overwrites all but the last object
+// written. Amazon S3 does not provide object locking; if you need this, make
+// sure to build it into your application layer or use versioning instead.
+//
+// To ensure that data is not corrupted traversing the network, use the Content-MD5
+// header. When you use this header, Amazon S3 checks the object against the
+// provided MD5 value and, if they do not match, returns an error. Additionally,
+// you can calculate the MD5 while putting an object to Amazon S3 and compare
+// the returned ETag to the calculated MD5 value.
+//
+// The Content-MD5 header is required for any request to upload an object with
+// a retention period configured using Amazon S3 Object Lock. For more information
+// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Server-side Encryption
+//
+// You can optionally request server-side encryption. With server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts the data when you access it. You have the option to provide
+// your own encryption key or use AWS managed encryption keys. For more information,
+// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// You can use headers to grant ACL-based permissions.
By default, all objects +// are private. Only the owner has full access control. When adding a new object, +// you can grant permissions to individual AWS accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the +// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the STANDARD storage class to store newly created +// objects. The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different storage class. +// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) +// API operation PutObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) return out, req.Send() } -// RestoreObjectWithContext is the same as RestoreObject with the addition of +// PutObjectWithContext is the same as PutObject with the addition of // the ability to pass a context and additional request options. // -// See RestoreObject for details on how to use this API operation. +// See PutObject for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
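A sketch of a basic PutObject call with SSE-S3, assuming placeholder bucket and key names. Body accepts any io.ReadSeeker; callers wanting the integrity check described above can additionally set ContentMD5 (required when uploading into an Object Lock retention period) or compare the returned ETag against a locally computed MD5:

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putGreeting uploads a small object encrypted at rest with SSE-S3.
func putGreeting(svc *s3.S3) error {
	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("my-bucket"), // placeholder
		Key:                  aws.String("greeting.txt"),
		Body:                 bytes.NewReader([]byte("hello, world\n")),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
	})
	if err != nil {
		return err
	}
	fmt.Println("stored, ETag:", aws.StringValue(out.ETag))
	return nil
}
```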
-func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSelectObjectContent = "SelectObjectContent" +const opPutObjectAcl = "PutObjectAcl" -// SelectObjectContentRequest generates a "aws/request.Request" representing the -// client's request for the SelectObjectContent operation. The "output" return +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SelectObjectContent for more information on using the SelectObjectContent +// See PutObjectAcl for more information on using the PutObjectAcl // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SelectObjectContentRequest method. -// req, resp := client.SelectObjectContentRequest(params) +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent -func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ - Name: opSelectObjectContent, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", } if input == nil { - input = &SelectObjectContentInput{} + input = &PutObjectAclInput{} } - output = &SelectObjectContentOutput{} + output = &PutObjectAclOutput{} req = c.newRequest(op, input, output) - req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) - req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// SelectObjectContent API operation for Amazon Simple Storage Service. +// PutObjectAcl API operation for Amazon Simple Storage Service. // -// This operation filters the contents of an Amazon S3 object based on a simple -// Structured Query Language (SQL) statement. In the request, along with the -// SQL expression, you must also specify a data serialization format (JSON or -// CSV) of the object. 
Amazon S3 uses this to parse object data into records,
-// and returns only records that match the specified SQL expression. You must
-// also specify the data serialization format for the response.
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket. You must have WRITE_ACP permission
+// to set the ACL of an object.
+//
+// Depending on your application needs, you can choose to set the ACL on an
+// object using either the request body or the headers. For example, if you
+// have an existing application that updates a bucket ACL using the request
+// body, you can continue to use that approach. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// in the Amazon S3 Developer Guide.
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (AWS
+// accounts or Amazon S3 groups) who will receive the permission. If you
+// use these ACL-specific headers, you cannot use x-amz-acl header to set
+// a canned ACL. These parameters map to the set of permissions that Amazon
+// S3 supports in an ACL. For more information, see Access Control List (ACL)
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants list objects permission to the two AWS accounts identified
+// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com",
+// emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI: <Grantee><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address: <Grantee><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Versioning
+//
+// The ACL of an object is set at the object version level. By default, PUT
+// sets the ACL of the current version of an object. To set the ACL of a different
+// version, use the versionId subresource.
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation SelectObjectContent for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
-func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
-	req, out := c.SelectObjectContentRequest(input)
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+	req, out := c.PutObjectAclRequest(input)
	return out, req.Send()
}

-// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
// the ability to pass a context and additional request options.
//
-// See SelectObjectContent for details on how to use this API operation.
+// See PutObjectAcl for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
-	req, out := c.SelectObjectContentRequest(input)
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+	req, out := c.PutObjectAclRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

-const opUploadPart = "UploadPart"
+const opPutObjectLegalHold = "PutObjectLegalHold"

-// UploadPartRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPart operation.
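A sketch of the two mutually exclusive PutObjectAcl styles documented above: a canned ACL, or explicit grant headers (never both in one request). Bucket and key names are placeholders, and note that either call replaces the object's whole ACL:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// shareObject first applies a canned ACL, then an explicit READ grant to a
// predefined group using the uri= grantee form.
func shareObject(svc *s3.S3) error {
	// Canned ACL; cannot be combined with x-amz-grant-* headers.
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("report.pdf"),
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
	})
	if err != nil {
		return err
	}
	// Explicit grant via the x-amz-grant-read header.
	_, err = svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket:    aws.String("my-bucket"),
		Key:       aws.String("report.pdf"),
		GrantRead: aws.String(`uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"`),
	})
	return err
}
```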
The "output" return +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UploadPart for more information on using the UploadPart +// See PutObjectLegalHold for more information on using the PutObjectLegalHold // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) +// // Example sending a request using the PutObjectLegalHoldRequest method. +// req, resp := client.PutObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { op := &request.Operation{ - Name: opUploadPart, + Name: opPutObjectLegalHold, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &UploadPartInput{} + input = &PutObjectLegalHoldInput{} } - output = &UploadPartOutput{} + output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// UploadPart API operation for Amazon Simple Storage Service. +// PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Uploads a part in a multipart upload. +// Applies a Legal Hold configuration to the specified object. // -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPart for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) +// API operation PutObjectLegalHold for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) return out, req.Send() } -// UploadPartWithContext is the same as UploadPart with the addition of +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of // the ability to pass a context and additional request options. // -// See UploadPart for details on how to use this API operation. +// See PutObjectLegalHold for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUploadPartCopy = "UploadPartCopy" +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" -// UploadPartCopyRequest generates a "aws/request.Request" representing the -// client's request for the UploadPartCopy operation. The "output" return +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UploadPartCopy for more information on using the UploadPartCopy +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UploadPartCopyRequest method. -// req, resp := client.UploadPartCopyRequest(params) +// // Example sending a request using the PutObjectLockConfigurationRequest method. 
+// req, resp := client.PutObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opUploadPartCopy, + Name: opPutObjectLockConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &UploadPartCopyInput{} + input = &PutObjectLockConfigurationInput{} } - output = &UploadPartCopyOutput{} + output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } -// UploadPartCopy API operation for Amazon Simple Storage Service. +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. +// +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPartCopy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) return out, req.Send() } -// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of // the ability to pass a context and additional request options. // -// See UploadPartCopy for details on how to use this API operation. +// See PutObjectLockConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
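A sketch of the bucket-wide default described above; the bucket name and the 30-day window are placeholders, and Days and Years are mutually exclusive, as the doc comment says:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// setDefaultRetention gives every new object in the bucket 30 days of
// GOVERNANCE-mode retention by default.
func setDefaultRetention(svc *s3.S3) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-locked-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // Days or Years, never both
				},
			},
		},
	})
	return err
}
```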
-func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. -type AbortIncompleteMultipartUpload struct { - _ struct{} `type:"structure"` - - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. - DaysAfterInitiation *int64 `type:"integer"` -} +const opPutObjectRetention = "PutObjectRetention" -// String returns the string representation -func (s AbortIncompleteMultipartUpload) String() string { - return awsutil.Prettify(s) -} +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } -// GoString returns the string representation -func (s AbortIncompleteMultipartUpload) GoString() string { - return s.String() -} + if input == nil { + input = &PutObjectRetentionInput{} + } -// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. -func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { - s.DaysAfterInitiation = &v - return s + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return } -type AbortMultipartUploadInput struct { +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// Places an Object Retention configuration on an object. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
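A sketch of a per-object retention call, which overrides any bucket default set via PutObjectLockConfiguration; bucket, key, and the retention window are placeholders:

```go
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// retainObject pins one object version until a fixed date in COMPLIANCE
// mode, which cannot be shortened or bypassed once set.
func retainObject(svc *s3.S3) error {
	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket: aws.String("my-locked-bucket"), // placeholder
		Key:    aws.String("evidence.bin"),     // placeholder
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String(s3.ObjectLockRetentionModeCompliance),
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	return err
}
```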
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). 
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
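+//
+// A minimal sketch of a typical input for this operation, assuming a configured
+// *s3.S3 client named svc; the bucket name and the all-true settings are
+// illustrative:
+//
+//    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"), // hypothetical bucket
+//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//            BlockPublicAcls:       aws.Bool(true),
+//            BlockPublicPolicy:     aws.Bool(true),
+//            IgnorePublicAcls:      aws.Bool(true),
+//            RestrictPublicBuckets: aws.Bool(true),
+//        },
+//    })
+//    if err != nil {
+//        // handle error
+//    }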
+// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// This operation performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. 
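+//
+// A minimal sketch of a select-type restore request, assuming a configured
+// *s3.S3 client named svc; the bucket names, key, prefix, and query are
+// illustrative:
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-archive-bucket"), // hypothetical
+//        Key:    aws.String("data.csv"),          // hypothetical
+//        RestoreRequest: &s3.RestoreRequest{
+//            Type: aws.String(s3.RestoreRequestTypeSelect),
+//            SelectParameters: &s3.SelectParameters{
+//                Expression:          aws.String("SELECT * FROM Object"),
+//                ExpressionType:      aws.String(s3.ExpressionTypeSql),
+//                InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
+//                OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//            },
+//            OutputLocation: &s3.OutputLocation{
+//                S3: &s3.S3Location{
+//                    BucketName: aws.String("my-output-bucket"), // hypothetical
+//                    Prefix:     aws.String("select-results/"),
+//                },
+//            },
+//        },
+//    })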
+//
+// When making a select request, do the following:
+//
+// * Define an output location for the select query's output. This must be
+// an Amazon S3 bucket in the same AWS Region as the bucket that contains
+// the archive object that is being queried. The AWS account that initiates
+// the job must have permissions to write to the S3 bucket. You can specify
+// the storage class and encryption for the output objects stored in the
+// bucket. For more information about output, see Querying Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon Simple Storage Service Developer Guide. For more information
+// about the S3 structure in the request body, see the following: PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing
+// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+// in the Amazon Simple Storage Service Developer Guide Protecting Data Using
+// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon Simple Storage Service Developer Guide
+//
+// * Define the SQL expression for the SELECT type of restoration for your
+// query in the request body's SelectParameters structure. You can use expressions
+// like the following examples. The following expression returns all records
+// from the specified object. SELECT * FROM Object Assuming that you are
+// not using any headers for data stored in the object, you can specify columns
+// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 >
+// 100 If you have headers and you set the fileHeaderInfo in the CSV structure
+// in the request body to USE, you can specify headers in the query. (If
+// you set the fileHeaderInfo field to IGNORE, the first row is skipped for
+// the query.) You cannot mix ordinal positions with header column names.
+// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
+//
+// For more information about using SQL with S3 Glacier Select restore, see
+// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// When making a select request, you can also do the following:
+//
+// * To expedite your queries, specify the Expedited tier. For more information
+// about tiers, see "Restoring Archives," later in this topic.
+//
+// * Specify details about the data serialization format of both the input
+// object that is being queried and the serialization of the CSV-encoded
+// query results.
+//
+// The following are additional important facts about the select feature:
+//
+// * The output results are new Amazon S3 objects. Unlike archive retrievals,
+// they are stored until explicitly deleted, manually or through a lifecycle
+// policy.
+//
+// * You can issue more than one select request on the same Amazon S3 object.
+// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
+//
+// * Amazon S3 accepts a select request even if the object has already been
+// restored. A select request doesn’t return error response 409.
+//
+// Restoring Archives
+//
+// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To
+// access an archived object, you must first initiate a restore request. This
+// restores a temporary copy of the archived object.
In a restore request, you +// specify the number of days that you want the restored copy to exist. After +// the specified period, Amazon S3 deletes the temporary copy but the object +// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object +// was restored from. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// The time it takes restore jobs to finish depends on which storage class the +// object is being restored from and which data access tier you specify. +// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the GLACIER storage class when occasional urgent requests for +// a subset of archives are required. For all but the largest archived objects +// (250 MB+), data accessed using Expedited retrievals are typically made +// available within 1–5 minutes. Provisioned capacity ensures that retrieval +// capacity for Expedited retrievals is available when you need it. Expedited +// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE +// storage class. +// +// * Standard - S3 Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for the GLACIER +// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval +// option. S3 Standard retrievals typically complete within 3-5 hours from +// the GLACIER storage class and typically complete within 12 hours from +// the DEEP_ARCHIVE storage class. +// +// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval +// option, enabling you to retrieve large amounts, even petabytes, of data +// inexpensively in a day. Bulk retrievals typically complete within 5-12 +// hours from the GLACIER storage class and typically complete within 48 +// hours from the DEEP_ARCHIVE storage class. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. You upgrade the speed of an in-progress +// restoration by issuing another restore request to the same object, setting +// a new Tier request element. When issuing a request to upgrade the restore +// tier, you must choose a tier that is faster than the tier that the in-progress +// restore is using. You must not change any other parameters, such as the Days +// request element. For more information, see Upgrading the Speed of an In-Progress +// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. 
For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period when
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Responses
+//
+// A successful operation returns either the 200 OK or 202 Accepted status code.
+//
+// * If the object copy was not previously restored, then Amazon S3 returns
+// 202 Accepted in the response.
+//
+// * If the object copy was previously restored, Amazon S3 returns 200 OK
+// in the response.
+//
+// Special Errors
+//
+// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress.
+// (This error does not apply to SELECT type requests.) HTTP Status Code:
+// 409 Conflict SOAP Fault Code Prefix: Client
+//
+// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited
+// retrievals are currently not available. Try again later. (Returned if
+// there is insufficient capacity to process the Expedited request. This
+// error applies only to Expedited retrievals and not to S3 Standard or Bulk
+// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A
+//
+// Related Resources
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+// This operation is not allowed against this storage tier.
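+//
+// A minimal sketch of a plain archive restore, assuming a configured *s3.S3
+// client named svc; the names, the 10-day window, and the Standard tier are
+// illustrative:
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-archive-bucket"), // hypothetical
+//        Key:    aws.String("archived-object"),   // hypothetical
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(10), // how long the temporary copy should exist
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String(s3.TierStandard),
+//            },
+//        },
+//    })
+//    if err != nil {
+//        // handle error, e.g. ErrCodeObjectAlreadyInActiveTierError
+//    }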
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + + es := newSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. 
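+//
+// A minimal end-to-end sketch, assuming a configured *s3.S3 client named svc;
+// the bucket, key, and query are illustrative:
+//
+//    out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//        Bucket:         aws.String("my-bucket"), // hypothetical
+//        Key:            aws.String("data.csv"),  // hypothetical
+//        Expression:     aws.String("SELECT s._1 FROM S3Object s"),
+//        ExpressionType: aws.String(s3.ExpressionTypeSql),
+//        InputSerialization: &s3.InputSerialization{
+//            CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoNone)},
+//        },
+//        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//    })
+//    if err != nil {
+//        // handle error and stop; out is not usable on failure
+//    }
+//    defer out.EventStream.Close()
+//    for event := range out.EventStream.Events() {
+//        if records, ok := event.(*s3.RecordsEvent); ok {
+//            fmt.Printf("%s", records.Payload) // raw query results
+//        }
+//    }
+//    if err := out.EventStream.Err(); err != nil {
+//        // the stream failed partway through
+//    }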
+// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more information about using SQL with Amazon S3 Select, see SQL Reference +// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Permissions +// +// You must have s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon Simple Storage Service Developer Guide. For objects that +// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see Appendix: SelectObjectContent +// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) . +// +// GetObject Support +// +// The SelectObjectContent operation does not support the following GetObject +// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). 
+//
+// * Range: Although you can specify a scan range for an Amazon S3 Select
+// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange)
+// in the request parameters), you cannot specify the range of bytes of an
+// object to return.
+//
+// * GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot
+// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes.
+// For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Special Errors
+//
+// For a list of special errors for this operation, see List of SELECT Object
+// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList).
+//
+// Related Resources
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation SelectObjectContent for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
+	req, out := c.SelectObjectContentRequest(input)
+	return out, req.Send()
+}
+
+// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SelectObjectContent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
+	req, out := c.SelectObjectContentRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+var _ awserr.Error
+
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
+type SelectObjectContentEventStream struct {
+
+	// Reader is the EventStream reader for the SelectObjectContentEventStream
+	// events. This value is automatically set by the SDK when the API call is made.
+	// Use this member when unit testing your code with the SDK to mock out the
+	// EventStream Reader.
+	//
+	// Must not be nil.
+	Reader SelectObjectContentEventStreamReader
+
+	outputReader io.ReadCloser
+
+	// StreamCloser is the io.Closer for the EventStream connection. For HTTP
+	// EventStream this is the response Body. The stream will be closed when
+	// the Close method of the EventStream is called.
+ StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +func newSelectObjectContentEventStream() *SelectObjectContentEventStream { + return &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } +} + +func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body +} + +func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *SelectObjectContentEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +// Events returns a channel to read events from. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +// +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. To upload a part from an existing object, you +// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) +// before you can upload any part. In response to your initiate request, Amazon +// S3 returns an upload ID, a unique identifier, that you must include in your +// upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. Each +// part must be at least 5 MB in size, except the last part. There is no size +// limit on the last part of your multipart upload. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. If they do not match, Amazon S3 returns +// an error. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon Simple Storage Service Developer Guide . 
+// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. You have the option of providing your own encryption +// key, or you can use the AWS managed encryption keys. If you choose to provide +// your own encryption key, the request headers you provide in the request must +// match the headers you used in the request to initiate the upload by using +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. 
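+//
+// A minimal sketch of passing a deadline, assuming svc and an input built as
+// in the UploadPart sketch above; the 30-second timeout and the log-level
+// option are illustrative (a standard context.Context satisfies aws.Context):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.UploadPartWithContext(ctx, input, request.WithLogLevel(aws.LogDebug))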
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+	req, out := c.UploadPartRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the UploadPartCopyRequest method.
+//    req, resp := client.UploadPartCopyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+	op := &request.Operation{
+		Name:       opUploadPartCopy,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &UploadPartCopyInput{}
+	}
+
+	output = &UploadPartCopyOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source.
+// You specify the data source by adding the request header x-amz-copy-source
+// in your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// The minimum allowable part size for a multipart upload is 5 MB. For more
+// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+// * For conceptual information about multipart uploads, see Uploading Objects
+// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon Simple Storage Service Developer Guide.
+// +// * For information about permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about copying objects using a single atomic operation +// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about using server-side encryption with customer-provided +// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). +// +// Note the following additional considerations about the request headers x-amz-copy-source-if-match, +// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and +// x-amz-copy-source-if-modified-since: +// +// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request as follows: x-amz-copy-source-if-match +// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since +// condition evaluates to false; Amazon S3 returns 200 OK and copies the +// data. +// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// Amazon S3 returns 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. +// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. 
HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. +type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. +func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
+	// Key of the object for which the multipart upload was initiated.
+	//
 	// Key is a required field
 	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
 
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
 	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
 
+	// Upload ID that identifies the multipart upload.
+	//
 	// UploadId is a required field
 	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
 }
 
@@ -6322,6 +10165,9 @@ func (s *AbortMultipartUploadInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
 	if s.Key == nil {
 		invalidParams.Add(request.NewErrParamRequired("Key"))
 	}
@@ -6369,6 +10215,20 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI
 	return s
 }
 
+func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *AbortMultipartUploadInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
 type AbortMultipartUploadOutput struct {
 	_ struct{} `type:"structure"`
 
@@ -6393,10 +10253,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart
 	return s
 }
 
+// Configures the transfer acceleration state for an Amazon S3 bucket. For more
+// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
+// in the Amazon Simple Storage Service Developer Guide.
 type AccelerateConfiguration struct {
 	_ struct{} `type:"structure"`
 
-	// The accelerate configuration of the bucket.
+	// Specifies the transfer acceleration status of the bucket.
 	Status *string `type:"string" enum:"BucketAccelerateStatus"`
 }
 
@@ -6416,12 +10279,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
 	return s
 }
 
+// Contains the elements that set the ACL permissions for an object per grantee.
type AccessControlPolicy struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -6467,11 +10332,13 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { return s } -// Container for information regarding the access control for replicas. +// A container for information about access control for replicas. type AccessControlTranslation struct { _ struct{} `type:"structure"` - // The override value for the owner of the replica object. + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. // // Owner is a required field Owner *string `type:"string" required:"true" enum:"OwnerOverride"` @@ -6506,10 +10373,14 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation return s } +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. type AnalyticsAndOperator struct { _ struct{} `type:"structure"` - // The prefix to use when evaluating an AND predicate. + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. Prefix *string `type:"string"` // The list of tags to use when evaluating an AND predicate. @@ -6558,6 +10429,8 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -6566,13 +10439,13 @@ type AnalyticsConfiguration struct { // If no filter is provided, all objects will be considered in any analysis. Filter *AnalyticsFilter `type:"structure"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `type:"string" required:"true"` - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. // // StorageClassAnalysis is a required field StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` @@ -6632,6 +10505,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } +// Where to publish the analytics results. type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -6675,6 +10549,9 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. 
type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -6737,25 +10614,28 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } +// Contains information about where to publish the analytics results. type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` - // The Amazon resource name (ARN) of the bucket to which data is exported. + // The Amazon Resource Name (ARN) of the bucket to which data is exported. // // Bucket is a required field Bucket *string `type:"string" required:"true"` - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. BucketAccountId *string `type:"string"` - // The file format used when exporting data to Amazon S3. + // Specifies the file format used when exporting data to Amazon S3. // // Format is a required field Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - // The prefix to use when exporting data. The exported data begins with this - // prefix. + // The prefix to use when exporting data. The prefix is prepended to all results. Prefix *string `type:"string"` } @@ -6816,6 +10696,8 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. type Bucket struct { _ struct{} `type:"structure"` @@ -6848,9 +10730,14 @@ func (s *Bucket) SetName(v string) *Bucket { return s } +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // // Rules is a required field Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -6894,12 +10781,14 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } +// Container for logging status information. type BucketLoggingStatus struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -6934,9 +10823,16 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. 
For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. type CORSConfiguration struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } @@ -6980,14 +10876,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } +// Specifies a cross-origin access rule for an Amazon S3 bucket. type CORSRule struct { _ struct{} `type:"structure"` - // Specifies which headers are allowed in a pre-flight OPTIONS request. + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. // // AllowedMethods is a required field AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` @@ -7063,7 +10963,8 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Describes how a CSV-formatted input object is formatted. +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. type CSVInput struct { _ struct{} `type:"structure"` @@ -7072,24 +10973,45 @@ type CSVInput struct { // to TRUE may lower performance. AllowQuotedRecordDelimiter *bool `type:"boolean"` - // Single character used to indicate a row should be ignored when present at - // the start of a row. + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. Comments *string `type:"string"` - // Value used to separate individual fields in a record. + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // Describes the first line of input. Valid values: None, Ignore, Use. + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` - // Value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". 
+ // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string `type:"string"` - // Single character used for escaping the quote character inside an already - // escaped value. + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". QuoteEscapeCharacter *string `type:"string"` - // Value used to separate individual records. + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -7145,24 +11067,33 @@ func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { return s } -// Describes how CSV-formatted results are formatted. +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. type CSVOutput struct { _ struct{} `type:"structure"` - // Value used to separate individual fields in a record. + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // Value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". QuoteCharacter *string `type:"string"` - // Single character used for escaping the quote character inside an already + // The single character used for escaping the quote character inside an already // escaped value. QuoteEscapeCharacter *string `type:"string"` - // Indicates whether or not all output fields should be quoted. + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. QuoteFields *string `type:"string" enum:"QuoteFields"` - // Value used to separate individual records. + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -7206,20 +11137,27 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { return s } +// Container for specifying the AWS Lambda notification configuration. type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. CloudFunction *string `type:"string"` - // Bucket event for which to send notifications. + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // Bucket events for which to send notifications. Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. 
Id *string `type:"string"` + // The role supporting the invocation of the Lambda function InvocationRole *string `type:"string"` } @@ -7263,9 +11201,15 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. type CommonPrefix struct { _ struct{} `type:"structure"` + // Container for the specified common prefix. Prefix *string `type:"string"` } @@ -7286,22 +11230,30 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { } type CompleteMultipartUploadInput struct { - _ struct{} `type:"structure" payload:"MultipartUpload"` + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The container for the multipart upload request information. MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // ID for the initiated multipart upload. 
+ // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -7322,6 +11274,9 @@ func (s *CompleteMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -7375,35 +11330,61 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` + // The name of the bucket that contains the newly created object. Bucket *string `type:"string"` - // Entity tag of the object. + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. ETag *string `type:"string"` // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + // The object key of the newly created object. Key *string `min:"1" type:"string"` + // The URI that identifies the newly created object. Location *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Version of the object. + // Version ID of the newly created object, in case the bucket has versioning + // turned on. 
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -7478,9 +11459,11 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar return s } +// The container for the completed multipart upload details. type CompletedMultipartUpload struct { _ struct{} `type:"structure"` + // Array of CompletedPart data types. Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } @@ -7500,6 +11483,7 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip return s } +// Details of the parts that were uploaded. type CompletedPart struct { _ struct{} `type:"structure"` @@ -7533,6 +11517,10 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. type Condition struct { _ struct{} `type:"structure"` @@ -7601,12 +11589,21 @@ func (s *ContinuationEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + type CopyObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the destination bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -7646,17 +11643,18 @@ type CopyObjectInput struct { // Copies the object if it hasn't been modified since the specified time. CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. @@ -7674,6 +11672,8 @@ type CopyObjectInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // The key of the destination object. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -7684,35 +11684,53 @@ type CopyObjectInput struct { // with metadata provided in the request. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Specifies whether you want to apply a Legal Hold to the copied object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to the copied object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the copied object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // or using SigV4. For information about configuring using any of the officially + // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request + // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. @@ -7720,7 +11738,7 @@ type CopyObjectInput struct { // The tag-set for the object destination object this value must be used in // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters + // Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // Specifies whether the object tag-set are copied from the source object or @@ -7749,6 +11767,9 @@ func (s *CopyObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -7917,6 +11938,24 @@ func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. 
+func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { s.RequestPayer = &v @@ -7948,6 +11987,12 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { s.SSEKMSKeyId = &v @@ -7984,11 +12029,27 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` + // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` + // Version of the copied object in the destination bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. @@ -8004,16 +12065,22 @@ type CopyObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. 
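A note on the new endpoint-ARN plumbing above: every input type in this revision gains getEndpointARN/hasEndpointARN helpers, so an S3 access point ARN can stand in for a plain bucket name. A minimal, hypothetical sketch of what that enables for a caller (the ARN, bucket, and key names here are placeholders, not values from this diff):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/arn"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// An access point ARN may now be passed wherever a bucket name is
	// expected; the generated hasEndpointARN helpers gate the rerouting on
	// the same arn.IsARN check used here.
	bucket := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point" // hypothetical ARN
	fmt.Println("bucket is an ARN:", arn.IsARN(bucket))

	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String("backup/object.dat"),                 // hypothetical destination key
		CopySource: aws.String("source-bucket/original/object.dat"), // hypothetical source
	})
	if err != nil {
		log.Fatal(err)
	}
}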
@@ -8066,6 +12133,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { s.SSEKMSKeyId = &v @@ -8084,11 +12157,16 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } +// Container for all response elements. type CopyObjectResult struct { _ struct{} `type:"structure"` + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied object. ETag *string `type:"string"` + // Returns the date that the object was last modified. LastModified *time.Time `type:"timestamp"` } @@ -8114,6 +12192,7 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } +// Container for all response elements. type CopyPartResult struct { _ struct{} `type:"structure"` @@ -8146,11 +12225,12 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } +// The configuration information for the bucket. type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket will be created in US Standard. + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -8171,14 +12251,17 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke } type CreateBucketInput struct { - _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // The name of the bucket to create. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The configuration information for the bucket. CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -8196,6 +12279,9 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. 
+ ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } // String returns the string representation @@ -8214,6 +12300,9 @@ func (s *CreateBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8276,9 +12365,18 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + type CreateBucketOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need + // to specify the location. Location *string `location:"header" locationName:"Location" type:"string"` } @@ -8299,11 +12397,13 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { } type CreateMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the bucket to which to initiate the upload + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8339,47 +12439,67 @@ type CreateMultipartUploadInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Object key for which the multipart upload is to be initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Specifies whether you want to apply a Legal Hold to the uploaded object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // Specifies the Object Lock mode that you want to apply to the uploaded object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // Specifies the date and time when you want the Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for + // object encryption. All GET and PUT requests for an object protected by AWS + // KMS will fail if not made via SSL or using SigV4. For information about configuring + // using any of the officially supported AWS SDKs and AWS CLI, see Specifying + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object. The tag-set must be encoded as URL Query parameters + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object @@ -8404,6 +12524,9 @@ func (s *CreateMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -8508,6 +12631,24 @@ func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMu return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { s.RequestPayer = &v @@ -8539,6 +12680,12 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { s.SSEKMSKeyId = &v @@ -8569,17 +12716,47 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat return s } +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. 
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Bucket *string `locationName:"Bucket" type:"string"` // Object key for which the multipart upload was initiated. @@ -8595,16 +12772,22 @@ type CreateMultipartUploadOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms).
+ // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // ID for the initiated multipart upload. @@ -8670,6 +12853,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { s.SSEKMSKeyId = &v @@ -8688,9 +12877,56 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + Years *int64 `type:"integer"` +} + +// String returns the string representation +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +// Container for the objects to delete. type Delete struct { _ struct{} `type:"structure"` + // The objects to delete. + // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -8745,14 +12981,14 @@ func (s *Delete) SetQuiet(v bool) *Delete { } type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is deleted. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. 
// // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -8774,6 +13010,9 @@ func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -8803,6 +13042,20 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8818,8 +13071,10 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { } type DeleteBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + // Specifies the bucket whose cors configuration is being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -8840,6 +13095,9 @@ func (s *DeleteBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8860,6 +13118,20 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -8875,7 +13147,7 @@ func (s DeleteBucketCorsOutput) GoString() string { } type DeleteBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` // The name of the bucket containing the server-side encryption configuration // to delete. 
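Alongside the required-field checks, this revision adds a minimum-length check on Bucket to every generated Validate method, so an empty bucket name now fails client-side before a request is ever signed or sent. A minimal sketch of that behavior, using the exported Validate on one of the input types shown above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Bucket is set but empty, so the new len(*s.Bucket) < 1 branch trips
	// and Validate reports an ErrParamMinLen for Bucket.
	in := &s3.DeleteBucketEncryptionInput{Bucket: aws.String("")}
	if err := in.Validate(); err != nil {
		fmt.Println(err)
	}
}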
@@ -8900,6 +13172,9 @@ func (s *DeleteBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8920,6 +13195,20 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -8935,8 +13224,10 @@ func (s DeleteBucketEncryptionOutput) GoString() string { } type DeleteBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + // Specifies the bucket being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -8957,6 +13248,9 @@ func (s *DeleteBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8977,8 +13271,22 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to delete. // @@ -9007,6 +13315,9 @@ func (s *DeleteBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9036,6 +13347,20 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI return s } +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9051,8 +13376,10 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { } type DeleteBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + // The bucket name of the lifecycle to delete. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9073,6 +13400,9 @@ func (s *DeleteBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9093,6 +13423,20 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -9108,7 +13452,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string { } type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to delete. // @@ -9137,6 +13481,9 @@ func (s *DeleteBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9166,6 +13513,20 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet return s } +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9195,8 +13556,10 @@ func (s DeleteBucketOutput) GoString() string { } type DeleteBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + // The bucket name. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9217,6 +13580,9 @@ func (s *DeleteBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9237,6 +13603,20 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -9252,8 +13632,10 @@ func (s DeleteBucketPolicyOutput) GoString() string { } type DeleteBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9274,6 +13656,9 @@ func (s *DeleteBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9294,6 +13679,20 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -9309,8 +13708,10 @@ func (s DeleteBucketReplicationOutput) GoString() string { } type DeleteBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + // The bucket that has the tag set to be removed. 
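Usage of the tagging variant is typical of all of these delete inputs; a sketch, assuming svc is an *s3.S3 client built with s3.New(session.Must(session.NewSession())) and a hypothetical bucket name:

// Removes the bucket's entire tag set; the output struct carries no fields.
_, err := svc.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{
	Bucket: aws.String("my-bucket"),
})
if err != nil {
	fmt.Println("delete tagging failed:", err)
}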
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9331,6 +13732,9 @@ func (s *DeleteBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9351,6 +13755,20 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -9366,8 +13784,10 @@ func (s DeleteBucketTaggingOutput) GoString() string { } type DeleteBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + // The bucket name for which you want to remove the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9388,6 +13808,9 @@ func (s *DeleteBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9408,6 +13831,20 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -9422,6 +13859,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } +// Information about the delete marker. type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -9435,6 +13873,7 @@ type DeleteMarkerEntry struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // The account that created the delete marker.> Owner *Owner `type:"structure"` // Version ID of an object. @@ -9481,23 +13920,78 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -type DeleteObjectInput struct { +// Specifies whether Amazon S3 replicates the delete markers. If you specify +// a Filter, you must specify this element. However, in the latest version of +// replication configuration (when Filter is specified), Amazon S3 doesn't replicate +// delete markers. Therefore, the DeleteMarkerReplication element can contain +// only Disabled. For an example configuration, see Basic Rule +// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, Amazon +// S3 handled replication of delete markers differently. 
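To make the Disabled-only rule concrete, a hedged sketch of a latest-version (Filter present) replication rule; bucket ARN and prefix are hypothetical:

rule := &s3.ReplicationRule{
	Status: aws.String(s3.ReplicationRuleStatusEnabled),
	// A Filter marks this as the newer configuration schema, so
	// DeleteMarkerReplication may only be set to Disabled.
	Filter: &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
	DeleteMarkerReplication: &s3.DeleteMarkerReplication{
		Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
	},
	Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::replica-bucket")},
}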
For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +type DeleteMarkerReplication struct { _ struct{} `type:"structure"` + // Indicates whether to replicate delete markers. + // + // In the current implementation, Amazon S3 doesn't replicate the delete markers. + // The status must be Disabled. + Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` +} + +// String returns the string representation +func (s DeleteMarkerReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerReplication) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { + s.Status = &v + return s +} + +type DeleteObjectInput struct { + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + + // The bucket name of the bucket containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions + // to process this operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // Key name of the object to delete. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. 
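A sketch of a versioned delete routed through an access point, as the Bucket documentation above describes; ARN, key, and version ID are all hypothetical, and svc is an assumed *s3.S3 client:

out, err := svc.DeleteObject(&s3.DeleteObjectInput{
	Bucket:    aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
	Key:       aws.String("photos/2020/city.jpg"),
	VersionId: aws.String("3HL4kqtJlcpXrof3vjVBH40N"),
})
if err == nil && aws.BoolValue(out.DeleteMarker) {
	// True when the removed version was itself a delete marker.
	fmt.Println("removed a delete marker")
}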
@@ -9520,6 +14014,9 @@ func (s *DeleteObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9546,6 +14043,12 @@ func (s *DeleteObjectInput) getBucket() (v string) { return *s.Bucket } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + // SetKey sets the Key field's value. func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -9570,6 +14073,20 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -9615,11 +14132,22 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { } type DeleteObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + // The bucket name containing the objects from which to remove the tags. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Name of the object key. 
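The generated setters chain, so the new governance-bypass flag reads fluently; names are hypothetical and svc is an assumed *s3.S3 client:

in := (&s3.DeleteObjectInput{}).
	SetBucket("my-locked-bucket").
	SetKey("reports/q1.csv").
	// Requires permission to bypass governance-mode retention, per the
	// x-amz-bypass-governance-retention header documented above.
	SetBypassGovernanceRetention(true)
_, err := svc.DeleteObject(in)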
+ // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -9643,6 +14171,9 @@ func (s *DeleteObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9681,6 +14212,20 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -9705,22 +14250,41 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO } type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + // The bucket name containing the objects to delete. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies whether you want to delete this object even if it has a Governance-type + // Object Lock in place. You must have sufficient permissions to perform this + // operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // Container for the request. + // // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
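A batch-delete sketch showing the required Delete payload container; bucket and keys are hypothetical, svc an assumed *s3.S3 client:

out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
	Bucket: aws.String("my-bucket"),
	Delete: &s3.Delete{
		Objects: []*s3.ObjectIdentifier{
			{Key: aws.String("tmp/a.txt")},
			{Key: aws.String("tmp/b.txt")},
		},
		Quiet: aws.Bool(true), // only failures come back in the response
	},
})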
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -9740,6 +14304,9 @@ func (s *DeleteObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Delete == nil { invalidParams.Add(request.NewErrParamRequired("Delete")) } @@ -9768,6 +14335,12 @@ func (s *DeleteObjectsInput) getBucket() (v string) { return *s.Bucket } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + // SetDelete sets the Delete field's value. func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -9786,11 +14359,29 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectsOutput struct { _ struct{} `type:"structure"` + // Container element for a successful delete. It identifies the object that + // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered. Errors []*Error `locationName:"Error" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the @@ -9826,15 +14417,100 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { return s } +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Information about the deleted object. type DeletedObject struct { _ struct{} `type:"structure"` + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. DeleteMarker *bool `type:"boolean"` + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. DeleteMarkerVersionId *string `type:"string"` + // The name of the deleted object. Key *string `min:"1" type:"string"` + // The version ID of the deleted object. VersionId *string `type:"string"` } @@ -9872,27 +14548,53 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// Container for replication destination information. +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { _ struct{} `type:"structure"` - // Container for information regarding the access control for replicas. + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. AccessControlTranslation *AccessControlTranslation `type:"structure"` - // Account ID of the destination bucket. Currently this is only being verified - // if Access Control Translation is enabled + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` - // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store - // replicas of the object identified by the rule. + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store the results. 
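A one-call sketch for the new PublicAccessBlock delete input (bucket name hypothetical, svc an assumed *s3.S3 client):

// Clears the bucket's PublicAccessBlock configuration; the output
// struct, like the other delete outputs here, is empty.
_, err := svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
	Bucket: aws.String("my-bucket"),
})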
// // Bucket is a required field Bucket *string `type:"string" required:"true"` - // Container for information regarding encryption based configuration for replicas. + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // The class of storage used to store the object. + // A container specifying replication metrics-related settings enabling metrics + // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified + // together with a ReplicationTime block. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -9917,6 +14619,16 @@ func (s *Destination) Validate() error { invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) } } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9955,19 +14667,30 @@ func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *De return s } +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Describes the server-side encryption that will be applied to the restore -// results. +// Contains the type of server-side encryption used. type Encryption struct { _ struct{} `type:"structure"` // The server-side encryption algorithm used when storing job results in Amazon - // S3 (e.g., AES256, aws:kms). + // S3 (for example, AES256, aws:kms). // // EncryptionType is a required field EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` @@ -9976,9 +14699,12 @@ type Encryption struct { // the encryption context for the restore results. KMSContext *string `type:"string"` - // If the encryption type is aws:kms, this optional value specifies the AWS - // KMS key ID to use for encryption of job results. - KMSKeyId *string `type:"string"` + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed AWS KMS CMK to use for encryption of job results. 
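Because Metrics and ReplicationTime must be specified together, an S3 RTC destination sketch looks like the following (ARN hypothetical; 15 minutes is the threshold the S3 docs use):

dest := &s3.Destination{
	Bucket: aws.String("arn:aws:s3:::replica-bucket"),
	ReplicationTime: &s3.ReplicationTime{
		Status: aws.String(s3.ReplicationTimeStatusEnabled),
		Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
	},
	Metrics: &s3.Metrics{
		Status:         aws.String(s3.MetricsStatusEnabled),
		EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
	},
}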
+ // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric + // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. + KMSKeyId *string `type:"string" sensitive:"true"` } // String returns the string representation @@ -10022,11 +14748,17 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { return s } -// Container for information regarding encryption based configuration for replicas. +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The id of the KMS key used to encrypt the replica object. + // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer + // master key (CMK) stored in AWS Key Management Service (KMS) for the destination + // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only + // supports symmetric customer managed CMKs. For more information, see Using + // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -10046,6 +14778,9 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig return s } +// A message that indicates the request is complete and no more messages will +// be sent. You should not assume that the request is complete until the client +// receives an EndEvent. type EndEvent struct { _ struct{} `locationName:"EndEvent" type:"structure"` } @@ -10072,15 +14807,382 @@ func (s *EndEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. type Error struct { _ struct{} `type:"structure"` + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS + // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. 
HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. 
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. 
HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. 
HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. 
HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client Code *string `type:"string"` + // The error key. Key *string `min:"1" type:"string"` + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. Message *string `type:"string"` + // The version ID of the error. VersionId *string `type:"string"` } @@ -10118,6 +15220,7 @@ func (s *Error) SetVersionId(v string) *Error { return s } +// The error information. 
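The Code values cataloged above surface through awserr.Error on failed calls, while per-key failures of a batch delete arrive in the output's Errors list. A sketch, assuming svc, input, and github.com/aws/aws-sdk-go/aws/awserr are in scope:

out, err := svc.DeleteObjects(input)
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
	return err // whole-request failure: 404 NoSuchBucket from the table above
}
for _, e := range out.Errors {
	// Per-object failures reuse the same Code/Message vocabulary.
	fmt.Println(aws.StringValue(e.Key), aws.StringValue(e.Code), aws.StringValue(e.Message))
}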
type ErrorDocument struct { _ struct{} `type:"structure"` @@ -10159,17 +15262,58 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } -// Container for key value pair that defines the criteria for the filter rule. +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 Developer Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. type FilterRule struct { _ struct{} `type:"structure"` - // Object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string" enum:"FilterRuleName"` + // The value that the filter searches for in object key names. Value *string `type:"string"` } @@ -10196,7 +15340,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule { } type GetBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` // Name of the bucket for which the accelerate configuration is retrieved. 
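A sketch of FilterRule in its usual home, a notification key filter (prefix and suffix values hypothetical):

filter := &s3.NotificationConfigurationFilter{
	Key: &s3.KeyFilter{
		FilterRules: []*s3.FilterRule{
			{Name: aws.String(s3.FilterRuleNamePrefix), Value: aws.String("images/")},
			{Name: aws.String(s3.FilterRuleNameSuffix), Value: aws.String(".jpg")},
		},
	},
}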
// @@ -10220,6 +15364,9 @@ func (s *GetBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10240,6 +15387,20 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -10264,8 +15425,10 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA } type GetBucketAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + // Specifies the S3 bucket whose ACL is being requested. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10286,6 +15449,9 @@ func (s *GetBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10306,12 +15472,27 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -10338,14 +15519,14 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { } type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is retrieved. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. 
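Reading the ACL output documented above; bucket name hypothetical, svc an assumed *s3.S3 client:

out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{Bucket: aws.String("my-bucket")})
if err != nil {
	return err
}
fmt.Println("owner:", aws.StringValue(out.Owner.DisplayName))
for _, g := range out.Grants {
	fmt.Println(aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
}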
// // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -10367,6 +15548,9 @@ func (s *GetBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -10396,6 +15580,20 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti return s } +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -10420,8 +15618,10 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana } type GetBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + // The bucket name for which to get the cors configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10442,6 +15642,9 @@ func (s *GetBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10462,9 +15665,25 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } @@ -10485,7 +15704,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { } type GetBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` // The name of the bucket from which the server-side encryption configuration // is retrieved. 
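Listing the up-to-100 CORS rules the output documentation mentions; bucket name hypothetical, svc an assumed *s3.S3 client:

out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{Bucket: aws.String("my-bucket")})
if err != nil {
	return err
}
for _, r := range out.CORSRules {
	fmt.Println(aws.StringValueSlice(r.AllowedOrigins), aws.StringValueSlice(r.AllowedMethods))
}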
@@ -10510,6 +15729,9 @@ func (s *GetBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10530,11 +15752,24 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` } @@ -10555,7 +15790,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv } type GetBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to retrieve. // @@ -10584,6 +15819,9 @@ func (s *GetBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -10613,6 +15851,20 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento return s } +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -10637,8 +15889,10 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv } type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. 
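Inspecting the default-encryption rules returned by the call above; bucket name hypothetical, svc an assumed *s3.S3 client:

out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{Bucket: aws.String("my-bucket")})
if err != nil {
	return err
}
for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
	d := rule.ApplyServerSideEncryptionByDefault
	fmt.Println(aws.StringValue(d.SSEAlgorithm), aws.StringValue(d.KMSMasterKeyID))
}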
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10659,6 +15913,9 @@ func (s *GetBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10679,9 +15936,24 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } @@ -10702,8 +15974,10 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge } type GetBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10724,6 +15998,9 @@ func (s *GetBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10744,9 +16021,24 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` } @@ -10767,8 +16059,10 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput } type GetBucketLocationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + // The name of the bucket for which to get the location. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10789,6 +16083,9 @@ func (s *GetBucketLocationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10809,9 +16106,26 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -10832,8 +16146,10 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca } type GetBucketLoggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + // The bucket name for which to get the logging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10854,6 +16170,9 @@ func (s *GetBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10874,12 +16193,27 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -10900,7 +16234,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket } type GetBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to retrieve. 
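The new GetBucketLocationOutput comment calls out the us-east-1 quirk: such buckets report a null LocationConstraint. The SDK ships s3.NormalizeBucketLocation for exactly this case; a sketch assuming an *s3.S3 client (svc) built as in the earlier sketch, with a placeholder bucket:

package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printBucketRegion(svc *s3.S3) {
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		fmt.Println("GetBucketLocation:", err)
		return
	}
	// NormalizeBucketLocation maps the null/empty us-east-1 answer (and the
	// legacy "EU" value) to a region name usable in aws.Config.
	fmt.Println("region:", s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)))
}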
// @@ -10929,6 +16263,9 @@ func (s *GetBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -10958,6 +16295,20 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo return s } +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -10982,9 +16333,9 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics } type GetBucketNotificationConfigurationRequest struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` - // Name of the bucket to get the notification configuration for. + // Name of the bucket for which to get the notification configuration. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11006,6 +16357,9 @@ func (s *GetBucketNotificationConfigurationRequest) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11026,9 +16380,25 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + // The bucket name for which to get the bucket policy. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11049,6 +16419,9 @@ func (s *GetBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11069,6 +16442,20 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -11092,9 +16479,96 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { return s } +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. 
+func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + type GetBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + // The bucket name for which to get the replication information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11115,6 +16589,9 @@ func (s *GetBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11135,11 +16612,25 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. ReplicationConfiguration *ReplicationConfiguration `type:"structure"` } @@ -11160,8 +16651,10 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC } type GetBucketRequestPaymentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + // The name of the bucket for which to get the payment request configuration + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11182,6 +16675,9 @@ func (s *GetBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11202,6 +16698,20 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -11226,8 +16736,10 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym } type GetBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + // The name of the bucket for which to get the tagging information. 
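GetBucketPolicyStatus is one of the operations this upgrade introduces; its output wraps a PolicyStatus whose IsPublic flag summarizes whether the bucket policy makes the bucket public. A sketch, again assuming an *s3.S3 client and a placeholder bucket:

package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printPolicyStatus(svc *s3.S3) {
	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		fmt.Println("GetBucketPolicyStatus:", err)
		return
	}
	if ps := out.PolicyStatus; ps != nil {
		fmt.Println("bucket is public:", aws.BoolValue(ps.IsPublic))
	}
}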
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11248,6 +16760,9 @@ func (s *GetBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11268,9 +16783,25 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -11292,8 +16823,10 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { } type GetBucketVersioningInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + // The name of the bucket for which to get the versioning information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11314,6 +16847,9 @@ func (s *GetBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11334,6 +16870,20 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -11369,8 +16919,10 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp } type GetBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + // The bucket name for which to get the website configuration. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11391,6 +16943,9 @@ func (s *GetBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11411,15 +16966,34 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` + // The object key name of the website error document to use for 4XX class errors. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website (for example index.html). IndexDocument *IndexDocument `type:"structure"` + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -11458,18 +17032,30 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb } type GetObjectAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + // The bucket name that contains the object for which to get the ACL information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key of the object for which to get the ACL information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. 
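The expanded GetObjectAclInput comment says an access point ARN may stand in for the bucket name, and RequestPayer acknowledges requester-pays charges. A sketch combining both (the ARN and key are placeholders; s3.RequestPayerRequester is the SDK's enum value for the x-amz-request-payer header):

package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printAclViaAccessPoint(svc *s3.S3) {
	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
		// A made-up access point ARN standing in for the bucket name, as the
		// new doc comment describes.
		Bucket:       aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
		Key:          aws.String("example-key"), // placeholder
		RequestPayer: aws.String(s3.RequestPayerRequester),
	})
	if err != nil {
		fmt.Println("GetObjectAcl:", err)
		return
	}
	for _, g := range out.Grants {
		fmt.Println("grant:", aws.StringValue(g.Permission))
	}
}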
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -11492,6 +17078,9 @@ func (s *GetObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -11536,12 +17125,27 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` // If present, indicates that the requester was successfully charged for the @@ -11578,8 +17182,17 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { } type GetObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectRequest" type:"structure"` + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11599,6 +17212,8 @@ type GetObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // Key of the object to get. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -11608,13 +17223,17 @@ type GetObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response. @@ -11635,41 +17254,251 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value.
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
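SSECustomerKey is now tagged sensitive:"true" and marshal-as:"blob", so the raw key is redacted from the input's String() output and marshaled as binary. A sketch of a GetObject request built with the fluent setters added above; the key and names are placeholders, and the note about the SDK deriving the key's MD5 header reflects a reading of the SDK's SSE handling rather than anything stated in this diff:

package sketches

import (
	"fmt"
	"io"
	"os"

	"github.com/aws/aws-sdk-go/service/s3"
)

func getWithSSEC(svc *s3.S3, key []byte) {
	// key stands in for a securely stored 256-bit SSE-C key. The
	// sensitive:"true" tag keeps it out of String() output; per a reading of
	// the SDK's SSE handling (an assumption, not this diff), the key is
	// base64-encoded and its MD5 header filled in before the request is sent.
	input := (&s3.GetObjectInput{}).
		SetBucket("example-bucket"). // placeholder
		SetKey("example-key").       // placeholder
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey(string(key))

	out, err := svc.GetObject(input)
	if err != nil {
		fmt.Println("GetObject:", err)
		return
	}
	defer out.Body.Close()
	io.Copy(os.Stdout, out.Body) // object body, decrypted server-side with the supplied key
}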
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + // The bucket name containing the object whose Legal Hold status you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // The key name for the object whose Legal Hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // VersionId used to reference a specific version of the object. + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose Legal Hold status you want to retrieve. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s GetObjectInput) String() string { +func (s GetObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectInput) GoString() string { +func (s GetObjectLegalHoldInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -11684,136 +17513,162 @@ func (s *GetObjectInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { s.Bucket = &v return s } -func (s *GetObjectInput) getBucket() (v string) { +func (s *GetObjectLegalHoldInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -// SetIfMatch sets the IfMatch field's value. -func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { - s.IfMatch = &v +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v return s } -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { - s.IfModifiedSince = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v return s } -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { - s.IfNoneMatch = &v +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v return s } -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { - s.IfUnmodifiedSince = &v - return s +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) } -// SetKey sets the Key field's value. -func (s *GetObjectInput) SetKey(v string) *GetObjectInput { - s.Key = &v - return s +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } -// SetPartNumber sets the PartNumber field's value. -func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { - s.PartNumber = &v - return s +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current Legal Hold status for the specified object. + LegalHold *ObjectLockLegalHold `type:"structure"` } -// SetRange sets the Range field's value. -func (s *GetObjectInput) SetRange(v string) *GetObjectInput { - s.Range = &v - return s +// String returns the string representation +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) } -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { - s.RequestPayer = &v - return s +// GoString returns the string representation +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() } -// SetResponseCacheControl sets the ResponseCacheControl field's value. 
-func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { - s.ResponseCacheControl = &v +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v return s } -// SetResponseContentDisposition sets the ResponseContentDisposition field's value. -func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { - s.ResponseContentDisposition = &v - return s +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose Object Lock configuration you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } -// SetResponseContentEncoding sets the ResponseContentEncoding field's value. -func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { - s.ResponseContentEncoding = &v - return s +// String returns the string representation +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetResponseContentLanguage sets the ResponseContentLanguage field's value. -func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { - s.ResponseContentLanguage = &v - return s +// GoString returns the string representation +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() } -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetResponseExpires sets the ResponseExpires field's value. -func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v return s } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v - return s +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v - return s +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) } -func (s *GetObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false } - return *s.SSECustomerKey + return arn.IsARN(*s.Bucket) } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
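GetObjectLockConfiguration is another operation new in this range; the output type that follows carries the bucket-level Object Lock settings. A sketch with a placeholder bucket:

package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printObjectLockConfig(svc *s3.S3) {
	out, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		fmt.Println("GetObjectLockConfiguration:", err)
		return
	}
	if cfg := out.ObjectLockConfiguration; cfg != nil {
		fmt.Println("object lock:", aws.StringValue(cfg.ObjectLockEnabled))
		if cfg.Rule != nil && cfg.Rule.DefaultRetention != nil {
			fmt.Println("default retention mode:", aws.StringValue(cfg.Rule.DefaultRetention.Mode))
		}
	}
}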
-func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v - return s +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v +// String returns the string representation +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v return s } type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. @@ -11847,11 +17702,11 @@ type GetObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -11863,6 +17718,10 @@ type GetObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default, unmarshaled keys are written to the map as keys in the following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -11871,9 +17730,21 @@ type GetObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's Object Lock will expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the @@ -11890,18 +17761,21 @@ type GetObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The number of tags, if any, on the object. @@ -12016,93 +17890,255 @@ func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { return s } -// SetMissingMeta sets the MissingMeta field's value. -func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { - s.MissingMeta = &v - return s +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. 
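The Metadata doc comment above names aws.Config.LowerCaseHeaderMaps as the opt-out from canonicalized header keys. A sketch, assuming that config field takes a *bool in this release as the comment implies (bucket and key are placeholders):

package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printLowercaseMetadata() {
	// LowerCaseHeaderMaps is the knob named in the Metadata doc comment;
	// treating it as a *bool here is an assumption based on that comment.
	sess := session.Must(session.NewSession(&aws.Config{
		LowerCaseHeaderMaps: aws.Bool(true),
	}))
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		fmt.Println("GetObject:", err)
		return
	}
	defer out.Body.Close()
	for k, v := range out.Metadata {
		fmt.Println(k, "=", aws.StringValue(v)) // keys arrive lowercase, not canonicalized
	}
}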
+func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionInput) GoString() string { + return s.String() } -// SetPartsCount sets the PartsCount field's value. -func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { - s.PartsCount = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { - s.ReplicationStatus = &v +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { - s.RequestCharged = &v - return s +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetRestore sets the Restore field's value. -func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { - s.Restore = &v +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v return s } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { - s.SSECustomerAlgorithm = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { - s.SSECustomerKeyMD5 = &v +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
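GetObjectRetention rounds out the Object Lock read APIs, returning the per-object retention mode and expiry (its output type follows below). A sketch with placeholder names:

package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printRetention(svc *s3.S3) {
	out, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		fmt.Println("GetObjectRetention:", err)
		return
	}
	if r := out.Retention; r != nil {
		fmt.Println(aws.StringValue(r.Mode), "until", aws.TimeValue(r.RetainUntilDate))
	}
}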
-func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { - s.SSEKMSKeyId = &v - return s +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { - s.ServerSideEncryption = &v - return s +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } -// SetStorageClass sets the StorageClass field's value. -func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { - s.StorageClass = &v - return s +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` } -// SetTagCount sets the TagCount field's value. -func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { - s.TagCount = &v - return s +// String returns the string representation +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { - s.VersionId = &v - return s +// GoString returns the string representation +func (s GetObjectRetentionOutput) GoString() string { + return s.String() } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { - s.WebsiteRedirectLocation = &v +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v return s } type GetObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + // The bucket name containing the object for which to get the tagging information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which to get the tagging information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The versionId of the object for which to get the tagging information. 
VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -12122,6 +18158,9 @@ func (s *GetObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12160,12 +18199,29 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + // The versionId of the object for which you got the tagging information. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -12192,18 +18248,24 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput } type GetObjectTorrentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + // The name of the bucket containing the object for which to get the torrent + // files. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The object key for which to get the information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. 
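The tagging calls follow the same shape; a sketch of reading an object's tag set, with imports and session setup as in the previous sketch (names hypothetical):

    svc := s3.New(session.Must(session.NewSession()))
    out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("photos/cat.jpg"),
    })
    if err != nil {
        log.Fatal(err)
    }
    // VersionId on the output echoes the version whose tags were read.
    for _, t := range out.TagSet {
        fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
    }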
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -12223,6 +18285,9 @@ func (s *GetObjectTorrentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12261,9 +18326,24 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` + // A Bencoded dictionary as defined by the BitTorrent specification Body io.ReadCloser `type:"blob"` // If present, indicates that the requester was successfully charged for the @@ -12293,10 +18373,98 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu return s } +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. 
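The getEndpointARN/hasEndpointARN helpers stamped onto each input type are what let this SDK version accept an S3 access point ARN in the Bucket field and reroute the request to the access point endpoint. A sketch of that usage; the account ID and access point name are hypothetical:

    svc := s3.New(session.Must(session.NewSession()))
    // arn.IsARN recognizes the value, so the client resolves an access point
    // endpoint instead of treating this as a bucket name.
    out, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
        Key:    aws.String("example-key"),
    })
    if err != nil {
        log.Fatal(err)
    }
    defer out.Body.Close()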
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. type GlacierJobParameters struct { _ struct{} `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. // // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -12331,9 +18499,11 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } +// Container for grant information. type Grant struct { _ struct{} `type:"structure"` + // The person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. @@ -12377,6 +18547,7 @@ func (s *Grant) SetPermission(v string) *Grant { return s } +// Container for the person being granted permissions. type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -12384,6 +18555,29 @@ type Grantee struct { DisplayName *string `type:"string"` // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. EmailAddress *string `type:"string"` // The canonical user ID of the grantee. @@ -12452,8 +18646,10 @@ func (s *Grantee) SetURI(v string) *Grantee { } type HeadBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + // The bucket name. 
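A sketch of reading a bucket's PublicAccessBlock configuration with the new call; the bucket is hypothetical, and a bucket with no configuration returns an error rather than an empty struct:

    svc := s3.New(session.Must(session.NewSession()))
    out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
        Bucket: aws.String("example-bucket"),
    })
    if err != nil {
        log.Fatal(err)
    }
    cfg := out.PublicAccessBlockConfiguration
    fmt.Println(aws.BoolValue(cfg.BlockPublicAcls), aws.BoolValue(cfg.RestrictPublicBuckets))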
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12474,6 +18670,9 @@ func (s *HeadBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -12494,6 +18693,20 @@ func (s *HeadBucketInput) getBucket() (v string) { return *s.Bucket } +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -12509,8 +18722,10 @@ func (s HeadBucketOutput) GoString() string { } type HeadObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + // The name of the bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12530,6 +18745,8 @@ type HeadObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // The object key. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -12540,28 +18757,32 @@ type HeadObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. 
The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object. @@ -12584,6 +18805,9 @@ func (s *HeadObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12689,9 +18913,24 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadObjectOutput struct { _ struct{} `type:"structure"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Specifies caching behavior along the request/reply chain. @@ -12719,11 +18958,11 @@ type HeadObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -12735,6 +18974,10 @@ type HeadObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. 
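The metadata-key canonicalization described above is new behavior in this SDK version. A sketch of both modes, using the aws.Config.LowerCaseHeaderMaps flag that the comment names (bucket, key, and metadata key names are illustrative):

    sess := session.Must(session.NewSession())
    // Default client: keys come back canonicalized, for example "Camera-Model".
    svc := s3.New(sess)
    // Opt-in client: keys come back lowercased, for example "camera-model".
    lower := s3.New(sess, &aws.Config{LowerCaseHeaderMaps: aws.Bool(true)})
    for _, c := range []*s3.S3{svc, lower} {
        out, err := c.HeadObject(&s3.HeadObjectInput{
            Bucket: aws.String("example-bucket"),
            Key:    aws.String("example-key"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for k, v := range out.Metadata {
            fmt.Println(k, aws.StringValue(v))
        }
    }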
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -12743,17 +18986,70 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission. + // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and destination bucket where Amazon S3 stores object replicas. When you request + // an object (GetObject) or object metadata (HeadObject) from these buckets, + // Amazon S3 will return the x-amz-replication-status header in the response + // as follows: + // + // * If requesting an object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // + // * If requesting an object from the destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if the object + // in your request is a replica that Amazon S3 created. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. 
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time - // of the restored object copy. + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, @@ -12762,18 +19058,25 @@ type HeadObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // Version of the object. 
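Given the x-amz-restore format documented above, a caller can poll HeadObject to see whether a Glacier restore has completed; a rough sketch using the standard strings package (names hypothetical):

    svc := s3.New(session.Must(session.NewSession()))
    out, err := svc.HeadObject(&s3.HeadObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("archived-report.csv"),
    })
    if err != nil {
        log.Fatal(err)
    }
    switch restore := aws.StringValue(out.Restore); {
    case restore == "":
        fmt.Println("no restore requested")
    case strings.Contains(restore, `ongoing-request="true"`):
        fmt.Println("restore still in progress")
    default:
        fmt.Println("restored; header carries the expiry-date:", restore)
    }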
@@ -12879,6 +19182,24 @@ func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
 	return s
 }
 
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput {
+	s.ObjectLockLegalHoldStatus = &v
+	return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput {
+	s.ObjectLockMode = &v
+	return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput {
+	s.ObjectLockRetainUntilDate = &v
+	return s
+}
+
 // SetPartsCount sets the PartsCount field's value.
 func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
 	s.PartsCount = &v
@@ -12945,13 +19266,15 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu
 	return s
 }
 
+// Container for the Suffix element.
 type IndexDocument struct {
 	_ struct{} `type:"structure"`
 
 	// A suffix that is appended to a request that is for a directory on the website
-	// endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/
-	// the data that is returned will be for the object with the key name images/index.html)
-	// The suffix must not be empty and must not include a slash character.
+	// endpoint (for example, if the suffix is index.html and you make a request
+	// to samplebucket/images/, the data that is returned will be for the object
+	// with the key name images/index.html). The suffix must not be empty and must
+	// not include a slash character.
 	//
 	// Suffix is a required field
 	Suffix *string `type:"string" required:"true"`
@@ -12986,6 +19309,7 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
 	return s
 }
 
+// Container element that identifies who initiated the multipart upload.
 type Initiator struct {
 	_ struct{} `type:"structure"`
 
@@ -13032,6 +19356,9 @@ type InputSerialization struct {
 	// Specifies JSON as object's input serialization format.
 	JSON *JSONInput `type:"structure"`
+
+	// Specifies Parquet as object's input serialization format.
+	Parquet *ParquetInput `type:"structure"`
 }
 
 // String returns the string representation
@@ -13062,6 +19389,15 @@ func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization {
 	return s
 }
 
+// SetParquet sets the Parquet field's value.
+func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
+	s.Parquet = v
+	return s
+}
+
+// Specifies the inventory configuration for an Amazon S3 bucket. For more information,
+// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
+// in the Amazon Simple Storage Service API Reference.
 type InventoryConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -13079,12 +19415,16 @@ type InventoryConfiguration struct {
 	// Id is a required field
 	Id *string `type:"string" required:"true"`
 
-	// Specifies which object version(s) to included in the inventory results.
+	// Object versions to include in the inventory list. If set to All, the list
+	// includes all the object versions, which adds the version-related fields VersionId,
+	// IsLatest, and DeleteMarker to the list. If set to Current, the list does
+	// not contain these version-related fields.
// // IncludedObjectVersions is a required field IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - // Specifies whether the inventory is enabled or disabled. + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. // // IsEnabled is a required field IsEnabled *bool `type:"boolean" required:"true"` @@ -13190,6 +19530,7 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } +// Specifies the inventory configuration for an Amazon S3 bucket. type InventoryDestination struct { _ struct{} `type:"structure"` @@ -13239,10 +19580,10 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin type InventoryEncryption struct { _ struct{} `type:"structure"` - // Specifies the use of SSE-KMS to encrypt delievered Inventory reports. + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` - // Specifies the use of SSE-S3 to encrypt delievered Inventory reports. + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } @@ -13283,6 +19624,8 @@ func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { return s } +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. type InventoryFilter struct { _ struct{} `type:"structure"` @@ -13321,13 +19664,19 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` - // The ID of the account that owns the destination bucket. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. AccountId *string `type:"string"` - // The Amazon resource name (ARN) of the bucket where inventory results will + // The Amazon Resource Name (ARN) of the bucket where inventory results will // be published. // // Bucket is a required field @@ -13414,6 +19763,7 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } +// Specifies the schedule for generating inventory results. type InventorySchedule struct { _ struct{} `type:"structure"` @@ -13452,6 +19802,7 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } +// Specifies JSON as object's input serialization format. type JSONInput struct { _ struct{} `type:"structure"` @@ -13475,10 +19826,12 @@ func (s *JSONInput) SetType(v string) *JSONInput { return s } +// Specifies JSON as request's output serialization format. type JSONOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual records in the output. + // The value used to separate individual records in the output. If no value + // is specified, Amazon S3 uses a newline character ('\n'). RecordDelimiter *string `type:"string"` } @@ -13498,12 +19851,12 @@ func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { return s } -// Container for object key name prefix and suffix filtering rules. 
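Putting the inventory types above together, a sketch that enables a daily CSV inventory covering all object versions; the bucket names and destination ARN are hypothetical, and the destination bucket needs a policy that lets S3 deliver reports:

    svc := s3.New(session.Must(session.NewSession()))
    _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
        Bucket: aws.String("example-bucket"),
        Id:     aws.String("daily-all-versions"),
        InventoryConfiguration: &s3.InventoryConfiguration{
            Id:                     aws.String("daily-all-versions"),
            IsEnabled:              aws.Bool(true),
            IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsAll),
            Schedule: &s3.InventorySchedule{
                Frequency: aws.String(s3.InventoryFrequencyDaily),
            },
            Destination: &s3.InventoryDestination{
                S3BucketDestination: &s3.InventoryS3BucketDestination{
                    Bucket: aws.String("arn:aws:s3:::example-inventory-reports"),
                    Format: aws.String(s3.InventoryFormatCsv),
                },
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }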
+// A container for object key name prefix and suffix filtering rules. type KeyFilter struct { _ struct{} `type:"structure"` - // A list of containers for key value pair that defines the criteria for the - // filter rule. + // A list of containers for the key-value pair that defines the criteria for + // the filter rule. FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } @@ -13523,24 +19876,28 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { return s } -// Container for specifying the AWS Lambda notification configuration. +// A container for specifying the configuration for AWS Lambda notifications. type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. + // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 + // invokes when the specified event type occurs. // // LambdaFunctionArn is a required field LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` @@ -13596,9 +19953,12 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } +// Container for lifecycle rules. You can add as many as 1000 rules. type LifecycleConfiguration struct { _ struct{} `type:"structure"` + // Specifies lifecycle configuration rules for an Amazon S3 bucket. + // // Rules is a required field Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -13642,6 +20002,7 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } +// Container for the expiration for the lifecycle of the object. type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -13688,13 +20049,19 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } +// A lifecycle rule for individual objects in an Amazon S3 bucket. type LifecycleRule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. 
+	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+	// in the Amazon Simple Storage Service Developer Guide.
 	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
 
+	// Specifies the expiration for the lifecycle of the object in the form of date,
+	// days, and whether the object has a delete marker.
 	Expiration *LifecycleExpiration `type:"structure"`
 
 	// The Filter is used to identify objects that a Lifecycle Rule applies to.
@@ -13711,10 +20078,17 @@ type LifecycleRule struct {
 	// period in the object's lifetime.
 	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
 
+	// Specifies the transition rule for the lifecycle rule that describes when
+	// noncurrent objects transition to a specific storage class. If your bucket
+	// is versioning-enabled (or versioning is suspended), you can set this action
+	// to request that Amazon S3 transition noncurrent object versions to a specific
+	// storage class at a set period in the object's lifetime.
 	NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
 
 	// Prefix identifying one or more objects to which the rule applies. This is
-	// deprecated; use Filter instead.
+	// no longer used; use Filter instead.
+	//
+	// Deprecated: Prefix has been deprecated
 	Prefix *string `deprecated:"true" type:"string"`
 
 	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
@@ -13723,6 +20097,7 @@ type LifecycleRule struct {
 	// Status is a required field
 	Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
 
+	// Specifies when an Amazon S3 object transitions to a specified storage class.
 	Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
 }
 
@@ -13814,6 +20189,7 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
 
 type LifecycleRuleAndOperator struct {
 	_ struct{} `type:"structure"`
 
+	// Prefix identifying one or more objects to which the rule applies.
 	Prefix *string `type:"string"`
 
 	// All of these tags must exist in the object's tag set in order for the rule
@@ -13929,7 +20305,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
 }
 
 type ListBucketAnalyticsConfigurationsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"`
 
 	// The name of the bucket from which analytics configurations are retrieved.
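A sketch tying the lifecycle types above together: one rule that expires objects under a hypothetical tmp/ prefix after 30 days and aborts incomplete multipart uploads after 7:

    svc := s3.New(session.Must(session.NewSession()))
    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
        Bucket: aws.String("example-bucket"),
        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
            Rules: []*s3.LifecycleRule{{
                ID:         aws.String("expire-tmp"),
                Status:     aws.String(s3.ExpirationStatusEnabled),
                Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
                AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
                    DaysAfterInitiation: aws.Int64(7),
                },
            }},
        },
    })
    if err != nil {
        log.Fatal(err)
    }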
// @@ -13957,6 +20333,9 @@ func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -13983,13 +20362,28 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` // The list of analytics configurations for a bucket. AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // The ContinuationToken that represents where this request began. + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. ContinuationToken *string `type:"string"` // Indicates whether the returned list of analytics configurations is complete. @@ -14038,7 +20432,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketInventoryConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` // The name of the bucket containing the inventory configurations to retrieve. // @@ -14068,6 +20462,9 @@ func (s *ListBucketInventoryConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14094,6 +20491,20 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -14104,8 +20515,9 @@ type ListBucketInventoryConfigurationsOutput struct { // The list of inventory configurations for a bucket. InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - // Indicates whether the returned list of inventory configurations is truncated - // in this response. A value of true indicates that the list is truncated. + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. IsTruncated *bool `type:"boolean"` // The marker used to continue this inventory configuration listing. 
Use the @@ -14149,7 +20561,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketMetricsConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` // The name of the bucket containing the metrics configurations to retrieve. // @@ -14179,6 +20591,9 @@ func (s *ListBucketMetricsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14205,6 +20620,20 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -14278,8 +20707,10 @@ func (s ListBucketsInput) GoString() string { type ListBucketsOutput struct { _ struct{} `type:"structure"` + // The list of buckets owned by the requestor. Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + // The owner of the buckets listed. Owner *Owner `type:"structure"` } @@ -14306,12 +20737,28 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { } type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -14324,6 +20771,13 @@ type ListMultipartUploadsInput struct { // Together with upload-id-marker, this parameter specifies the multipart upload // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. 
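The key-marker/upload-id-marker bookkeeping described in this comment is handled by the generated paginator, which resubmits NextKeyMarker and NextUploadIdMarker until the listing is complete; a sketch (bucket hypothetical):

    svc := s3.New(session.Must(session.NewSession()))
    err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
        Bucket: aws.String("example-bucket"),
    }, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
        for _, u := range page.Uploads {
            fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
        }
        return true // keep paging
    })
    if err != nil {
        log.Fatal(err)
    }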
+ // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -14332,12 +20786,16 @@ type ListMultipartUploadsInput struct { MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` // Lists in-progress uploads only for those keys that begin with the specified - // prefix. + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -14357,6 +20815,9 @@ func (s *ListMultipartUploadsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14413,17 +20874,42 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType *string `type:"string" enum:"EncodingType"` // Indicates whether the returned list of multipart uploads is truncated. A @@ -14454,6 +20940,8 @@ type ListMultipartUploadsOutput struct { // Upload ID after which listing began. UploadIdMarker *string `type:"string"` + // Container for elements related to a particular multipart upload. 
A response
+	// can contain zero or more Upload elements.
 	Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
 }
 
@@ -14547,12 +21035,25 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti
 }
 
 type ListObjectVersionsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
 
+	// The bucket name that contains the objects.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation using an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// A delimiter is a character you use to group keys.
+	// A delimiter is a character that you specify to group keys. All keys that
+	// contain the same string between the prefix and the first occurrence of the
+	// delimiter are grouped under a single result element in CommonPrefixes. These
+	// groups are counted as one result against the max-keys limitation. These keys
+	// are not returned elsewhere in the response.
 	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
 
 	// Requests Amazon S3 to encode the object keys in the response and specifies
@@ -14566,11 +21067,19 @@ type ListObjectVersionsInput struct {
 	// Specifies the key to start with when listing objects in a bucket.
 	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
 
-	// Sets the maximum number of keys returned in the response. The response might
-	// contain fewer keys but will never contain more.
+	// Sets the maximum number of keys returned in the response. By default the
+	// API returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more. If additional keys satisfy the search criteria,
+	// but were not returned because max-keys was exceeded, the response contains
+	// <isTruncated>true</isTruncated>. To return the additional keys, see key-marker
+	// and version-id-marker.
 	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
 
-	// Limits the response to keys that begin with the specified prefix.
+	// Use this parameter to select only those keys that begin with the specified
+	// prefix. You can use prefixes to separate a bucket into different groupings
+	// of keys. (You can think of using prefix to make groups in the same way you'd
+	// use a folder in a file system.) You can use prefix with delimiter to roll
+	// up numerous objects into a single result under CommonPrefixes.
 	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
 
 	// Specifies the object version you want to start listing from.
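Likewise for version listings: rather than threading key-marker and version-id-marker by hand, the generated ListObjectVersionsPages paginator can drive the walk (names hypothetical):

    svc := s3.New(session.Must(session.NewSession()))
    err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
        Bucket: aws.String("example-bucket"),
        Prefix: aws.String("docs/"),
    }, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
        for _, v := range page.Versions {
            fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId), aws.BoolValue(v.IsLatest))
        }
        for _, d := range page.DeleteMarkers {
            fmt.Println("delete marker:", aws.StringValue(d.Key))
        }
        return true
    })
    if err != nil {
        log.Fatal(err)
    }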
@@ -14593,6 +21102,9 @@ func (s *ListObjectVersionsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14649,42 +21161,81 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Container for an object that is a delete marker. DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the // rest of the results. IsTruncated *bool `type:"boolean"` - // Marks the last Key returned in a truncated response. + // Marks the last key returned in a truncated response. KeyMarker *string `type:"string"` + // Specifies the maximum number of objects to return. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` - // Use this value for the key marker request parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. NextKeyMarker *string `type:"string"` - // Use this value for the next version id marker parameter in a subsequent request. 
+ // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. NextVersionIdMarker *string `type:"string"` + // Selects objects that start with the value supplied by this parameter. Prefix *string `type:"string"` + // Marks the last version of the key returned in a truncated response. VersionIdMarker *string `type:"string"` + // Container for version information. Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } @@ -14777,8 +21328,10 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe } type ListObjectsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + // The name of the bucket containing the objects. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14796,8 +21349,9 @@ type ListObjectsInput struct { // Specifies the key to start with when listing objects in a bucket. Marker *string `location:"querystring" locationName:"marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -14825,6 +21379,9 @@ func (s *ListObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14881,37 +21438,77 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up in a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Metadata about each object returned. 
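The v1 ListObjects API predates continuation tokens, so the Marker/NextMarker fields documented here have to be threaded manually, including the fall-back to the last returned key when NextMarker is absent; a sketch (bucket hypothetical):

    svc := s3.New(session.Must(session.NewSession()))
    in := &s3.ListObjectsInput{Bucket: aws.String("example-bucket")}
    for {
        page, err := svc.ListObjects(in)
        if err != nil {
            log.Fatal(err)
        }
        for _, o := range page.Contents {
            fmt.Println(aws.StringValue(o.Key))
        }
        if !aws.BoolValue(page.IsTruncated) {
            break
        }
        // NextMarker is only set when a delimiter was specified; otherwise
        // fall back to the last key, as the NextMarker comment describes.
        marker := aws.StringValue(page.NextMarker)
        if marker == "" && len(page.Contents) > 0 {
            marker = aws.StringValue(page.Contents[len(page.Contents)-1].Key)
        }
        in.Marker = aws.String(marker)
    }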
Contents []*Object `type:"list" flattened:"true"` + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. IsTruncated *bool `type:"boolean"` + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. Marker *string `type:"string"` + // The maximum number of keys returned in the response body. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` // When response is truncated (the IsTruncated element value in the response // is true), you can use the key name in this field as marker in the subsequent // request to get next set of objects. Amazon S3 lists objects in alphabetical // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, + // specified. If response does not include the NextMarker and it is truncated, // you can use the value of the last Key in the response as the marker in the // subsequent request to get the next set of object keys. NextMarker *string `type:"string"` + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` } @@ -14986,16 +21583,23 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { } type ListObjectsV2Input struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - // Name of the bucket to list. + // Bucket name to list. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // ContinuationToken indicates Amazon S3 that the list is being continued on // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // key. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // A delimiter is a character you use to group keys. @@ -15006,11 +21610,12 @@ type ListObjectsV2Input struct { // The owner field is not present in listV2 by default, if you want to return // owner field with each key in the result then set the fetch owner field to - // true + // true. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - // Sets the maximum number of keys returned in the response. 
The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -15022,7 +21627,7 @@ type ListObjectsV2Input struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // listing after this specified key. StartAfter can be any key in the bucket. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -15042,6 +21647,9 @@ func (s *ListObjectsV2Input) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15110,29 +21718,65 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsV2Output struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // If ContinuationToken was sent with the request, it is included in the response. ContinuationToken *string `type:"string"` - // A delimiter is a character you use to group keys. + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. 
+ // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // Set to false if all of the results were returned. Set to true if more keys + // are available to return. If the number of results exceeds that specified + // by MaxKeys, all of the results might not be returned. IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will @@ -15140,24 +21784,31 @@ type ListObjectsV2Output struct { // result will include less than equals 50 keys KeyCount *int64 `type:"integer"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `type:"integer"` - // Name of the bucket to list. + // Bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string"` - // NextContinuationToken is sent when isTruncated is true which means there + // NextContinuationToken is sent when isTruncated is true, which means there // are more keys in the bucket that can be listed. The next list requests to // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken // is obfuscated and is not a real key NextContinuationToken *string `type:"string"` - // Limits the response to keys that begin with the specified prefix. + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // If StartAfter was sent with the request, it is included in the response. StartAfter *string `type:"string"` } @@ -15244,11 +21895,22 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { } type ListPartsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + // Name of the bucket to which the parts are being uploaded. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -15259,10 +21921,11 @@ type ListPartsInput struct { // part numbers will be listed. PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -15287,6 +21950,9 @@ func (s *ListPartsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -15346,23 +22012,51 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListPartsOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. 
It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` - // Identifies who initiated the multipart upload. + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. Initiator *Initiator `type:"structure"` - // Indicates whether the returned list of parts is truncated. + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. IsTruncated *bool `type:"boolean"` // Object key for which the multipart upload was initiated. @@ -15376,18 +22070,26 @@ type ListPartsOutput struct { // in a subsequent request. NextPartNumberMarker *int64 `type:"integer"` + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. Owner *Owner `type:"structure"` - // Part number after which listing begins. + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. PartNumberMarker *int64 `type:"integer"` + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. Parts []*Part `locationName:"Part" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // The class of storage used to store the object. + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -15495,7 +22197,8 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Describes an S3 location that will receive the results of the restore request. +// Describes an Amazon S3 location that will receive the results of the restore +// request. type Location struct { _ struct{} `type:"structure"` @@ -15510,8 +22213,7 @@ type Location struct { // The canned ACL to apply to the restore results. CannedACL *string `type:"string" enum:"ObjectCannedACL"` - // Describes the server-side encryption that will be applied to the restore - // results. + // Contains the type of server-side encryption used. Encryption *Encryption `type:"structure"` // The prefix that is prepended to the restore results for this request. @@ -15623,26 +22325,29 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } -// Container for logging information. Presence of this element indicates that -// logging is enabled. Parameters TargetBucket and TargetPrefix are required -// in this case. +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. 
For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. type LoggingEnabled struct { _ struct{} `type:"structure"` // Specifies the bucket where you want Amazon S3 to store server access logs. // You can have your logs delivered to any bucket that you own, including the // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should + // to deliver their logs to the same target bucket. In this case, you should // choose a different TargetPrefix for each source bucket so that the delivered // log files can be distinguished by key. // // TargetBucket is a required field TargetBucket *string `type:"string" required:"true"` + // Container for granting information. TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. // // TargetPrefix is a required field TargetPrefix *string `type:"string" required:"true"` @@ -15706,8 +22411,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { type MetadataEntry struct { _ struct{} `type:"structure"` + // Name of the Object. Name *string `type:"string"` + // Value of the Object. Value *string `type:"string"` } @@ -15733,6 +22440,65 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } +// A container specifying replication metrics-related settings enabling metrics +// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified +// together with a ReplicationTime block. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + // + // EventThreshold is a required field + EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.EventThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("EventThreshold")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. 
The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -15785,6 +22551,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -15839,6 +22612,9 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). type MetricsFilter struct { _ struct{} `type:"structure"` @@ -15902,6 +22678,7 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } +// Container for the MultipartUpload for the Amazon S3 object. type MultipartUpload struct { _ struct{} `type:"structure"` @@ -15914,6 +22691,7 @@ type MultipartUpload struct { // Key of the object for which the multipart upload was initiated. Key *string `min:"1" type:"string"` + // Specifies the owner of the object that is part of the multipart upload. Owner *Owner `type:"structure"` // The class of storage used to store the object. @@ -15980,8 +22758,8 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -16002,19 +22780,20 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your -// bucket is versioning-enabled (or versioning is suspended), you can set this -// action to request that Amazon S3 transition noncurrent object versions to -// the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period -// in the object's lifetime. +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// is suspended), you can set this action to request that Amazon S3 transition +// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's +// lifetime. 
type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -16043,15 +22822,21 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi return s } -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. type NotificationConfiguration struct { _ struct{} `type:"structure"` + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + // The topic to which notifications are sent and the events for which notifications + // are generated. TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } @@ -16126,10 +22911,17 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` + // Container for specifying the AWS Lambda notification configuration. CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } @@ -16161,13 +22953,13 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf return s } -// Container for object key name filtering rules. For information about key -// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. 
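
As an illustrative sketch of the notification configuration and key-name filter described above (bucket name and queue ARN are placeholders), the following publishes s3:ObjectCreated:* events for keys under images/ to an SQS queue:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:my-queue"), // placeholder
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
				// Only keys under images/ generate a message, via the
				// prefix rule in the S3Key filter.
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{{
							Name:  aws.String("prefix"),
							Value: aws.String("images/"),
						}},
					},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
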
type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` - // Container for object key name prefix and suffix filtering rules. + // A container for object key name prefix and suffix filtering rules. Key *KeyFilter `locationName:"S3Key" type:"structure"` } @@ -16187,17 +22979,39 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } +// An object consists of data and its descriptive metadata. type Object struct { _ struct{} `type:"structure"` + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, + // have ETags that are an MD5 digest of their object data. + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, + // have ETags that are not an MD5 digest of their object data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. ETag *string `type:"string"` + // The name that you assign to an object. You use the object key to retrieve + // the object. Key *string `min:"1" type:"string"` + // The date the Object was Last Modified LastModified *time.Time `type:"timestamp"` + // The owner of the object Owner *Owner `type:"structure"` + // Size in bytes of the object Size *int64 `type:"integer"` // The class of storage used to store the object. @@ -16250,6 +23064,7 @@ func (s *Object) SetStorageClass(v string) *Object { return s } +// Object Identifier is unique value to identify objects. type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -16300,9 +23115,126 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } +// The container element for Object Lock configuration parameters. +type ObjectLockConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether this bucket has an Object Lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` + + // The Object Lock rule in place for the specified object. + Rule *ObjectLockRule `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} + +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. +func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a Legal Hold in place. 
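
A minimal sketch of applying the legal hold documented here with this SDK (bucket and key are placeholders; the bucket is assumed to have been created with Object Lock enabled):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Place a legal hold on one object; setting Status to "OFF" releases it.
	_, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
		Bucket:    aws.String("my-locked-bucket"), // placeholder
		Key:       aws.String("important.doc"),    // placeholder
		LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("ON")},
	})
	if err != nil {
		log.Fatal(err)
	}
}
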
+ Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +// The version of an object. type ObjectVersion struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of that version of the object. ETag *string `type:"string"` // Specifies whether the object is (true) or is not (false) the latest version @@ -16315,6 +23247,7 @@ type ObjectVersion struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // Specifies the owner of the object. Owner *Owner `type:"structure"` // Size in bytes of the object. @@ -16457,11 +23390,14 @@ func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { return s } +// Container for the owner's display name and ID. type Owner struct { _ struct{} `type:"structure"` + // Container for the display name of the owner. DisplayName *string `type:"string"` + // Container for the ID of the owner. ID *string `type:"string"` } @@ -16481,12 +23417,28 @@ func (s *Owner) SetDisplayName(v string) *Owner { return s } -// SetID sets the ID field's value. -func (s *Owner) SetID(v string) *Owner { - s.ID = &v - return s +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// Container for Parquet. 
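
ParquetInput, defined just below, carries no fields; it simply selects Parquet as the input format for S3 Select. An illustrative sketch with this SDK (bucket, key, and query are placeholders) that runs a Select query over a Parquet object and streams the CSV-encoded records back:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:              aws.String("my-bucket"),    // placeholder
		Key:                 aws.String("data.parquet"), // placeholder
		Expression:          aws.String("SELECT * FROM S3Object LIMIT 10"),
		ExpressionType:      aws.String("SQL"),
		InputSerialization:  &s3.InputSerialization{Parquet: &s3.ParquetInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.EventStream.Close()
	// Records arrive as events on the stream; print each payload chunk.
	for ev := range resp.EventStream.Events() {
		if rec, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(rec.Payload))
		}
	}
}
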
+type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParquetInput) GoString() string { + return s.String() } +// Container for elements related to a part. type Part struct { _ struct{} `type:"structure"` @@ -16500,7 +23452,7 @@ type Part struct { // 10,000. PartNumber *int64 `type:"integer"` - // Size of the uploaded part data. + // Size in bytes of the uploaded part data. Size *int64 `type:"integer"` } @@ -16538,16 +23490,42 @@ func (s *Part) SetSize(v int64) *Part { return s } +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +// This data type contains information about progress of an operation. type Progress struct { _ struct{} `type:"structure"` - // Current number of uncompressed object bytes processed. + // The current number of uncompressed object bytes processed. BytesProcessed *int64 `type:"long"` - // Current number of bytes of records payload data returned. + // The current number of bytes of records payload data returned. BytesReturned *int64 `type:"long"` - // Current number of object bytes scanned. + // The current number of object bytes scanned. BytesScanned *int64 `type:"long"` } @@ -16579,6 +23557,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress { return s } +// This data type contains information about the progress event of an operation. type ProgressEvent struct { _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` @@ -16619,10 +23598,104 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. +type PublicAccessBlockConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. 
Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // * PUT Bucket calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to + // PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS services and authorized users within this account if the bucket has a + // public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` +} + +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} + type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` - // Specifies the Accelerate Configuration you want to set for the bucket. + // Container for setting the transfer acceleration state. 
// // AccelerateConfiguration is a required field AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -16652,6 +23725,9 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -16678,6 +23754,20 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16693,13 +23783,16 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { } type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket to which to apply the ACL. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16736,6 +23829,9 @@ func (s *PutBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.AccessControlPolicy != nil { if err := s.AccessControlPolicy.Validate(); err != nil { invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) @@ -16803,6 +23899,20 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -16818,7 +23928,7 @@ func (s PutBucketAclOutput) GoString() string { } type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // @@ -16830,7 +23940,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. 
// // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -16855,6 +23965,9 @@ func (s *PutBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -16895,6 +24008,20 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti return s } +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16910,11 +24037,18 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { } type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + // Specifies the bucket impacted by the corsconfiguration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -16935,6 +24069,9 @@ func (s *PutBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CORSConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) } @@ -16969,6 +24106,20 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -16984,16 +24135,18 @@ func (s PutBucketCorsOutput) GoString() string { } type PutBucketEncryptionInput struct { - _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` - // The name of the bucket for which the server-side encryption configuration - // is set. + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). 
For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. // // ServerSideEncryptionConfiguration is a required field ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -17015,6 +24168,9 @@ func (s *PutBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.ServerSideEncryptionConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) } @@ -17049,6 +24205,20 @@ func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *Serve return s } +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -17064,7 +24234,7 @@ func (s PutBucketEncryptionOutput) GoString() string { } type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` // The name of the bucket where the inventory configuration will be stored. // @@ -17098,6 +24268,9 @@ func (s *PutBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -17141,6 +24314,20 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17156,11 +24343,14 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { } type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + // The name of the bucket for which to set the configuration. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17180,6 +24370,9 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -17211,6 +24404,20 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck return s } +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17226,11 +24433,12 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { } type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1000 rules. LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17250,6 +24458,9 @@ func (s *PutBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -17281,6 +24492,20 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur return s } +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -17296,11 +24521,15 @@ func (s PutBucketLifecycleOutput) GoString() string { } type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + // The name of the bucket for which to set the logging parameters. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for logging status information. 
+ // // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17321,6 +24550,9 @@ func (s *PutBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.BucketLoggingStatus == nil { invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) } @@ -17355,6 +24587,20 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -17370,7 +24616,7 @@ func (s PutBucketLoggingOutput) GoString() string { } type PutBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` // The name of the bucket for which the metrics configuration is set. // @@ -17404,6 +24650,9 @@ func (s *PutBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -17447,6 +24696,20 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17462,13 +24725,15 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { } type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. 
// // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -17490,6 +24755,9 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -17524,6 +24792,20 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17539,11 +24821,15 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { } type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The container for the configuration. + // // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17564,6 +24850,9 @@ func (s *PutBucketNotificationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -17593,6 +24882,20 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio return s } +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -17608,8 +24911,10 @@ func (s PutBucketNotificationOutput) GoString() string { } type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + // The name of the bucket. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -17639,6 +24944,9 @@ func (s *PutBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Policy == nil { invalidParams.Add(request.NewErrParamRequired("Policy")) } @@ -17674,6 +24982,20 @@ func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -17689,16 +25011,20 @@ func (s PutBucketPolicyOutput) GoString() string { } type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + // The name of the bucket + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. // // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation @@ -17717,6 +25043,9 @@ func (s *PutBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.ReplicationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) } @@ -17751,6 +25080,26 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -17766,11 +25115,15 @@ func (s PutBucketReplicationOutput) GoString() string { } type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + // The bucket name. 
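The new Token field on PutBucketReplicationInput carries the x-amz-bucket-object-lock-token header, which permits replication to be configured on an Object Lock bucket. A minimal sketch (ARNs, names, and the token value are placeholders; the service may demand more rule fields than Validate() checks for):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"),
		Token:  aws.String("object-lock-token"), // placeholder; only needed for Object Lock buckets
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
			Rules: []*s3.ReplicationRule{{
				Status: aws.String(s3.ReplicationRuleStatusEnabled),
				Filter: &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::destination-bucket"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}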
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for Payer. + // // RequestPaymentConfiguration is a required field RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17791,6 +25144,9 @@ func (s *PutBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.RequestPaymentConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) } @@ -17825,6 +25181,20 @@ func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request return s } +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -17840,11 +25210,15 @@ func (s PutBucketRequestPaymentOutput) GoString() string { } type PutBucketTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the TagSet and Tag elements. + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17865,6 +25239,9 @@ func (s *PutBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Tagging == nil { invalidParams.Add(request.NewErrParamRequired("Tagging")) } @@ -17899,6 +25276,20 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -17914,8 +25305,10 @@ func (s PutBucketTaggingOutput) GoString() string { } type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -17923,6 +25316,8 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + // Container for setting the versioning state. 
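A minimal sketch of the versioning call whose input is being documented here (the bucket name is a placeholder; MFA is omitted, so the bucket must not require it):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("my-bucket"),
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}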
+ // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17943,6 +25338,9 @@ func (s *PutBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.VersioningConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) } @@ -17978,6 +25376,20 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -17993,11 +25405,15 @@ func (s PutBucketVersioningOutput) GoString() string { } type PutBucketWebsiteInput struct { - _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the request. + // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -18018,6 +25434,9 @@ func (s *PutBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.WebsiteConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) } @@ -18046,10 +25465,24 @@ func (s *PutBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } -// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. -func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { - s.WebsiteConfiguration = v - return s +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } type PutBucketWebsiteOutput struct { @@ -18067,13 +25500,25 @@ func (s PutBucketWebsiteOutput) GoString() string { } type PutObjectAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. 
For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18093,13 +25538,16 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Key for which the PUT operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. 
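Since the Bucket documentation above now covers access points, here is a minimal sketch that sets a canned ACL through an access point ARN rather than a plain bucket name (all identifiers are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		// An access point ARN is accepted wherever hasEndpointARN reports true.
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("report.csv"),
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
	})
	if err != nil {
		log.Fatal(err)
	}
}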
@@ -18122,6 +25570,9 @@ func (s *PutObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -18213,6 +25664,20 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -18238,44 +25703,64 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { } type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. Body io.ReadSeeker `type:"blob"` - // Name of the bucket to which the PUT operation was initiated. + // Bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. 
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -18298,46 +25783,86 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the symmetric customer managed AWS Key Management Service (AWS KMS) + // customer master key (CMK) that will be used for the object. If you specify + // x-amz-server-side-encryption:aws:kms but do not provide + // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed + // CMK in AWS to protect the data.
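A minimal sketch of a PUT that exercises the new SSEKMSEncryptionContext header together with a KMS key (the bucket, key name, and KMS key ARN are placeholders; note the context must be base64-encoded JSON, as the comment above states):

package main

import (
	"encoding/base64"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// The encryption context is JSON, base64-encoded before it goes on the wire.
	ctx := base64.StdEncoding.EncodeToString([]byte(`{"department":"finance"}`))
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:                  aws.String("my-bucket"),
		Key:                     aws.String("secret.txt"),
		Body:                    strings.NewReader("classified"),
		ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:             aws.String("arn:aws:kms:us-west-2:123456789012:key/placeholder-key-id"),
		SSEKMSEncryptionContext: aws.String(ctx),
	})
	if err != nil {
		log.Fatal(err)
	}
}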
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // If you don't specify, S3 Standard is the default storage class. Amazon S3 + // supports other storage classes. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object. The tag-set must be encoded as URL Query parameters + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -18357,6 +25882,9 @@ func (s *PutObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -18479,6 +26007,24 @@ func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { s.RequestPayer = &v @@ -18510,33 +26056,313 @@ func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
-func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { - s.SSEKMSKeyId = &v - return s -} +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type PutObjectLegalHoldInput struct { + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` + + // The bucket name containing the object that you want to place a Legal Hold + // on. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object that you want to place a Legal Hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the Legal Hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a Legal Hold on. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} + +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose Object Lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The Object Lock configuration that you want to apply to the specified bucket. 
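Before moving on to the lock-configuration input above, a minimal sketch of placing a legal hold with the PutObjectLegalHold API just introduced (names are placeholders; the bucket must have Object Lock enabled):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
		Bucket: aws.String("my-lock-bucket"),
		Key:    aws.String("contract.pdf"),
		LegalHold: &s3.ObjectLockLegalHold{
			Status: aws.String(s3.ObjectLockLegalHoldStatusOn),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}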
+ ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation +func (s PutObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v + return s +} + +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { - s.ServerSideEncryption = &v - return s + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// SetStorageClass sets the StorageClass field's value. 
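A minimal sketch of applying a default-retention Object Lock configuration with the input above (names are placeholders; the bucket must have been created with Object Lock enabled, or a valid Token must be supplied):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-lock-bucket"),
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}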
-func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v - return s +// String returns the string representation +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// SetTagging sets the Tagging field's value. -func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { - s.Tagging = &v - return s +// GoString returns the string representation +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { - s.WebsiteRedirectLocation = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v return s } @@ -18546,8 +26372,11 @@ type PutObjectOutput struct { // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the @@ -18560,16 +26389,25 @@ type PutObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. @@ -18616,6 +26454,12 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { s.SSEKMSKeyId = &v @@ -18629,55 +26473,341 @@ func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { } // SetVersionId sets the VersionId field's value. -func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether this operation should bypass Governance-mode restrictions. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The container element for the Object Retention configuration. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The version ID for the object that you want to apply this Object Retention + // configuration to. 
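A minimal sketch of the retention call whose input is being defined here (names are placeholders; a COMPLIANCE-mode retention date cannot be shortened once set, so GOVERNANCE mode is usually safer to experiment with):

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket: aws.String("my-lock-bucket"),
		Key:    aws.String("contract.pdf"),
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
			RetainUntilDate: aws.Time(time.Now().AddDate(1, 0, 0)), // hold for one year
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}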
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name containing the object. 
+ // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Name of the object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container for the TagSet and Tag elements + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The versionId of the object that the tag-set will be added to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was added to. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { s.VersionId = &v return s } -type PutObjectTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` +type PutPublicAccessBlockInput struct { + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. + // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutObjectTaggingInput) String() string { +func (s PutPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectTaggingInput) GoString() string { +func (s PutPublicAccessBlockInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
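A minimal sketch of replacing an object's tag set with the PutObjectTagging input completed above (names and tags are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("report.csv"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("alpha")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}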
-func (s *PutObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) } if invalidParams.Len() > 0 { @@ -18687,77 +26817,73 @@ func (s *PutObjectTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { s.Bucket = &v return s } -func (s *PutObjectTaggingInput) getBucket() (v string) { +func (s *PutPublicAccessBlockInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -// SetKey sets the Key field's value. -func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { - s.Key = &v +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v return s } -// SetTagging sets the Tagging field's value. -func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { - s.Tagging = v - return s +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { - s.VersionId = &v - return s +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } -type PutObjectTaggingOutput struct { +type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` - - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s PutObjectTaggingOutput) String() string { +func (s PutPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectTaggingOutput) GoString() string { +func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { - s.VersionId = &v - return s -} - -// Container for specifying an configuration when you want Amazon S3 to publish -// events to an Amazon Simple Queue Service (Amazon SQS) queue. 
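A minimal sketch of the PutPublicAccessBlock call added above, turning on all four protections at once (the bucket name is a placeholder):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("my-bucket"),
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}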
+// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. type QueueConfiguration struct { _ struct{} `type:"structure"` + // A collection of bucket events for which to send notifications + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` - // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects - // events of specified type. + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. // // QueueArn is a required field QueueArn *string `locationName:"Queue" type:"string" required:"true"` @@ -18813,18 +26939,27 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } +// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) +// for the same purposes. This data type specifies the configuration for publishing +// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon +// S3 detects specified events. type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` - // Bucket event for which to send notifications. + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of bucket events for which to send notifications Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. Queue *string `type:"string"` } @@ -18862,6 +26997,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } +// The container for the records event. type RecordsEvent struct { _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` @@ -18901,6 +27037,17 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
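A QueueConfiguration is normally installed through PutBucketNotificationConfiguration. A minimal sketch, assuming a hypothetical bucket name and queue ARN, and assuming the queue's access policy already allows s3.amazonaws.com to send messages:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-queue"),
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
				// Only notify for keys under uploads/.
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{
							{Name: aws.String("prefix"), Value: aws.String("uploads/")},
						},
					},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}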
+func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.Payload + return msg, err +} + +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. type Redirect struct { _ struct{} `type:"structure"` @@ -18911,8 +27058,8 @@ type Redirect struct { // siblings is present. HttpRedirectCode *string `type:"string"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` // The object key prefix to use in the redirect request. For example, to redirect @@ -18924,7 +27071,7 @@ type Redirect struct { ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can + // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. ReplaceKeyWith *string `type:"string"` } @@ -18969,16 +27116,18 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` - // Name of the host where requests will be redirected. + // Name of the host where requests are redirected. // // HostName is a required field HostName *string `type:"string" required:"true"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` } @@ -19017,19 +27166,21 @@ func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { return s } -// Container for replication rules. You can add as many as 1,000 rules. Total -// replication configuration size can be up to 2 MB. +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. type ReplicationConfiguration struct { _ struct{} `type:"structure"` - // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating - // the objects. + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. // // Role is a required field Role *string `type:"string" required:"true"` - // Container for information about a particular replication rule. Replication - // configuration must have at least one rule and can contain up to 1,000 rules. + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. 
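The Redirect and Condition pair documented above is used inside a website configuration's routing rules. A sketch with a hypothetical bucket that rewrites every key under docs/ to documents/:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"),
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			RoutingRules: []*s3.RoutingRule{{
				// Requests for docs/* are redirected to documents/*.
				Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
				Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}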
// // Rules is a required field Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` @@ -19083,29 +27234,71 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// Container for information about a particular replication rule. +// Specifies which Amazon S3 objects to replicate and where to store the replicas. type ReplicationRule struct { _ struct{} `type:"structure"` - // Container for replication destination information. + // Specifies whether Amazon S3 replicates the delete markers. If you specify + // a Filter, you must specify this element. However, in the latest version of + // replication configuration (when Filter is specified), Amazon S3 doesn't replicate + // delete markers. Therefore, the DeleteMarkerReplication element can contain + // only Disabled. For an example configuration, see Basic Rule + // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, Amazon + // S3 handled replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` + + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). // // Destination is a required field Destination *Destination `type:"structure" required:"true"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 Developer Guide. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + Filter *ReplicationRuleFilter `type:"structure"` + + // A unique identifier for the rule. The maximum value is 255 characters. ID *string `type:"string"` - // Object keyname prefix identifying one or more objects to which the rule applies. - // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes - // are not supported. + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // The priority associated with the rule. If you specify multiple rules in a + // replication configuration, Amazon S3 prioritizes the rules to prevent conflicts + // when filtering. If two or more rules identify the same object based on a + // specified filter, the rule with higher priority takes precedence. 
For example: + // + // * Same object quality prefix-based filter criteria if prefixes you specified + // in multiple rules overlap + // + // * Same object qualify tag-based filter criteria specified in multiple + // rules + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon Simple Storage Service Developer Guide. + Priority *int64 `type:"integer"` - // Container for filters that define which source objects should be replicated. + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - // The rule is ignored if status is not Enabled. + // Specifies whether the rule is enabled. // // Status is a required field Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` @@ -19127,9 +27320,6 @@ func (s *ReplicationRule) Validate() error { if s.Destination == nil { invalidParams.Add(request.NewErrParamRequired("Destination")) } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -19138,6 +27328,16 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } if s.SourceSelectionCriteria != nil { if err := s.SourceSelectionCriteria.Validate(); err != nil { invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) @@ -19150,36 +27350,283 @@ func (s *ReplicationRule) Validate() error { return nil } -// SetDestination sets the Destination field's value. -func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { - s.Destination = v +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. 
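A sketch of a current-format (V2) replication configuration using the new Filter, Priority, and DeleteMarkerReplication members described above; the role and bucket ARNs are hypothetical. Because the rule specifies a Filter, DeleteMarkerReplication must be set, and in V2 configurations it can only be Disabled:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"),
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/example-replication-role"),
			Rules: []*s3.ReplicationRule{{
				ID:       aws.String("replicate-logs"),
				Priority: aws.Int64(1),
				Status:   aws.String("Enabled"),
				// V2 rule: Filter replaces the deprecated top-level Prefix.
				Filter: &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String("Disabled"),
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}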
+func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // An array of tags containing key and value pairs. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. 
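When a rule must match on a prefix and one or more tags at once, the filters are wrapped in the And operator, exactly as the comment above describes. A short sketch of such a filter, with hypothetical prefix and tag values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Matches objects under logs/ that also carry the tag tier=hot.
	filter := &s3.ReplicationRuleFilter{
		And: &s3.ReplicationRuleAndOperator{
			Prefix: aws.String("logs/"),
			Tags: []*s3.Tag{
				{Key: aws.String("tier"), Value: aws.String("hot")},
			},
		},
	}
	fmt.Println(filter)
}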
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v return s } -// SetID sets the ID field's value. -func (s *ReplicationRule) SetID(v string) *ReplicationRule { - s.ID = &v +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v return s } -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { - s.Prefix = &v - return s +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid values: 15 minutes. 
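A sketch of a Destination that enables S3 RTC, pairing ReplicationTime with the Metrics block it requires; 15 minutes is the only valid threshold, and the bucket ARN is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	dest := &s3.Destination{
		Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
		ReplicationTime: &s3.ReplicationTime{
			Status: aws.String("Enabled"),
			Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
		// S3 RTC must be accompanied by replication metrics with a
		// matching event threshold.
		Metrics: &s3.Metrics{
			Status:         aws.String("Enabled"),
			EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
	}
	fmt.Println(dest)
}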
+ Minutes *int64 `type:"integer"` } -// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. -func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { - s.SourceSelectionCriteria = v - return s +// String returns the string representation +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) } -// SetStatus sets the Status field's value. -func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { - s.Status = &v +// GoString returns the string representation +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v return s } +// Container for Payer. type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -19218,6 +27665,7 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur return s } +// Container for specifying if periodic QueryProgress messages should be sent. type RequestProgress struct { _ struct{} `type:"structure"` @@ -19243,23 +27691,36 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { } type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + // The bucket name or containing the object to restore. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // VersionId used to reference a specific version of the object. 
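A sketch of restoring an archived object for two days through the Standard retrieval tier; the bucket and key are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("archive/2019/report.csv"),
		RestoreRequest: &s3.RestoreRequest{
			// Keep the temporary copy available for two days.
			Days: aws.Int64(2),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String("Standard"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}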
VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -19279,6 +27740,9 @@ func (s *RestoreObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -19334,6 +27798,20 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { return s } +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -19379,7 +27857,7 @@ type RestoreRequest struct { // The optional description for the job. Description *string `type:"string"` - // Glacier related parameters pertaining to this job. Do not use with restores + // S3 Glacier related parameters pertaining to this job. Do not use with restores // that specify OutputLocation. GlacierJobParameters *GlacierJobParameters `type:"structure"` @@ -19389,7 +27867,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -19473,6 +27951,10 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon Simple Storage Service Developer Guide. type RoutingRule struct { _ struct{} `type:"structure"` @@ -19484,7 +27966,7 @@ type RoutingRule struct { // Container for redirect information. You can redirect requests to another // host, to another page, or with another protocol. In the event of an error, - // you can can specify a different error code to return. + // you can specify a different error code to return. // // Redirect is a required field Redirect *Redirect `type:"structure" required:"true"` @@ -19525,16 +28007,24 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. For examples, see Put +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) type Rule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. 
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object. Expiration *LifecycleExpiration `type:"structure"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. + // Unique identifier for the rule. The value can't be longer than 255 characters. ID *string `type:"string"` // Specifies when noncurrent object versions expire. Upon expiration, Amazon @@ -19545,24 +28035,30 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your - // bucket is versioning-enabled (or versioning is suspended), you can set this - // action to request that Amazon S3 transition noncurrent object versions to - // the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period - // in the object's lifetime. + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // is suspended), you can set this action to request that Amazon S3 transition + // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's + // lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - // Prefix identifying one or more objects to which the rule applies. + // Object key prefix that identifies one or more objects to which this rule + // applies. // // Prefix is a required field Prefix *string `type:"string" required:"true"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. // // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon Simple Storage Service Developer Guide. Transition *Transition `type:"structure"` } @@ -19640,15 +28136,15 @@ func (s *Rule) SetTransition(v *Transition) *Rule { return s } -// Specifies the use of SSE-KMS to encrypt delievered Inventory reports. +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the AWS Key Management Service (KMS) master encryption - // key to use for encrypting Inventory reports. + // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer + // managed customer master key (CMK) to use for encrypting inventory reports. 
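The Rule type above belongs to the older PutBucketLifecycle operation; newer code typically uses PutBucketLifecycleConfiguration with LifecycleRule, which supports filters. A sketch of a prefix-scoped rule that transitions objects to Glacier after 30 days, expires them after a year, and aborts stale multipart uploads; the bucket and prefix are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketLifecycle(&s3.PutBucketLifecycleInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.LifecycleConfiguration{
			Rules: []*s3.Rule{{
				ID:     aws.String("archive-logs"),
				Prefix: aws.String("logs/"),
				Status: aws.String("Enabled"),
				Transition: &s3.Transition{
					Days:         aws.Int64(30),
					StorageClass: aws.String("GLACIER"),
				},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}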
// // KeyId is a required field - KeyId *string `type:"string" required:"true"` + KeyId *string `type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -19680,7 +28176,7 @@ func (s *SSEKMS) SetKeyId(v string) *SSEKMS { return s } -// Specifies the use of SSE-S3 to encrypt delievered Inventory reports. +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. type SSES3 struct { _ struct{} `locationName:"SSE-S3" type:"structure"` } @@ -19695,75 +28191,51 @@ func (s SSES3) GoString() string { return s.String() } -// SelectObjectContentEventStream provides handling of EventStreams for -// the SelectObjectContent API. -// -// Use this type to receive SelectObjectContentEventStream events. The events -// can be read from the Events channel member. -// -// The events that can be received are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -type SelectObjectContentEventStream struct { - // Reader is the EventStream reader for the SelectObjectContentEventStream - // events. This value is automatically set by the SDK when the API call is made - // Use this member when unit testing your code with the SDK to mock out the - // EventStream Reader. - // - // Must not be nil. - Reader SelectObjectContentEventStreamReader +// Specifies the byte range of the object to get the records from. A record +// is processed when its first byte is contained by the range. This parameter +// is optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + _ struct{} `type:"structure"` - // StreamCloser is the io.Closer for the EventStream connection. For HTTP - // EventStream this is the response Body. The stream will be closed when - // the Close method of the EventStream is called. - StreamCloser io.Closer + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. If only the End parameter is supplied, it is interpreted + // to mean scan the last N bytes of the file. For example, 50 + // means scan the last 50 bytes. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is supplied, + // it means scan from that point to the end of the file.For example; 50 + // means scan from byte 50 until the end of the file. + Start *int64 `type:"long"` } -// Close closes the EventStream. This will also cause the Events channel to be -// closed. You can use the closing of the Events channel to terminate your -// application's read from the API's EventStream. -// -// Will close the underlying EventStream reader. For EventStream over HTTP -// connection this will also close the HTTP connection. -// -// Close must be called when done using the EventStream API. Not calling Close -// may result in resource leaks. -func (es *SelectObjectContentEventStream) Close() (err error) { - es.Reader.Close() - return es.Err() +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) } -// Err returns any error that occurred while reading EventStream Events from -// the service API's response. Returns nil if there were no errors. 
-func (es *SelectObjectContentEventStream) Err() error { - if err := es.Reader.Err(); err != nil { - return err - } - es.StreamCloser.Close() +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} - return nil +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s } -// Events returns a channel to read EventStream Events from the -// SelectObjectContent API. -// -// These events are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { - return es.Reader.Events() +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s } // SelectObjectContentEventStreamEvent groups together all EventStream -// events read from the SelectObjectContent API. +// events writes for SelectObjectContentEventStream. // // These events are: // @@ -19774,11 +28246,12 @@ func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEve // * StatsEvent type SelectObjectContentEventStreamEvent interface { eventSelectObjectContentEventStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler } -// SelectObjectContentEventStreamReader provides the interface for reading EventStream -// Events from the SelectObjectContent API. The -// default implementation for this interface will be SelectObjectContentEventStream. +// SelectObjectContentEventStreamReader provides the interface for reading to the stream. The +// default implementation for this interface will be SelectObjectContentEventStreamData. // // The reader's Close method must allow multiple concurrent calls. // @@ -19789,72 +28262,59 @@ type SelectObjectContentEventStreamEvent interface { // * ProgressEvent // * RecordsEvent // * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent type SelectObjectContentEventStreamReader interface { // Returns a channel of events as they are read from the event stream. Events() <-chan SelectObjectContentEventStreamEvent - // Close will close the underlying event stream reader. For event stream over - // HTTP this will also close the HTTP connection. + // Close will stop the reader reading events from the stream. Close() error - // Returns any error that has occured while reading from the event stream. + // Returns any error that has occurred while reading from the event stream. 
Err() error } type readSelectObjectContentEventStream struct { eventReader *eventstreamapi.EventReader stream chan SelectObjectContentEventStreamEvent - errVal atomic.Value + err *eventstreamapi.OnceError done chan struct{} closeOnce sync.Once } -func newReadSelectObjectContentEventStream( - reader io.ReadCloser, - unmarshalers request.HandlerList, - logger aws.Logger, - logLevel aws.LogLevelType, -) *readSelectObjectContentEventStream { +func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream { r := &readSelectObjectContentEventStream{ - stream: make(chan SelectObjectContentEventStreamEvent), - done: make(chan struct{}), + eventReader: eventReader, + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), } - - r.eventReader = eventstreamapi.NewEventReader( - reader, - protocol.HandlerPayloadUnmarshal{ - Unmarshalers: unmarshalers, - }, - r.unmarshalerForEventType, - ) - r.eventReader.UseLogger(logger, logLevel) + go r.readEventStream() return r } -// Close will close the underlying event stream reader. For EventStream over -// HTTP this will also close the HTTP connection. +// Close will close the underlying event stream reader. func (r *readSelectObjectContentEventStream) Close() error { r.closeOnce.Do(r.safeClose) - return r.Err() } +func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} { + return r.done +} + func (r *readSelectObjectContentEventStream) safeClose() { close(r.done) - err := r.eventReader.Close() - if err != nil { - r.errVal.Store(err) - } } func (r *readSelectObjectContentEventStream) Err() error { - if v := r.errVal.Load(); v != nil { - return v.(error) - } - - return nil + return r.err.Err() } func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { @@ -19862,6 +28322,7 @@ func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContent } func (r *readSelectObjectContentEventStream) readEventStream() { + defer r.Close() defer close(r.stream) for { @@ -19876,7 +28337,10 @@ func (r *readSelectObjectContentEventStream) readEventStream() { return default: } - r.errVal.Store(err) + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) return } @@ -19888,44 +28352,67 @@ func (r *readSelectObjectContentEventStream) readEventStream() { } } -func (r *readSelectObjectContentEventStream) unmarshalerForEventType( - eventType string, -) (eventstreamapi.Unmarshaler, error) { +type unmarshalerForSelectObjectContentEventStreamEvent struct { + metadata protocol.ResponseMetadata +} + +func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { switch eventType { case "Cont": return &ContinuationEvent{}, nil - case "End": return &EndEvent{}, nil - case "Progress": return &ProgressEvent{}, nil - case "Records": return &RecordsEvent{}, nil - case "Stats": return &StatsEvent{}, nil default: - return nil, awserr.New( - request.ErrCodeSerialization, - fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), - nil, - ) + return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil } } +// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the +// SelectObjectContentEventStream group of events when 
an unknown event is received. +type SelectObjectContentEventStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The SelectObjectContentEventStreamUnknownEvent is and event in the SelectObjectContentEventStream +// group of events. +func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value. +// This method is only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + // Request to filter the contents of an Amazon S3 object based on a simple Structured // Query Language (SQL) statement. In the request, along with the SQL expression, -// you must also specify a data serialization format (JSON or CSV) of the object. -// Amazon S3 uses this to parse object data into records, and returns only records +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records // that match the specified SQL expression. You must also specify the data serialization -// format for the response. For more information, go to S3Select API Documentation -// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +// format for the response. For more information, see S3Select API Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). type SelectObjectContentInput struct { _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The S3 Bucket. + // The S3 bucket. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19935,7 +28422,7 @@ type SelectObjectContentInput struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (e.g., SQL). + // The type of the provided expression (for example, SQL). // // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -19945,7 +28432,7 @@ type SelectObjectContentInput struct { // InputSerialization is a required field InputSerialization *InputSerialization `type:"structure" required:"true"` - // The Object Key. + // The object key. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -19958,17 +28445,35 @@ type SelectObjectContentInput struct { // Specifies if periodic request progress information should be enabled. RequestProgress *RequestProgress `type:"structure"` - // The SSE Algorithm used to encrypt the object. For more information, go to - // Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Algorithm used to encrypt the object. 
For more information, see Server-Side + // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // The SSE Customer Key. For more information, go to Server-Side Encryption - // (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // The SSE Customer Key MD5. For more information, go to Server-Side Encryption - // (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRangemay be used in the following ways: + // + // * 50100 - process only + // the records starting between the bytes 50 and 100 (inclusive, counting + // from zero) + // + // * 50 - process only the records + // starting after the byte 50 + // + // * 50 - process only the records within + // the last 50 bytes of the file. + ScanRange *ScanRange `type:"structure"` } // String returns the string representation @@ -19987,6 +28492,9 @@ func (s *SelectObjectContentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Expression == nil { invalidParams.Add(request.NewErrParamRequired("Expression")) } @@ -20086,11 +28594,30 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC return s } +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` - // Use EventStream to use the API's stream. 
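A sketch of driving SelectObjectContent end to end: scan only the first kilobyte via the new ScanRange member, then drain the event stream, switching on the event types listed above. The bucket, key, and query are hypothetical:

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),
		Key:            aws.String("data/users.csv"),
		Expression:     aws.String("SELECT * FROM S3Object s LIMIT 5"),
		ExpressionType: aws.String("SQL"),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		// Only records whose first byte falls in the first KiB are processed.
		ScanRange: &s3.ScanRange{Start: aws.Int64(0), End: aws.Int64(1024)},
	})
	if err != nil {
		log.Fatal(err)
	}
	stream := resp.EventStream
	defer stream.Close()
	for ev := range stream.Events() {
		switch e := ev.(type) {
		case *s3.RecordsEvent:
			os.Stdout.Write(e.Payload)
		case *s3.StatsEvent:
			log.Printf("scanned %d bytes", aws.Int64Value(e.Details.BytesScanned))
		case *s3.EndEvent:
			log.Println("done")
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}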
- EventStream *SelectObjectContentEventStream `type:"structure"` + EventStream *SelectObjectContentEventStream } // String returns the string representation @@ -20103,29 +28630,17 @@ func (s SelectObjectContentOutput) GoString() string { return s.String() } -// SetEventStream sets the EventStream field's value. func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { s.EventStream = v return s } +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} -func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { - if r.Error != nil { - return - } - reader := newReadSelectObjectContentEventStream( - r.HTTPResponse.Body, - r.Handlers.UnmarshalStream, - r.Config.Logger, - r.Config.LogLevel.Value(), - ) - go reader.readEventStream() - - eventStream := &SelectObjectContentEventStream{ - StreamCloser: r.HTTPResponse.Body, - Reader: reader, - } - s.EventStream = eventStream +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream } // Describes the parameters for Select job types. @@ -20137,7 +28652,7 @@ type SelectParameters struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (e.g., SQL). + // The type of the provided expression (for example, SQL). // // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -20210,14 +28725,32 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec } // Describes the default server-side encryption to apply to new objects in the -// bucket. If Put Object request does not specify any server-side encryption, -// this default encryption will be applied. +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // KMS master key ID to use for the default encryption. This parameter is allowed - // if SSEAlgorithm is aws:kms. - KMSMasterKeyID *string `type:"string"` + // AWS Key Management Service (KMS) customer master key ID to use for the default + // encryption. This parameter is allowed if and only if SSEAlgorithm is set + // to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // However, if you are using encryption with cross-account operations, you must + // use a fully qualified CMK ARN. For more information, see Using encryption + // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more + // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. 
+ KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. // @@ -20260,8 +28793,7 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc return s } -// Container for server-side encryption configuration rules. Currently S3 supports -// one rule only. +// Specifies the default server-side-encryption configuration. type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -20311,13 +28843,12 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu return s } -// Container for information about a particular server-side encryption configuration -// rule. +// Specifies the default server-side encryption configuration. type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // Describes the default server-side encryption to apply to new objects in the - // bucket. If Put Object request does not specify any server-side encryption, + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, // this default encryption will be applied. ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } @@ -20353,11 +28884,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv return s } -// Container for filters that define which source objects should be replicated. +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // Container for filter information of selection of KMS Encrypted S3 objects. + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } @@ -20392,12 +28929,13 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb return s } -// Container for filter information of selection of KMS Encrypted S3 objects. +// A container for filter information for the selection of S3 objects encrypted +// with AWS KMS. type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` - // The replication for KMS encrypted S3 objects is disabled if status is not - // Enabled. + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using a customer master key (CMK) stored in AWS Key Management Service. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -20432,16 +28970,17 @@ func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { return s } +// Container for the stats details. type Stats struct { _ struct{} `type:"structure"` - // Total number of uncompressed object bytes processed. + // The total number of uncompressed object bytes processed. BytesProcessed *int64 `type:"long"` - // Total number of bytes of records payload data returned. + // The total number of bytes of records payload data returned. 
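A sketch of installing the default-encryption rule described above, using SSE-KMS with the example key ARN from the comment; the bucket name is hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String("aws:kms"),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}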
BytesReturned *int64 `type:"long"` - // Total number of object bytes scanned. + // The total number of object bytes scanned. BytesScanned *int64 `type:"long"` } @@ -20473,6 +29012,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats { return s } +// Container for the Stats Event. type StatsEvent struct { _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` @@ -20513,11 +29053,26 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. type StorageClassAnalysis struct { _ struct{} `type:"structure"` - // A container used to describe how data related to the storage class analysis - // should be exported. + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. DataExport *StorageClassAnalysisDataExport `type:"structure"` } @@ -20552,6 +29107,8 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) return s } +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -20609,10 +29166,11 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora return s } +// A container of a key value name pair. type Tag struct { _ struct{} `type:"structure"` - // Name of the tag. + // Name of the object key. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -20664,9 +29222,12 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// Container for TagSet elements. type Tagging struct { _ struct{} `type:"structure"` + // A collection for a set of tags + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -20710,9 +29271,11 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { return s } +// Container for granting information. type TargetGrant struct { _ struct{} `type:"structure"` + // Container for the person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. @@ -20756,25 +29319,30 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } -// Container for specifying the configuration when you want Amazon S3 to publish -// events to an Amazon Simple Notification Service (Amazon SNS) topic. +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. type TopicConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event about which to send notifications. 
For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` - // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects - // events of specified type. + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. // // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -20830,15 +29398,23 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) +// instead. type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of events related to objects Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` @@ -20881,18 +29457,22 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. type Transition struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. 
+ // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. Days *int64 `type:"integer"` - // The class of storage used to store the object. + // The storage class to which you want the object to transition. StorageClass *string `type:"string" enum:"TransitionStorageClass"` } @@ -20925,8 +29505,10 @@ func (s *Transition) SetStorageClass(v string) *Transition { } type UploadPartCopyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20952,23 +29534,26 @@ type UploadPartCopyInput struct { // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte // offsets to copy. For example, bytes=0-9 indicates that you want to copy the - // first ten bytes of the source. You can copy a range only if the source object - // is greater than 5 GB. + // first 10 bytes of the source. You can copy a range only if the source object + // is greater than 5 MB. CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -20978,26 +29563,28 @@ type UploadPartCopyInput struct { // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. @@ -21022,6 +29609,9 @@ func (s *UploadPartCopyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -21167,9 +29757,24 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` + // Container for all response elements. 
CopyPartResult *CopyPartResult `type:"structure"` // The version of the source object that was copied, if you have enabled versioning @@ -21186,16 +29791,17 @@ type UploadPartCopyOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -21252,7 +29858,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy } type UploadPartInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` // Object data. Body io.ReadSeeker `type:"blob"` @@ -21266,7 +29872,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // Object key for which the multipart upload was initiated. @@ -21280,26 +29888,28 @@ type UploadPartInput struct { // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. 
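// Editor's sketch, not part of this patch: driving this struct (UploadPartInput)
// in a manual multipart upload. svc is an assumed *s3.S3 client; the bucket,
// key, and partData are placeholder assumptions.
//
//	cmu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//		Bucket: aws.String("example-bucket"),
//		Key:    aws.String("example-key"),
//	})
//	if err == nil {
//		_, err = svc.UploadPart(&s3.UploadPartInput{
//			Bucket:     aws.String("example-bucket"),
//			Key:        aws.String("example-key"),
//			UploadId:   cmu.UploadId,
//			PartNumber: aws.Int64(1),
//			Body:       bytes.NewReader(partData), // every part except the last must be at least 5 MB
//		})
//	}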
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being uploaded. @@ -21324,6 +29934,9 @@ func (s *UploadPartInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -21423,6 +30036,20 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -21439,16 +30066,16 @@ type UploadPartOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. 
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -21498,6 +30125,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -21532,15 +30162,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } +// Specifies website configuration parameters for an Amazon S3 bucket. type WebsiteConfiguration struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -21705,6 +30342,14 @@ const ( CompressionTypeBzip2 = "BZIP2" ) +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -21716,7 +30361,7 @@ const ( EncodingTypeUrl = "url" ) -// Bucket event for which to send notifications. +// The bucket event for which to send notifications. 
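// Editor's sketch, not part of this patch: wiring one of the event values below
// into a bucket notification configuration. svc is an assumed *s3.S3 client;
// the bucket name and SNS topic ARN are placeholder assumptions.
//
//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
//		Bucket: aws.String("example-bucket"),
//		NotificationConfiguration: &s3.NotificationConfiguration{
//			TopicConfigurations: []*s3.TopicConfiguration{{
//				Events:   []*string{aws.String(s3.EventS3ObjectRestoreCompleted)},
//				TopicArn: aws.String("arn:aws:sns:us-west-2:123456789012:example-topic"),
//			}},
//		},
//	})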
const ( // EventS3ReducedRedundancyLostObject is a Event enum value EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" @@ -21744,6 +30389,38 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" +) + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" ) const ( @@ -21784,6 +30461,9 @@ const ( // InventoryFormatOrc is a InventoryFormat enum value InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" ) const ( @@ -21823,6 +30503,18 @@ const ( // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" ) const ( @@ -21857,6 +30549,14 @@ const ( MetadataDirectiveReplace = "REPLACE" ) +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + const ( // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" @@ -21880,6 +30580,35 @@ const ( ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a 
ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + const ( // ObjectStorageClassStandard is a ObjectStorageClass enum value ObjectStorageClassStandard = "STANDARD" @@ -21895,6 +30624,12 @@ const ( // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -21970,6 +30705,14 @@ const ( ReplicationStatusReplica = "REPLICA" ) +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + // If present, indicates that the requester was successfully charged for the // request. const ( @@ -21977,10 +30720,11 @@ const ( RequestChargedRequester = "requester" ) -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. -// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. 
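// Editor's sketch, not part of this patch: passing the requester-pays flag on a
// download. svc is an assumed *s3.S3 client; the bucket and key are placeholder
// assumptions.
//
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket:       aws.String("example-requester-pays-bucket"),
//		Key:          aws.String("example-key"),
//		RequestPayer: aws.String(s3.RequestPayerRequester),
//	})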
const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" @@ -22019,6 +30763,15 @@ const ( // StorageClassOnezoneIa is a StorageClass enum value StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -22054,6 +30807,12 @@ const ( // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go index 5c8ce5cc8..407f06b6e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -25,30 +24,6 @@ const ( appendMD5TxEncoding = "append-md5" ) -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - if !aws.IsReaderSeekable(r.Body) { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "Unable to compute Content-MD5 for unseekable body, S3.%s", - r.Operation.Name)) - } - return - } - - if _, err := copySeekableBody(h, r.Body); err != nil { - r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - v := base64.StdEncoding.EncodeToString(h.Sum(nil)) - r.HTTPRequest.Header.Set(contentMD5Header, v) -} - // computeBodyHashes will add Content MD5 and Content Sha256 hashes to the // request. If the body is not seekable or S3DisableContentMD5Validation set // this handler will be ignored. @@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) { dst = io.MultiWriter(hashers...) } - if _, err := copySeekableBody(dst, r.Body); err != nil { + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) return } @@ -119,28 +94,6 @@ const ( sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen ) -func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} - // Adds the x-amz-te: append_md5 header to the request. This requests the service // responds with a trailing MD5 checksum. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index bc68a46ac..9ba8a7887 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -80,7 +80,8 @@ func buildGetBucketLocation(r *request.Request) { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index a55beab96..a7698d5eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -3,6 +3,8 @@ package s3 import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3err" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) func init() { @@ -12,28 +14,25 @@ func init() { func defaultInitClientFn(c *client.Client) { // Support building custom endpoints based on config - c.Handlers.Build.PushFront(updateEndpointForS3Config) + c.Handlers.Build.PushFront(endpointHandler) // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) } func defaultInitRequestFn(r *request.Request) { - // Add reuest handlers for specific platforms. + // Add request handlers for specific platforms. // e.g. 100-continue support for PUT requests using Go 1.6 platformRequestHandlers(r) switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, - opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutBucketReplication: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) case opGetBucketLocation: // GetBucketLocation has custom parsing logic r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) @@ -42,6 +41,7 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) case opPutObject, opUploadPart: r.Handlers.Build.PushBack(computeBodyHashes) // Disabled until #1837 root issue is resolved. 
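// Editor's note, not part of this patch: with computeSSEKeyMD5 and
// computeCopySourceSSEKeyMD5 registered above, a caller supplies only the raw
// SSE-C key and the SDK derives the key MD5 header (HTTPS is enforced by
// validateSSERequiresSSL). A hedged sketch; svc, data, and all names are
// placeholder assumptions, and the raw-key handling should be verified against
// the SDK docs.
//
//	rawKey := make([]byte, 32) // assume filled from a secure source
//	_, err := svc.PutObject(&s3.PutObjectInput{
//		Bucket:               aws.String("example-bucket"),
//		Key:                  aws.String("example-object"),
//		Body:                 bytes.NewReader(data),
//		SSECustomerAlgorithm: aws.String("AES256"),
//		SSECustomerKey:       aws.String(string(rawKey)),
//	})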
@@ -68,3 +68,8 @@ type sseCustomerKeyGetter interface { type copySourceSSECustomerKeyGetter interface { getCopySourceSSECustomerKey() string } + +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index 39b912c26..7f7aca208 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -63,6 +63,20 @@ // See the s3manager package's Downloader type documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader // +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// // Get Bucket Region // // GetBucketRegion will attempt to get the region for a bucket using a region @@ -90,19 +104,6 @@ // content from S3. The Encryption and Decryption clients can be used concurrently // once the client is created. // -// sess := session.Must(session.NewSession()) -// -// // Create the decryption client. -// svc := s3crypto.NewDecryptionClient(sess) -// -// // The object will be downloaded from S3 and decrypted locally. By metadata -// // about the object's encryption will instruct the decryption client how -// // decrypt the content of the object. By default KMS is used for keys. -// result, err := svc.GetObject(&s3.GetObjectInput { -// Bucket: aws.String(myBucket), -// Key: aws.String(myKey), -// }) -// // See the s3crypto package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go new file mode 100644 index 000000000..c4048fbfb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -0,0 +1,233 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +// Used by shapes with members decorated as endpoint ARN. 
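// Editor's sketch, not part of this patch: the endpoint handling below lets a
// caller pass an S3 access point ARN wherever a bucket name is expected. The
// ARN and key are placeholder assumptions; S3UseARNRegion permits an ARN whose
// region differs from the client's.
//
//	svc := s3.New(sess, &aws.Config{S3UseARNRegion: aws.Bool(true)})
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap"),
//		Key:    aws.String("example-key"),
//	})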
+func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + return arn.ParseAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = newInvalidARNError(nil, err) + return + } + + resReq := resourceRequest{ + Resource: resource, + Request: req, + } + + if resReq.IsCrossPartition() { + req.Error = newClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = newClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if resReq.HasCustomEndpoint() { + req.Error = newInvalidARNWithCustomEndpointError(resource, nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = newInvalidARNError(resource, nil) + } +} + +type resourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +func (r resourceRequest) ARN() awsarn.ARN { + return r.Resource.GetARN() +} + +func (r resourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +func (r resourceRequest) UseFIPS() bool { + return isFIPS(aws.StringValue(r.Request.Config.Region)) +} + +func (r resourceRequest) IsCrossPartition() bool { + return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition +} + +func (r resourceRequest) IsCrossRegion() bool { + return isCrossRegion(r.Request, r.Resource.GetARN().Region) +} + +func (r resourceRequest) HasCustomEndpoint() bool { + return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 +} + +func isFIPS(clientRegion string) bool { + return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") +} +func isCrossRegion(req *request.Request, otherRegion string) bool { + return req.ClientInfo.SigningRegion != otherRegion +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return newClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. 
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +type accessPointEndpointBuilder arn.AccessPointARN + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." +) + +func (a accessPointEndpointBuilder) Build(req *request.Request) error { + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if isFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. + return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion) + if err != nil { + return newFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + const serviceEndpointLabel = "s3-accesspoint" + + // dualstack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } + + protocol.HostPrefixBuilder{ + Prefix: accesPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + req.ClientInfo.SigningName = endpoint.SigningName + req.ClientInfo.SigningRegion = endpoint.SigningRegion + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return newInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go new file mode 100644 index 000000000..9df03e78d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go @@ -0,0 +1,151 @@ +package s3 + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +type invalidARNError struct { + message string + resource arn.Resource + origErr error +} + +func (e invalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e invalidARNError) Code() string { + return invalidARNErrorErrCode +} + +func (e invalidARNError) Message() string { + return e.message +} + +func (e invalidARNError) OrigErr() error { + return e.origErr +} + +func newInvalidARNError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// ARN not supported for the target partition +func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +type configurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +func (e configurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e configurationError) Code() string { + return configurationErrorErrCode +} + +func (e configurationError) Message() string { + return e.message +} + +func (e configurationError) OrigErr() error { + return e.origErr +} + +func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) 
configurationError {
+	return configurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
index 931cb17bb..49aeff16f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -13,6 +13,12 @@ const (
 	// ErrCodeBucketAlreadyOwnedByYou for service response error code
 	// "BucketAlreadyOwnedByYou".
+	//
+	// The bucket you tried to create already exists, and you own it. Amazon S3
+	// returns this error in all AWS Regions except in the North Virginia Region.
+	// For legacy compatibility, if you re-create an existing bucket that you already
+	// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+	// bucket access control lists (ACLs).
 	ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"

 	// ErrCodeNoSuchBucket for service response error code
@@ -36,13 +42,13 @@ const (
 	// ErrCodeObjectAlreadyInActiveTierError for service response error code
 	// "ObjectAlreadyInActiveTierError".
 	//
-	// This operation is not allowed against this storage tier
+	// This operation is not allowed against this storage tier.
 	ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"

 	// ErrCodeObjectNotInActiveTierError for service response error code
 	// "ObjectNotInActiveTierError".
 	//
 	// The source object of the COPY operation is not in the active tier and is
-	// only stored in Amazon Glacier.
+	// only stored in Amazon S3 Glacier.
 	ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
 )
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index a7fbc2de2..81cdec1ae 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{
 	opListBuckets, opCreateBucket, opDeleteBucket,
 }

-// Request handler to automatically add the bucket name to the endpoint domain
+// Automatically add the bucket name to the endpoint domain
 // if possible. This style of bucket is valid for all bucket names which are
 // DNS compatible and do not contain "."
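// Editor's note, not part of this patch: a bucket whose name is not
// host-compatible (for example, one containing ".") typically stays in the
// request path; a client can also force path-style addressing explicitly.
// A hedged sketch, with sess and the region as placeholder assumptions:
//
//	svc := s3.New(sess, &aws.Config{
//		Region:           aws.String("us-east-1"),
//		S3ForcePathStyle: aws.Bool(true),
//	})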
-func updateEndpointForS3Config(r *request.Request) { +func updateEndpointForS3Config(r *request.Request, bucketName string) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) @@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) { r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") } } - updateEndpointForAccelerate(r) + updateEndpointForAccelerate(r, bucketName) } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r) + updateEndpointForHostStyle(r, bucketName) } } -func updateEndpointForHostStyle(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { // bucket name must be valid to put into the host return } - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } var ( accelElem = []byte("s3-accelerate.dualstack.") ) -func updateEndpointForAccelerate(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForAccelerate(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), nil) return } @@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) { r.HTTPRequest.URL.Host = strings.Join(parts, ".") - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } // Attempts to retrieve the bucket name from the request input parameters. @@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool { // moveBucketToHost moves the bucket name from the URI path to URL host. func moveBucketToHost(u *url.URL, bucket string) { u.Host = bucket + "." + u.Host - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" - } + removeBucketFromPath(u) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go new file mode 100644 index 000000000..2f93f96fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go @@ -0,0 +1,45 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. 
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if len(a.Region) == 0 { + return AccessPointARN{}, InvalidARNError{a, "region not set"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{a, "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go new file mode 100644 index 000000000..a942d887f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go @@ -0,0 +1,71 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. +func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{a, "partition not set"} + } + if a.Service != "s3" { + return nil, InvalidARNError{a, "service is not S3"} + } + if len(a.Resource) == 0 { + return nil, InvalidARNError{a, "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. 
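// Editor's note, not part of this patch: SplitResource above splits on both
// "/" and ":", so the two common access point resource forms parse
// identically. A hedged illustration:
//
//	SplitResource("accesspoint/example-ap") // -> []string{"accesspoint", "example-ap"}
//	SplitResource("accesspoint:example-ap") // -> []string{"accesspoint", "example-ap"}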
+type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +func (e InvalidARNError) Error() string { + return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index cc427882b..2646a4272 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -136,6 +136,10 @@ type S3API interface { DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error) DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) + GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) @@ -192,6 +196,10 @@ type S3API interface { GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error) GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error) GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) @@ -220,6 +228,18 @@ type S3API interface { GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error) GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) + + GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) + + GetObjectRetention(*s3.GetObjectRetentionInput) 
(*s3.GetObjectRetentionOutput, error) + GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) + GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error) GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) @@ -228,6 +248,10 @@ type S3API interface { GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error) GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error) HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) @@ -367,10 +391,26 @@ type S3API interface { PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error) PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) + + PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) + + PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) + PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error) PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) + PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, 
error)
	RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
index 7af4fd80d..22bd0b7ce 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
@@ -273,7 +273,7 @@ type DeleteObjectsIterator struct {
 	inc bool
 }
 
-// Next will increment the default iterator's index and and ensure that there
+// Next will increment the default iterator's index and ensure that there
 // is another object to iterator to.
 func (iter *DeleteObjectsIterator) Next() bool {
 	if iter.inc {
@@ -338,6 +338,11 @@ func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
 		}
 	}
 
+	// iter.Next() could return false (above) and also populate iter.Err()
+	if iter.Err() != nil {
+		errs = append(errs, newError(iter.Err(), nil, nil))
+	}
+
 	if input != nil && len(input.Delete.Objects) > 0 {
 		if err := deleteBatch(ctx, d, input, objects); err != nil {
 			errs = append(errs, err...)
@@ -453,7 +458,7 @@ type DownloadObjectsIterator struct {
 	inc bool
 }
 
-// Next will increment the default iterator's index and and ensure that there
+// Next will increment the default iterator's index and ensure that there
 // is another object to iterator to.
 func (batcher *DownloadObjectsIterator) Next() bool {
 	if batcher.inc {
@@ -492,7 +497,7 @@ type UploadObjectsIterator struct {
 	inc bool
 }
 
-// Next will increment the default iterator's index and and ensure that there
+// Next will increment the default iterator's index and ensure that there
 // is another object to iterator to.
 func (batcher *UploadObjectsIterator) Next() bool {
 	if batcher.inc {
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
index f61665a58..9cc1e5970 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
@@ -3,6 +3,7 @@ package s3manager
 import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/s3"
@@ -35,6 +36,30 @@ import (
 // 	}
 // 	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
 //
+// By default the request will be made to the Amazon S3 endpoint using the Path
+// style addressing.
+//
+//     s3.us-west-2.amazonaws.com/bucketname
+//
+// This is not compatible with Amazon S3's FIPS endpoints. To override this
+// behavior to use Virtual Host style addressing, provide a functional option
+// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
+//
+//     region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
+//         r.Config.S3ForcePathStyle = aws.Bool(false)
+//     })
+//
+// To configure the GetBucketRegion to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the Config.Endpoint on the Session or client that
+// the utility is called with. The hint region will be ignored if an endpoint URL
+// is configured on the session or client.
+//
+//     sess, err := session.NewSession(&aws.Config{
+//         Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
+//     })
+//
+//     region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
 func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
 	var cfg aws.Config
 	if len(regionHint) != 0 {
@@ -50,12 +75,38 @@ const bucketRegionHeader = "X-Amz-Bucket-Region"
 // that it takes a S3 service client instead of a Session. The regionHint is
 // derived from the region the S3 service client was created in.
 //
+// By default the request will be made to the Amazon S3 endpoint using the Path
+// style addressing.
+//
+//     s3.us-west-2.amazonaws.com/bucketname
+//
+// This is not compatible with Amazon S3's FIPS endpoints. To override this
+// behavior to use Virtual Host style addressing, provide a functional option
+// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
+//
+//     region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
+//         r.Config.S3ForcePathStyle = aws.Bool(false)
+//     })
+//
+// To configure the GetBucketRegionWithClient to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the Config.Endpoint on the Session or client that
+// the utility is called with. The hint region will be ignored if an endpoint URL
+// is configured on the session or client.
+//
+//     region, err := s3manager.GetBucketRegionWithClient(context.Background(),
+//         s3.New(sess, &aws.Config{
+//             Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
+//         }),
+//         "bucketname")
+//
 // See GetBucketRegion for more information.
 func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
 	req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
 		Bucket: aws.String(bucket),
 	})
 	req.Config.S3ForcePathStyle = aws.Bool(true)
+	req.Config.Credentials = credentials.AnonymousCredentials
 	req.SetContext(ctx)
@@ -75,6 +126,16 @@ func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string
 		r.HTTPResponse.Status = "OK"
 		r.Error = nil
 	})
+	// Replace the endpoint validation handler to not require a region if an
+	// endpoint URL was specified. Since these requests are not authenticated,
+	// requiring a region is not needed when an endpoint URL is provided.
+	req.Handlers.Validate.Swap(
+		corehandlers.ValidateEndpointHandler.Name,
+		request.NamedHandler{
+			Name: "validateEndpointWithoutRegion",
+			Fn:   validateEndpointWithoutRegion,
+		},
+	)
 
 	req.ApplyOptions(opts...)
@@ -86,3 +147,13 @@ func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string
 
 	return bucketRegion, nil
 }
+
+func validateEndpointWithoutRegion(r *request.Request) {
+	// Check if the caller provided an explicit URL instead of one derived by
+	// the SDK's endpoint resolver. For GetBucketRegion, with an explicit
+	// endpoint URL, a region is not needed. If no endpoint URL is provided,
+	// fall back to the SDK's standard endpoint validation handler.
+	if len(aws.StringValue(r.Config.Endpoint)) == 0 {
+		corehandlers.ValidateEndpointHandler.Fn(r)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
new file mode 100644
index 000000000..f1d9e85c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
@@ -0,0 +1,81 @@
+package s3manager
+
+import (
+	"io"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker
+type BufferedReadSeeker struct {
+	r                 io.ReadSeeker
+	buffer            []byte
+	readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker;
+// if len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+	if len(b) == 0 {
+		b = make([]byte, 64*1024)
+	}
+	return &BufferedReadSeeker{r: r, buffer: b}
+}
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+	b.r = r
+	b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If len(p) > the buffer size then a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return n, err
+	}
+
+	if b.readIdx == b.writeIdx {
+		if len(p) >= len(b.buffer) {
+			n, err = b.r.Read(p)
+			return n, err
+		}
+		b.readIdx, b.writeIdx = 0, 0
+
+		n, err = b.r.Read(b.buffer)
+		if n == 0 {
+			return n, err
+		}
+
+		b.writeIdx += n
+	}
+
+	n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+	b.readIdx += n
+
+	return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker to the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	n, err := b.r.Seek(offset, whence)
+
+	b.reset(b.r)
+
+	return n, err
+}
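A minimal usage sketch, assuming a local file as the underlying io.ReadSeeker (the file name is hypothetical, and an os import is assumed; this is illustrative, not part of the vendored code):

	f, err := os.Open("part.dat") // hypothetical input file
	if err != nil {
		// handle error
	}
	brs := NewBufferedReadSeeker(f, nil) // nil buffer selects the 64 KiB default
	p := make([]byte, 512)
	n, err := brs.Read(p) // first Read fills the internal buffer, then copies 512 bytes

+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.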
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { + _, err := b.Seek(off, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return b.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go new file mode 100644 index 000000000..42276530a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go @@ -0,0 +1,7 @@ +// +build !windows + +package s3manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go new file mode 100644 index 000000000..687082c30 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go @@ -0,0 +1,5 @@ +package s3manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return NewBufferedReadSeekerWriteToPool(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go new file mode 100644 index 000000000..ada50c243 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go @@ -0,0 +1,7 @@ +// +build !windows + +package s3manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go new file mode 100644 index 000000000..7e9d9579f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go @@ -0,0 +1,5 @@ +package s3manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return NewPooledBufferedWriterReadFromProvider(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go index 3f38d8b5a..4b54b7c03 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -25,13 +25,25 @@ const DefaultDownloadPartSize = 1024 * 1024 * 5 // when using Download(). const DefaultDownloadConcurrency = 5 +type errReadingBody struct { + err error +} + +func (e *errReadingBody) Error() string { + return fmt.Sprintf("failed to read part body: %v", e.err) +} + +func (e *errReadingBody) Unwrap() error { + return e.err +} + // The Downloader structure that calls Download(). It is safe to call Download() // on this structure for multiple objects and across concurrent goroutines. // Mutating the Downloader's properties is not safe to be done concurrently. type Downloader struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultDownloadPartSize value will be used. + // The size (in bytes) to request from S3 for each part. + // The minimum allowed part size is 5MB, and if this value is set to zero, + // the DefaultDownloadPartSize value will be used. 
// // PartSize is ignored if the Range input parameter is provided. PartSize int64 @@ -50,6 +62,14 @@ type Downloader struct { // List of request options that will be passed down to individual API // operation requests made by the downloader. RequestOptions []request.Option + + // Defines the buffer strategy used when downloading a part. + // + // If a WriterReadFromProvider is given the Download manager + // will pass the io.WriterAt of the Download request to the provider + // and will use the returned WriterReadFrom from the provider as the + // destination writer when copying from http response body. + BufferProvider WriterReadFromProvider } // WithDownloaderRequestOptions appends to the Downloader's API request options. @@ -77,10 +97,15 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) { // d.PartSize = 64 * 1024 * 1024 // 64MB per part // }) func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + return newDownloader(s3.New(c), options...) +} + +func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader { d := &Downloader{ - S3: s3.New(c), - PartSize: DefaultDownloadPartSize, - Concurrency: DefaultDownloadConcurrency, + S3: client, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + BufferProvider: defaultDownloadBufferProvider(), } for _, option := range options { option(d) @@ -99,7 +124,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl // sess := session.Must(session.NewSession()) // // // The S3 client the S3 Downloader will use -// s3Svc := s3.new(sess) +// s3Svc := s3.New(sess) // // // Create a downloader with the s3 client and default options // downloader := s3manager.NewDownloaderWithClient(s3Svc) @@ -109,16 +134,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl // d.PartSize = 64 * 1024 * 1024 // 64MB per part // }) func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { - d := &Downloader{ - S3: svc, - PartSize: DefaultDownloadPartSize, - Concurrency: DefaultDownloadConcurrency, - } - for _, option := range options { - option(d) - } - - return d + return newDownloader(svc, options...) } type maxRetrier interface { @@ -126,7 +142,8 @@ type maxRetrier interface { } // Download downloads an object in S3 and writes the payload into w using -// concurrent GET requests. +// concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. // // Additional functional options can be provided to configure the individual // download. These options are copies of the Downloader instance Download is called from. @@ -148,7 +165,8 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options .. } // DownloadWithContext downloads an object in S3 and writes the payload into w -// using concurrent GET requests. +// using concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. // // DownloadWithContext is the same as Download with the additional support for // Context input parameters. The Context must not be nil. 
A nil Context will @@ -215,14 +233,14 @@ func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s // // objects := []s3manager.BatchDownloadObject { // { -// Input: &s3.GetObjectInput { +// Object: &s3.GetObjectInput { // Bucket: aws.String("bucket"), // Key: aws.String("foo"), // }, // Writer: fooFile, // }, // { -// Input: &s3.GetObjectInput { +// Object: &s3.GetObjectInput { // Bucket: aws.String("bucket"), // Key: aws.String("bar"), // }, @@ -403,18 +421,20 @@ func (d *downloader) downloadChunk(chunk dlchunk) error { var n int64 var err error for retry := 0; retry <= d.partBodyMaxRetries; retry++ { - var resp *s3.GetObjectOutput - resp, err = d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) - if err != nil { - return err - } - d.setTotalBytes(resp) // Set total if not yet set. - - n, err = io.Copy(&chunk, resp.Body) - resp.Body.Close() + n, err = d.tryDownloadChunk(in, &chunk) if err == nil { break } + // Check if the returned error is an errReadingBody. + // If err is errReadingBody this indicates that an error + // occurred while copying the http response body. + // If this occurs we unwrap the err to set the underlying error + // and attempt any remaining retries. + if bodyErr, ok := err.(*errReadingBody); ok { + err = bodyErr.Unwrap() + } else { + return err + } chunk.cur = 0 logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries, @@ -427,6 +447,28 @@ func (d *downloader) downloadChunk(chunk dlchunk) error { return err } +func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) { + cleanup := func() {} + if d.cfg.BufferProvider != nil { + w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) + } + defer cleanup() + + resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) + if err != nil { + return 0, err + } + d.setTotalBytes(resp) // Set total if not yet set. + + n, err := io.Copy(w, resp.Body) + resp.Body.Close() + if err != nil { + return n, &errReadingBody{err: err} + } + + return n, nil +} + func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { s, ok := svc.(*s3.S3) if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go new file mode 100644 index 000000000..05113286d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go @@ -0,0 +1,244 @@ +package s3manager + +import ( + "fmt" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +type byteSlicePool interface { + Get(aws.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + // allocator is defined as a function pointer to allow + // for test cases to instrument custom tracers when allocations + // occur. 
+	allocator sliceAllocator
+
+	slices         chan *[]byte
+	allocations    chan struct{}
+	capacityChange chan struct{}
+
+	max       int
+	sliceSize int64
+
+	mtx sync.RWMutex
+}
+
+func newMaxSlicePool(sliceSize int64) *maxSlicePool {
+	p := &maxSlicePool{sliceSize: sliceSize}
+	p.allocator = p.newSlice
+
+	return p
+}
+
+var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
+
+func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
+	// check if context is canceled before attempting to get a slice
+	// this ensures priority is given to the cancel case first
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	p.mtx.RLock()
+
+	for {
+		select {
+		case bs, ok := <-p.slices:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return bs, nil
+		case _, ok := <-p.allocations:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return p.allocator(), nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// In the event that there are no slices or allocations available,
+			// release the read lock and wait for a notification. This prevents
+			// some deadlock situations that can occur around sync.RWMutex:
+			// when a lock request occurs on ModifyCapacity, no new readers are
+			// allowed to acquire a read lock. Without releasing the read lock
+			// here, Get could hold it indefinitely waiting for capacity,
+			// ModifyCapacity would be waiting for a write lock, and a Put would
+			// be blocked trying to get a read lock, which is blocked by
+			// ModifyCapacity.
+
+			// Short-circuit if the pool capacity is zero.
+			if p.max == 0 {
+				p.mtx.RUnlock()
+				return nil, errZeroCapacity
+			}
+
+			// Since we will be releasing the read-lock we need to take the reference to the channel.
+			// Since channels are references we will still get notified if slices are added, or if
+			// the channel is closed due to a capacity modification. This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case _ = <-c:
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the slices channel is full when attempting to add the slice then we drop it.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+		// This allows us to reap allocations that are returned and are no longer needed.
+	}
+}
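A sketch of the pool contract for package-internal callers (illustrative only; the sizes are arbitrary): capacity must be granted via ModifyCapacity before Get can succeed, and Close releases all retained slices.

	pool := newMaxSlicePool(1024 * 1024) // 1 MiB slices
	pool.ModifyCapacity(5)               // allow up to 5 outstanding slices
	bs, err := pool.Get(aws.BackgroundContext())
	if err == nil {
		// use *bs, then return it for reuse
		pool.Put(bs)
	}
	pool.Close()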
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+		}
+	}
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+	}
+}
+
+func (p *maxSlicePool) SliceSize() int64 {
+	return p.sliceSize
+}
+
+func (p *maxSlicePool) Close() {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	p.empty()
+}
+
+func (p *maxSlicePool) empty() {
+	p.max = 0
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+		p.capacityChange = nil
+	}
+
+	if p.allocations != nil {
+		close(p.allocations)
+		for range p.allocations {
+			// drain channel
+		}
+		p.allocations = nil
+	}
+
+	if p.slices != nil {
+		close(p.slices)
+		for range p.slices {
+			// drain channel
+		}
+		p.slices = nil
+	}
+}
+
+func (p *maxSlicePool) newSlice() *[]byte {
+	bs := make([]byte, p.sliceSize)
+	return &bs
+}
+
+type returnCapacityPoolCloser struct {
+	byteSlicePool
+	returnCapacity int
+}
+
+func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
+	if delta > 0 {
+		n.returnCapacity = -1 * delta
+	}
+	n.byteSlicePool.ModifyCapacity(delta)
+}
+
+func (n *returnCapacityPoolCloser) Close() {
+	if n.returnCapacity < 0 {
+		n.byteSlicePool.ModifyCapacity(n.returnCapacity)
+	}
+}
+
+type sliceAllocator func() *[]byte
+
+var newByteSlicePool = func(sliceSize int64) byteSlicePool {
+	return newMaxSlicePool(sliceSize)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
new file mode 100644
index 000000000..f62e1a45e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
@@ -0,0 +1,65 @@
+package s3manager
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker
+type ReadSeekerWriteTo interface {
+	io.ReadSeeker
+	io.WriterTo
+}
+
+// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
+// implementation.
+type BufferedReadSeekerWriteTo struct {
+	*BufferedReadSeeker
+}
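An illustrative sketch of the adapter above (the reader, data, and the bytes/strings imports are assumptions, not part of the vendored code):

	src := strings.NewReader("example payload")
	w := &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(src, nil)}
	var dst bytes.Buffer
	n, err := w.WriteTo(&dst) // drains src into dst through the internal buffer

+
+// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
+// an error occurs. Returns the number of bytes written and any error encountered during the write.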
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+	return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriterTo for an io.ReadSeeker
+type ReadSeekerWriteToProvider interface {
+	GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory
+type BufferedReadSeekerWriteToPool struct {
+	pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
+// a pool of reusable buffers. If size is less than 64 KiB then the buffer
+// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
+// respectively will default to copying 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+	if size < 65536 {
+		size = 65536
+	}
+
+	return &BufferedReadSeekerWriteToPool{
+		pool: sync.Pool{New: func() interface{} {
+			return make([]byte, size)
+		}},
+	}
+}
+
+// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
+// The provided cleanup must be called after operations have been completed on the
+// returned ReadSeekerWriteTo in order to signal the return of resources to the pool.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+	buffer := p.pool.Get().([]byte)
+
+	r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
+	cleanup = func() {
+		p.pool.Put(buffer)
+	}
+
+	return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
index 0adfa8ac9..8770d4041 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -6,12 +6,12 @@ import (
 	"io"
 	"sort"
 	"sync"
-	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
@@ -96,100 +96,6 @@ func (m multiUploadError) UploadID() string {
 	return m.uploadID
 }
 
-// UploadInput contains all input for upload requests to Amazon S3.
-type UploadInput struct {
-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-	// The language the content is in.
- ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // The base64-encoded 128-bit MD5 digest of the part data. - ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256, - // aws:kms). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). 
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
-
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
-
-	// The tag-set for the object. The tag-set must be encoded as URL Query parameters
-	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
-
-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-
-	// The readable body payload to send to S3.
-	Body io.Reader
-}
-
 // UploadOutput represents a response from the Upload() call.
 type UploadOutput struct {
 	// The URL where the object was uploaded to.
@@ -239,8 +145,15 @@ type Uploader struct {
 	// MaxUploadParts is the max number of parts which will be uploaded to S3.
 	// Will be used to calculate the partsize of the object to be uploaded.
 	// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
-	// as 100, 50MB parts.
-	// With a limited of s3.MaxUploadParts (10,000 parts).
+	// as 100, 50MB parts. With a limit of s3.MaxUploadParts (10,000 parts).
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitReader is helpful when uploading an unbounded reader
+	// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
+	// error must be used to signal end of stream.
+	//
+	// Defaults to package const's MaxUploadParts value.
 	MaxUploadParts int
 
 	// The client to use when uploading to S3.
@@ -249,6 +162,12 @@ type Uploader struct {
 	// List of request options that will be passed down to individual API
 	// operation requests made by the uploader.
 	RequestOptions []request.Option
+
+	// Defines the buffer strategy used when uploading a part
+	BufferProvider ReadSeekerWriteToProvider
+
+	// partPool allows for the reuse of streaming payload part buffers between upload calls
+	partPool byteSlicePool
 }
 
 // NewUploader creates a new Uploader instance to upload objects to S3. Pass In
@@ -268,18 +187,25 @@
 //     u.PartSize = 64 * 1024 * 1024 // 64MB per part
 // })
 func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+	return newUploader(s3.New(c), options...)
+}
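For illustration, a caller might combine these new options roughly as follows (a sketch; the sizes are arbitrary, and NewBufferedReadSeekerWriteToPool is the provider added in read_seeker_write_to.go as part of this change):

	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 64 * 1024 * 1024 // 64MB per part
		u.Concurrency = 8
		u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(1024 * 1024)
	})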
+
+func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
 	u := &Uploader{
-		S3:                s3.New(c),
+		S3:                client,
 		PartSize:          DefaultUploadPartSize,
 		Concurrency:       DefaultUploadConcurrency,
 		LeavePartsOnError: false,
 		MaxUploadParts:    MaxUploadParts,
+		BufferProvider:    defaultUploadBufferProvider(),
 	}
 
 	for _, option := range options {
 		option(u)
 	}
 
+	u.partPool = newByteSlicePool(u.PartSize)
+
 	return u
 }
@@ -302,19 +228,7 @@
 //     u.PartSize = 64 * 1024 * 1024 // 64MB per part
 // })
 func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
-	u := &Uploader{
-		S3:                svc,
-		PartSize:          DefaultUploadPartSize,
-		Concurrency:       DefaultUploadConcurrency,
-		LeavePartsOnError: false,
-		MaxUploadParts:    MaxUploadParts,
-	}
-
-	for _, option := range options {
-		option(u)
-	}
-
-	return u
+	return newUploader(svc, options...)
 }
 
 // Upload uploads an object to S3, intelligently buffering large files into
@@ -374,6 +288,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
 	for _, opt := range opts {
 		opt(&i.cfg)
 	}
+	i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
 
 	return i.upload()
@@ -443,14 +358,15 @@ type uploader struct {
 	readerPos int64 // current reader position
 	totalSize int64 // set to -1 if the size is not known
-
-	bufferPool sync.Pool
 }
 
 // internal logic for deciding whether to upload a single part or use a
 // multipart upload.
 func (u *uploader) upload() (*UploadOutput, error) {
-	u.init()
+	if err := u.init(); err != nil {
+		return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
+	}
+	defer u.cfg.partPool.Close()
 
 	if u.cfg.PartSize < MinUploadPartSize {
 		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
@@ -458,44 +374,59 @@
 	}
 
 	// Do one read to determine if we have more than one part
-	reader, _, part, err := u.nextReader()
+	reader, _, cleanup, err := u.nextReader()
 	if err == io.EOF { // single part
-		return u.singlePart(reader)
+		return u.singlePart(reader, cleanup)
 	} else if err != nil {
+		cleanup()
 		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
 	}
 
 	mu := multiuploader{uploader: u}
-	return mu.upload(reader, part)
+	return mu.upload(reader, cleanup)
 }
 
 // init will initialize all default options.
-func (u *uploader) init() {
+func (u *uploader) init() error {
 	if u.cfg.Concurrency == 0 {
 		u.cfg.Concurrency = DefaultUploadConcurrency
 	}
 	if u.cfg.PartSize == 0 {
 		u.cfg.PartSize = DefaultUploadPartSize
 	}
-
-	u.bufferPool = sync.Pool{
-		New: func() interface{} { return make([]byte, u.cfg.PartSize) },
+	if u.cfg.MaxUploadParts == 0 {
+		u.cfg.MaxUploadParts = MaxUploadParts
 	}
 
 	// Try to get the total size for some optimizations
-	u.initSize()
+	if err := u.initSize(); err != nil {
+		return err
+	}
+
+	// If PartSize was changed or partPool was never set up then we need to allocate a new pool
+	// so that we return []byte slices of the correct size
+	poolCap := u.cfg.Concurrency + 1
+	if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
+		u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	} else {
+		u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	}
+
+	return nil
 }
 
 // initSize tries to detect the total stream size, setting u.totalSize.
If // the size is not known, totalSize is set to -1. -func (u *uploader) initSize() { +func (u *uploader) initSize() error { u.totalSize = -1 switch r := u.in.Body.(type) { case io.Seeker: n, err := aws.SeekerLen(r) if err != nil { - return + return err } u.totalSize = n @@ -507,17 +438,15 @@ func (u *uploader) initSize() { u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 } } + + return nil } // nextReader returns a seekable reader representing the next packet of data. // This operation increases the shared u.readerPos counter, but note that it // does not need to be wrapped in a mutex because nextReader is only called // from the main thread. -func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) { - type readerAtSeeker interface { - io.ReaderAt - io.ReadSeeker - } +func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { switch r := u.in.Body.(type) { case readerAtSeeker: var err error @@ -532,17 +461,36 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) { } } - reader := io.NewSectionReader(r, u.readerPos, n) + var ( + reader io.ReadSeeker + cleanup func() + ) + + reader = io.NewSectionReader(r, u.readerPos, n) + if u.cfg.BufferProvider != nil { + reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) + } else { + cleanup = func() {} + } + u.readerPos += n - return reader, int(n), nil, err + return reader, int(n), cleanup, err default: - part := u.bufferPool.Get().([]byte) - n, err := readFillBuf(r, part) + part, err := u.cfg.partPool.Get(u.ctx) + if err != nil { + return nil, 0, func() {}, err + } + + n, err := readFillBuf(r, *part) u.readerPos += int64(n) - return bytes.NewReader(part[0:n]), n, part, err + cleanup := func() { + u.cfg.partPool.Put(part) + } + + return bytes.NewReader((*part)[0:n]), n, cleanup, err } } @@ -559,10 +507,12 @@ func readFillBuf(r io.Reader, b []byte) (offset int, err error) { // singlePart contains upload logic for uploading a single chunk via // a regular PutObject request. Multipart requests require at least two // parts, or at least 5MB of data. -func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) { +func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + defer cleanup() + params := &s3.PutObjectInput{} awsutil.Copy(params, u.in) - params.Body = buf + params.Body = r // Need to use request form because URL generated in request is // used in return. @@ -592,9 +542,9 @@ type multiuploader struct { // keeps track of a single chunk of data being sent to S3. type chunk struct { - buf io.ReadSeeker - part []byte - num int64 + buf io.ReadSeeker + num int64 + cleanup func() } // completedParts is a wrapper to make parts sortable by their part number, @@ -607,13 +557,14 @@ func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].Pa // upload will perform a multipart upload using the firstBuf buffer containing // the first chunk of data. -func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) { +func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { params := &s3.CreateMultipartUploadInput{} awsutil.Copy(params, u.in) // Create the multipart resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) 
 	if err != nil {
+		cleanup()
 		return nil, err
 	}
 	u.uploadID = *resp.UploadId
 
@@ -627,46 +578,29 @@
 	// Send part 1 to the workers
 	var num int64 = 1
-	ch <- chunk{buf: firstBuf, part: firstPart, num: num}
+	ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
 
 	// Read and queue the rest of the parts
 	for u.geterr() == nil && err == nil {
-		num++
-		// This upload exceeded maximum number of supported parts, error now.
-		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
-			var msg string
-			if num > int64(u.cfg.MaxUploadParts) {
-				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
-					u.cfg.MaxUploadParts)
-			} else {
-				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
-					MaxUploadParts)
+		var (
+			reader       io.ReadSeeker
+			nextChunkLen int
+			ok           bool
+		)
+
+		reader, nextChunkLen, cleanup, err = u.nextReader()
+		ok, err = u.shouldContinue(num, nextChunkLen, err)
+		if !ok {
+			cleanup()
+			if err != nil {
+				u.seterr(err)
 			}
-			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
-			break
-		}
-
-		var reader io.ReadSeeker
-		var nextChunkLen int
-		var part []byte
-		reader, nextChunkLen, part, err = u.nextReader()
-
-		if err != nil && err != io.EOF {
-			u.seterr(awserr.New(
-				"ReadRequestBody",
-				"read multipart upload data failed",
-				err))
 			break
 		}
 
-		if nextChunkLen == 0 {
-			// No need to upload empty part, if file was empty to start
-			// with empty single part would of been created and never
-			// started multipart upload.
-			break
-		}
+		num++
 
-		ch <- chunk{buf: reader, part: part, num: num}
+		ch <- chunk{buf: reader, num: num, cleanup: cleanup}
 	}
 
 	// Close the channel, wait for workers, and complete upload
@@ -683,13 +617,53 @@
 			uploadID: u.uploadID,
 		}
 	}
+
+	// Create a presigned URL of the S3 Get Object in order to have parity with
+	// single part upload.
+	getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
+		Bucket: u.in.Bucket,
+		Key:    u.in.Key,
+	})
+	getReq.Config.Credentials = credentials.AnonymousCredentials
+	getReq.SetContext(u.ctx)
+	uploadLocation, _, _ := getReq.PresignRequest(1)
+
 	return &UploadOutput{
-		Location:  aws.StringValue(complete.Location),
+		Location:  uploadLocation,
 		VersionID: complete.VersionId,
 		UploadID:  u.uploadID,
 	}, nil
 }
 
+func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
+	if err != nil && err != io.EOF {
+		return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
+	}
+
+	if nextChunkLen == 0 {
+		// No need to upload an empty part; if the file was empty to start with,
+		// an empty single part would have been created and the multipart upload
+		// never started.
+		return false, nil
+	}
+
+	part++
+	// This upload exceeded maximum number of supported parts, error now.
+	if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
+		var msg string
+		if part > int64(u.cfg.MaxUploadParts) {
+			msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				u.cfg.MaxUploadParts)
+		} else {
+			msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d).
Adjust PartSize to fit in this limit", + MaxUploadParts) + } + return false, awserr.New("TotalPartsExceeded", msg, nil) + } + + return true, err +} + // readChunk runs in worker goroutines to pull chunks off of the ch channel // and send() them as UploadPart requests. func (u *multiuploader) readChunk(ch chan chunk) { @@ -706,6 +680,8 @@ func (u *multiuploader) readChunk(ch chan chunk) { u.seterr(err) } } + + data.cleanup() } } @@ -721,9 +697,8 @@ func (u *multiuploader) send(c chunk) error { SSECustomerKey: u.in.SSECustomerKey, PartNumber: &c.num, } + resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...) - // put the byte array back into the pool to conserve memory - u.bufferPool.Put(c.part) if err != nil { return err } @@ -795,3 +770,8 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { return resp } + +type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go new file mode 100644 index 000000000..e1e609d0e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -0,0 +1,171 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3manager + +import ( + "io" + "time" +) + +// UploadInput provides the input parameters for uploading a stream or buffer +// to an object in an Amazon S3 bucket. This type is similar to the s3 +// package's PutObjectInput with the exception that the Body member is an +// io.Reader instead of an io.ReadSeeker. +type UploadInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The readable body payload to send to S3. + Body io.Reader + + // Bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). 
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. 
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS Encryption Context to use for object encryption. The
+	// value of this header is a base64-encoded UTF-8 string holding JSON with the
+	// encryption context key-value pairs.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+	// If x-amz-server-side-encryption is present and has the value of aws:kms,
+	// this header specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetrical customer managed customer master key (CMK) that was used for
+	// the object.
+	//
+	// If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+	// the ID of the symmetric customer managed AWS KMS CMK that will be used for
+	// the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+	// provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+	// managed CMK in AWS to protect the data.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// If you don't specify, S3 Standard is the default storage class. Amazon S3
+	// supports other storage classes.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1") + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go new file mode 100644 index 000000000..765dc07ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go @@ -0,0 +1,75 @@ +package s3manager + +import ( + "bufio" + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom +type WriterReadFrom interface { + io.Writer + io.ReaderFrom +} + +// WriterReadFromProvider provides an implementation of io.ReaderFrom for the given io.Writer +type WriterReadFromProvider interface { + GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) +} + +type bufferedWriter interface { + WriterReadFrom + Flush() error + Reset(io.Writer) +} + +type bufferedReadFrom struct { + bufferedWriter +} + +func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := b.bufferedWriter.ReadFrom(r) + if flushErr := b.Flush(); flushErr != nil && err == nil { + err = flushErr + } + return n, err +} + +// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool +// to manage allocation and reuse of *bufio.Writer structures. +type PooledBufferedReadFromProvider struct { + pool sync.Pool +} + +// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider. +// Size is used to control the size of the underlying *bufio.Writer created for +// calls to GetReadFrom.
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { + if size < int(32*sdkio.KibiByte) { + size = int(64 * sdkio.KibiByte) + } + + return &PooledBufferedReadFromProvider{ + pool: sync.Pool{ + New: func() interface{} { + return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} + }, + }, + } +} + +// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom +// interface. Additionally, a cleanup function is provided which must be called after usage of the WriterReadFrom +// has been completed, in order to allow the reuse of the *bufio.Writer +func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { + buffer := p.pool.Get().(*bufferedReadFrom) + buffer.Reset(writer) + r = buffer + cleanup = func() { + buffer.Reset(nil) // Reset to nil writer to release reference + p.pool.Put(buffer) + } + return r, cleanup +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 20de53f29..b4c07b4d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "s3" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "S3" // ServiceID is a unique identifer of a specific service. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the S3 client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a S3 client from just a session. // svc := s3.New(mySession) // @@ -46,11 +48,11 @@ const ( // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, @@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2006-03-01", }, @@ -67,12 +70,15 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) // Run custom client initialization if present diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 8010c4fa1..b71c835de 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -3,6 +3,7 @@ package s3 import ( "crypto/md5" "encoding/base64" + "net/http" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) { } } -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() } - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatibility where the user just set the header value instead + // of using the API parameter, or set the header value for an + // operation without the parameters modeled.
+ key = r.Header.Get(keyHeader) + if len(key) == 0 { + return } + + // For backwards compatibility, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set. + if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index 9f33efc6c..247770e4c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -2,6 +2,7 @@ package s3 import ( "bytes" + "io" "io/ioutil" "net/http" @@ -13,24 +14,29 @@ import ( func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "unable to read response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } body := bytes.NewReader(b) r.HTTPResponse.Body = ioutil.NopCloser(body) defer body.Seek(0, sdkio.SeekStart) - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. - return - } - unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == "SerializationError" { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { r.Error = nil return } - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index bcca8627a..6eecf6691 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -1,6 +1,7 @@ package s3 import ( + "bytes" "encoding/xml" "fmt" "io" @@ -11,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorResponse struct { @@ -23,54 +25,63 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) - hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2") - // Bucket exists in a different region, and request needs // to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - r.Error = requestFailure{ - RequestFailure: awserr.NewRequestFailure( - awserr.New("BucketRegionError", - fmt.Sprintf("incorrect region, the bucket is not in '%s' region", - aws.StringValue(r.Config.Region)), - nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ), - hostID: hostID, + msg := fmt.Sprintf( + "incorrect region, the bucket is not in '%s' region at endpoint '%s'", + aws.StringValue(r.Config.Region), + aws.StringValue(r.Config.Endpoint), + ) + if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { + msg += fmt.Sprintf(", bucket is in '%s' region", v) } + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } - var errCode, errMsg string - // Attempt to parse error from body if it is known - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - errCode = "SerializationError" - errMsg = "failed to decode S3 XML error response" + var errResp xmlErrorResponse + var err error + if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { + err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) } else { - errCode = resp.Code - errMsg = resp.Message - err = nil + err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) } - // Fallback to status code converted to message if still no error code - if len(errCode) == 0 { - statusText := http.StatusText(r.HTTPResponse.StatusCode) - errCode = strings.Replace(statusText, " ", "", -1) - errMsg = statusText - } + if err != nil { + var errorMsg string + if err == io.EOF { + errorMsg = "empty response payload" + } else { + errorMsg = "failed to unmarshal error message" + } - r.Error = requestFailure{ - RequestFailure: awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + errorMsg, err), r.HTTPResponse.StatusCode, r.RequestID, - ), - hostID: hostID, + ) + return } + + // Fallback to status code converted to message if still no error code + if len(errResp.Code) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText + } + + r.Error = awserr.NewRequestFailure( + awserr.New(errResp.Code, errResp.Message, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } // A RequestFailure provides access to the S3 Request ID and Host ID values @@ -84,20 +95,20 @@ type RequestFailure interface { HostID() string } -type requestFailure struct { - awserr.RequestFailure +// s3unmarshalXMLError is an S3-specific XML error unmarshaler +// for 200 OK errors and response payloads. +// This function differs from the xmlutil.UnmarshalXMLError +// func. It does not ignore the EOF error and passes it up.
+// Related to bug fix for `s3 200 OK response with empty payload` +func s3unmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) - hostID string -} + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } -func (r requestFailure) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", - r.StatusCode(), r.RequestID(), r.hostID) - return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} -func (r requestFailure) String() string { - return r.Error() -} -func (r requestFailure) HostID() string { - return r.hostID + return err } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 6f89a796e..550b5f687 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -3,10 +3,12 @@ package sts import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" ) @@ -15,7 +17,7 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -54,39 +56,29 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) that you can use to access -// AWS resources that you might not normally have access to. Typically, you -// use AssumeRole for cross-account access or federation. For a comparison of -// AssumeRole with the other APIs that produce temporary credentials, see Requesting -// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Important: You cannot call AssumeRole by using AWS root account credentials; -// access is denied. You must use credentials for an IAM user or an IAM role -// to call AssumeRole. +// You cannot use AWS account root user credentials to call AssumeRole. 
You +// must use credentials for an IAM user or an IAM role to call AssumeRole. // // For cross-account access, imagine that you own multiple accounts and need // to access resources in each account. You could create long-term credentials // in each account to access those resources. However, managing all those credentials // and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account and -// then use temporary security credentials to access all the other accounts +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts // by assuming roles in those accounts. For more information about roles, see -// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // -// For federation, you can, for example, grant single sign-on access to the -// AWS Management Console. If you already have an identity and authentication -// system in your corporate network, you don't have to recreate user identities -// in AWS in order to grant those user identities access to AWS. Instead, after -// a user has been authenticated, you call AssumeRole (and specify the role -// with the appropriate permissions) to get temporary security credentials for -// that user. With those temporary security credentials, you construct a sign-in -// URL that users can use to access the console. For more information, see Common -// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) -// in the IAM User Guide. +// Session Duration // // By default, the temporary security credentials created by AssumeRole last // for one hour. However, you can use the optional DurationSeconds parameter @@ -94,69 +86,93 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // seconds (15 minutes) up to the maximum session duration setting for the role. // This setting can have a value from 1 hour to 12 hours. To learn how to view // the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: you cannot call -// the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. 
If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role, your AWS account must be trusted by the role. The trust -// relationship is defined in the role's trust policy when the role is created. -// That trust policy states which accounts are allowed to delegate access to -// this account's role. -// -// The user who wants to access the role must also have permissions delegated -// from the role's administrator. If the user is in a different account than -// the role, then the user's administrator must attach a policy that allows -// the user to call AssumeRole on the ARN of the role in the other account. -// If the user is in the same account as the role, then you can either attach -// a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy. -// In this case, the trust policy acts as the only resource-based policy in -// IAM, and users in the same account as the role do not need explicit permission -// to assume the role. For more information about trust policies and resource-based -// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. 
That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // // Using MFA with AssumeRole // -// You can optionally include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios in which -// you want to make sure that the user who is assuming the role has been authenticated -// using an AWS MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication; if the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication -// might look like the following example. +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. 
// // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // -// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) // in the IAM User Guide guide. // // To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode // parameters. The SerialNumber value identifies the user's hardware or virtual // MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA devices produces. +// the MFA device produces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -171,15 +187,24 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole @@ -209,7 +234,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -243,6 +268,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re output = &AssumeRoleWithSAMLOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } @@ -252,15 +278,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // via a SAML authentication response. This operation provides a mechanism for // tying an enterprise identity store or directory to role-based AWS access // without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this operation consist of // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. Your role session lasts for the @@ -269,38 +297,33 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session // duration setting for the role. This setting can have a value from 1 hour // to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot -// call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. 
If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by the intersection of both the access policy -// of the role that is being assumed, and the policy that you pass. This means -// that both policies must grant the permission for the action to be allowed. -// This gives you a way to further restrict the permissions for the resulting -// temporary security credentials. You cannot use the passed policy to grant -// permissions that are in excess of those allowed by the access policy of the -// role that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider, and create -// an IAM role that specifies this SAML provider in its trust policy. -// // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document // that is uploaded for the SAML provider entity for your identity provider. @@ -308,21 +331,63 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail // logs. The entry includes the value in the NameID element of the SAML assertion. // We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the Persistent -// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). 
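Because AssumeRoleWithSAMLRequest now attaches credentials.AnonymousCredentials (see the hunk above), the request is unsigned and the session needs no AWS credentials configured. A minimal sketch under that assumption, with placeholder ARNs and a placeholder assertion:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// exchangeSAML trades a base64-encoded SAML assertion from an IdP for
// temporary credentials. The call is validated against the SAML provider's
// metadata, not against AWS credentials on the session.
func exchangeSAML(assertion string) (*sts.Credentials, error) {
	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),    // placeholder
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/idp"), // placeholder
		SAMLAssertion: aws.String(assertion),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}

func main() {
	creds, err := exchangeSAML("PHNhbWw6QXNzZXJ0aW9uPi4uLg==") // placeholder assertion
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session expires:", aws.TimeValue(creds.Expiration))
}
```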
+// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) // in the IAM User Guide. // -// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) // in the IAM User Guide. // -// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) // in the IAM User Guide. 
// -// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -338,9 +403,18 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -361,7 +435,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML @@ -391,7 +465,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -425,41 +499,44 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI output = &AssumeRoleWithWebIdentityOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } // AssumeRoleWithWebIdentity API operation for AWS Security Token Service. 
// // Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider, such as Amazon -// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible -// identity provider. +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. // // For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely -// identify a user and supply the user with a consistent identity throughout -// the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview -// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the AWS SDK for iOS Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of AWS security // credentials. Therefore, you can distribute an application (for example, on // mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application, and without deploying server-based -// proxy services that use long-term AWS credentials. Instead, the identity -// of the caller is validated by using a token from the web identity provider. -// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce -// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this API consist of an access // key ID, a secret access key, and a security token. 
Applications can use these -// temporary security credentials to sign calls to AWS service APIs. +// temporary security credentials to sign calls to AWS service API operations. +// +// Session Duration // // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter @@ -467,31 +544,69 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // seconds (15 minutes) up to the maximum session duration setting for the role. // This setting can have a value from 1 hour to 12 hours. To learn how to view // the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: -// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. 
The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that // the application can assume. The role that your application assumes must trust @@ -508,21 +623,19 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). 
-// This interactive website lets you walk through the process of authenticating -// via Login with Amazon, Facebook, or Google, getting temporary security -// credentials, and then using those credentials to make a request to AWS. +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. // -// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android -// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample -// apps that show how to invoke the identity providers, and then how to use -// the information from these providers to get and use temporary security -// credentials. +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // // * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of @@ -542,9 +655,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -554,11 +676,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // can also mean that the claim has expired or has been explicitly revoked. 
// // * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the non-AWS identity provider -// (IDP) that was asked to verify the incoming identity token could not be reached. -// This is often a transient error caused by network conditions. Retry the request +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the -// error persists, the non-AWS identity provider might be down or not responding. +// error persists, the identity provider might be down or not responding. // // * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get @@ -572,7 +694,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity @@ -602,7 +724,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -644,17 +766,17 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // Decodes additional information about the authorization status of a request // from an encoded message returned in response to an AWS request. // -// For example, if a user is not authorized to perform an action that he or -// she has requested, the request returns a Client.UnauthorizedOperation response -// (an HTTP 403 response). Some AWS actions additionally return an encoded message -// that can provide details about this authorization failure. +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. // -// Only certain AWS actions return an encoded authorization message. The documentation -// for an individual action indicates whether that action returns an encoded -// message in addition to returning an HTTP code. +// Only certain AWS operations return an encoded authorization message. The +// documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. 
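For illustration, a minimal sketch of calling DecodeAuthorizationMessage with this SDK, assuming the standard aws, aws/session, and service/sts packages (plus fmt and log); encodedMsg is a hypothetical placeholder holding the encoded message returned with a Client.UnauthorizedOperation (HTTP 403) response:

    sess := session.Must(session.NewSession())
    svc := sts.New(sess)
    // Decode the message that accompanied the 403 response. The caller needs
    // the sts:DecodeAuthorizationMessage permission for this to succeed.
    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
        EncodedMessage: aws.String(encodedMsg),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.DecodedMessage)) // JSON describing why the request was denied
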
// // The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the action +// constitute privileged information that the user who requested the operation // should not see. To decode an authorization status message, a user must be // granted permissions via an IAM policy to request the DecodeAuthorizationMessage // (sts:DecodeAuthorizationMessage) action. @@ -663,7 +785,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // * Whether the request was denied due to an explicit deny or due to the // absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) // in the IAM User Guide. // // * The principal who made the request. @@ -709,12 +831,109 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco return out, req.Send() } +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. 
If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -753,8 +972,16 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // GetCallerIdentity API operation for AWS Security Token Service. // -// Returns details about the IAM identity whose credentials are used to call -// the API. +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. 
To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -789,7 +1016,7 @@ const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -831,81 +1058,92 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Returns a set of temporary security credentials (consisting of an access // key ID, a secret access key, and a security token) for a federated user. // A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. Because -// you must call the GetFederationToken action using the long-term security -// credentials of an IAM user, this call is appropriate in contexts where those +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those // credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other APIs that produce temporary -// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// If you are creating a mobile-based or browser-based app that can authenticate +// You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider, we recommend that you -// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. // For more information, see Federation Through a Web-based Identity Provider -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// The GetFederationToken action must be called by using the long-term AWS security -// credentials of an IAM user. 
You can also call GetFederationToken using the -// security credentials of an AWS root account, but we do not recommended it. -// Instead, we recommend that you create an IAM user for the purpose of the -// proxy application and then attach a policy to the IAM user that limits federated -// users to only the actions and resources that they need access to. For more -// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// The temporary security credentials that are obtained by using the long-term -// credentials of an IAM user are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). The default -// is 43200 seconds (12 hours). Temporary credentials that are obtained by using -// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). -// -// The temporary security credentials created by GetFederationToken can be used -// to make API calls to any AWS service with the following exceptions: +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. // -// * You cannot use these credentials to call any IAM APIs. +// Session duration // -// * You cannot call any STS APIs except GetCallerIdentity. +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). // // Permissions // -// The permissions for the temporary security credentials returned by GetFederationToken -// are determined by a combination of the following: -// -// * The policy or policies that are attached to the IAM user whose credentials -// are used to call GetFederationToken. -// -// * The policy that is passed as a parameter in the call. -// -// The passed policy is attached to the temporary security credentials that -// result from the GetFederationToken API call--that is, to the federated user. -// When the federated user makes an AWS request, AWS evaluates the policy attached -// to the federated user in combination with the policy or policies attached -// to the IAM user whose credentials were used to call GetFederationToken. AWS -// allows the federated user's request only when both the federated user and -// the IAM user are explicitly allowed to perform the requested action. The -// passed policy cannot grant more permissions than those that are defined in -// the IAM user policy. -// -// A typical use case is that the permissions of the IAM user whose credentials -// are used to call GetFederationToken are designed to allow access to all the -// actions and resources that any federated user will need. 
Then, for individual -// users, you pass a policy to the operation that scopes down the permissions -// to a level that's appropriate to that individual user, using a policy that -// allows only a subset of permissions that are granted to the IAM user. -// -// If you do not pass a policy, the resulting temporary security credentials -// have no effective permissions. The only exception is when the temporary security -// credentials are used to access a resource that has a resource-based policy -// that specifically allows the federated user to access the resource. -// -// For more information about how permissions work, see Permissions for GetFederationToken -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). -// For information about using GetFederationToken to create temporary security -// credentials, see GetFederationToken—Federation Through a Custom Identity -// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. 
The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This
+// means that you cannot have separate Department and department tag keys. Assume
+// that the user that you are federating has the Department=Marketing tag and
+// you pass the department=engineering session tag. Department and department
+// are not saved as separate tags, and the session tag passed in the request
+// takes precedence over the user tag.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -920,15 +1158,24 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the policy document was too large. The error
-// message describes how big the policy document is, in packed form, as a percentage
-// of what the API allows.
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+// in the IAM User Guide.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
@@ -958,7 +1205,7 @@ const opGetSessionToken = "GetSessionToken"
// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
// value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@@ -1000,48 +1247,51 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// Returns a set of temporary credentials for an AWS account or IAM user.
The // credentials consist of an access key ID, a secret access key, and a security // token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled -// IAM users would need to call GetSessionToken and submit an MFA code that -// is associated with their MFA device. Using the temporary security credentials -// that are returned from the call, IAM users can then make programmatic calls -// to APIs that require MFA authentication. If you do not supply a correct MFA -// code, then the API returns an access denied error. For a comparison of GetSessionToken -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. +// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA +// code that is associated with their MFA device. Using the temporary security +// credentials that are returned from the call, IAM users can then make programmatic +// calls to API operations that require MFA authentication. If you do not supply +// a correct MFA code, then the API returns an access denied error. For a comparison +// of GetSessionToken with the other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// The GetSessionToken action must be called by using the long-term AWS security -// credentials of the AWS account or an IAM user. Credentials that are created -// by IAM users are valid for the duration that you specify, from 900 seconds -// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default -// of 43200 seconds (12 hours); credentials that are created by using account -// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 -// seconds (1 hour), with a default of 1 hour. +// Session Duration +// +// The GetSessionToken operation must be called by using the long-term AWS security +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. +// +// Permissions // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: // -// * You cannot call any IAM APIs unless MFA authentication information is -// included in the request. +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. // -// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. 
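For illustration, a minimal sketch of the MFA-protected GetSessionToken call described above, assuming the standard aws, aws/session, and service/sts packages (plus log); the MFA device ARN and token code are hypothetical placeholders:

    sess := session.Must(session.NewSession())
    svc := sts.New(sess)
    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
        DurationSeconds: aws.Int64(3600),                                  // 15 minutes to 36 hours for IAM users
        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/jdoe"), // hypothetical MFA device ARN
        TokenCode:       aws.String("123456"),                             // six-digit code from the MFA device
    })
    if err != nil {
        log.Fatal(err)
    }
    // out.Credentials carries AccessKeyId, SecretAccessKey, SessionToken, and Expiration.
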
// -// We recommend that you do not call GetSessionToken with root account credentials. -// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) // by creating one or more IAM users, giving them the necessary permissions, // and using IAM users for everyday interaction with AWS. // -// The permissions associated with the temporary security credentials returned -// by GetSessionToken are based on the permissions associated with account or -// IAM user whose credentials are used to call the action. If GetSessionToken -// is called using root account credentials, the temporary credentials have -// root account permissions. Similarly, if GetSessionToken is called using the -// credentials of an IAM user, the temporary credentials have the same permissions -// as the IAM user. +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1056,7 +1306,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken @@ -1091,7 +1341,7 @@ type AssumeRoleInput struct { // a session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum // value for your role, see View the Maximum Session Duration Setting for a - // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. 
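For illustration, a minimal AssumeRole sketch using the DurationSeconds and ExternalId fields documented in the surrounding hunks, assuming the standard aws, aws/session, and service/sts packages (plus fmt and log); the role ARN and external ID are hypothetical placeholders:

    sess := session.Must(session.NewSession())
    svc := sts.New(sess)
    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
        RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"), // hypothetical cross-account role
        RoleSessionName: aws.String("example-session"),
        DurationSeconds: aws.Int64(3600),                   // must not exceed the role's maximum session duration
        ExternalId:      aws.String("example-external-id"), // hypothetical value agreed with the role's administrator
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.AssumedRoleUser.Arn)) // ARN of the assumed-role principal
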
@@ -1101,51 +1351,77 @@ type AssumeRoleInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // A unique identifier that is used by third parties when assuming roles in - // their customers' accounts. For each role that the third party can assume, - // they should instruct their customers to ensure the role's trust policy checks - // for the external ID that the third party generated. Each time the third party - // assumes the role, they should pass the customer's external ID. The external - // ID is useful in order to help third parties bind a role to the customer who - // created it. For more information about the external ID, see How to Use an - // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. // - // The regex used to validated this parameter is a string of characters consisting + // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can // also include underscores or any of the following characters: =,.@:/- ExternalId *string `min:"2" type:"string"` - // An IAM policy in JSON format. - // - // This parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both (the intersection of) the access policy of the role that - // is being assumed, and the policy that you pass. This gives you a way to further - // restrict the permissions for the resulting temporary security credentials. - // You cannot use the passed policy to grant permissions that are in excess - // of those allowed by the access policy of the role that is being assumed. - // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, - // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. 
Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. 
You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1158,8 +1434,8 @@ type AssumeRoleInput struct { // scenarios, the role session name is visible to, and can be logged by the // account that owns the role. The role session name is also used in the ARN // of the assumed role principal. This means that subsequent cross-account API - // requests using the temporary security credentials will expose the role session - // name to the external account in their CloudTrail logs. + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can @@ -1179,6 +1455,41 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. 
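For illustration, a hedged sketch of passing the session tags and transitive tag keys that this field documentation describes, assuming the standard aws, aws/session, and service/sts packages (plus fmt and log); the role ARN and tag values are hypothetical placeholders:

    sess := session.Must(session.NewSession())
    svc := sts.New(sess)
    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
        RoleArn:         aws.String("arn:aws:iam::123456789012:role/tagged"), // hypothetical role
        RoleSessionName: aws.String("tagged-session"),
        Tags: []*sts.Tag{
            {Key: aws.String("Project"), Value: aws.String("Pegasus")}, // hypothetical session tag
        },
        TransitiveTagKeys: aws.StringSlice([]string{"Project"}), // this tag persists through role chaining
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.Int64Value(out.PackedPolicySize)) // percentage of the packed policy/tag limit used
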
+ // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value @@ -1187,6 +1498,19 @@ type AssumeRoleInput struct { // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` } // String returns the string representation @@ -1229,6 +1553,26 @@ func (s *AssumeRoleInput) Validate() error { if s.TokenCode != nil && len(*s.TokenCode) < 6 { invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1254,6 +1598,12 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1272,12 +1622,24 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { return s } +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + // SetTokenCode sets the TokenCode field's value. func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { s.TokenCode = &v return s } +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. type AssumeRoleOutput struct { @@ -1293,15 +1655,14 @@ type AssumeRoleOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. 
We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -1346,7 +1707,7 @@ type AssumeRoleWithSAMLInput struct { // specify a session duration of 12 hours, but your administrator set the maximum // session duration to 6 hours, your operation fails. To learn how to view the // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1356,36 +1717,60 @@ type AssumeRoleWithSAMLInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. - // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. 
You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes // the IdP. 
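For illustration, a minimal AssumeRoleWithSAML sketch over the fields above, assuming the standard aws, aws/session, and service/sts packages (plus log); the ARNs are hypothetical, and samlAssertion stands for a real base64-encoded SAML response obtained from the IdP:

    svc := sts.New(session.Must(session.NewSession()))
    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
        RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-demo"),    // hypothetical role
        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/idp"), // hypothetical SAML provider
        SAMLAssertion: aws.String(samlAssertion),                                 // base64-encoded SAML response
        PolicyArns: []*sts.PolicyDescriptorType{
            {Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")}, // managed session policy
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    // The session's permissions are the intersection of the role's identity-based
    // policy and the session policies passed here.
    _ = out.Credentials
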
// @@ -1399,11 +1784,11 @@ type AssumeRoleWithSAMLInput struct { // The base-64 encoded SAML authentication response provided by the IdP. // - // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the Using IAM guide. + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -1443,6 +1828,16 @@ func (s *AssumeRoleWithSAMLInput) Validate() error { if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1462,6 +1857,12 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + // SetPrincipalArn sets the PrincipalArn field's value. func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { s.PrincipalArn = &v @@ -1496,10 +1897,8 @@ type AssumeRoleWithSAMLOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // The value of the Issuer element of the SAML assertion. @@ -1516,9 +1915,10 @@ type AssumeRoleWithSAMLOutput struct { // ) ) NameQualifier *string `type:"string"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The value of the NameID element in the Subject element of the SAML assertion. @@ -1603,7 +2003,7 @@ type AssumeRoleWithWebIdentityInput struct { // a session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. 
To learn how to view the maximum // value for your role, see View the Maximum Session Duration Setting for a - // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1613,35 +2013,60 @@ type AssumeRoleWithWebIdentityInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. 
- // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The fully qualified host component of the domain name of the identity provider. // // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com @@ -1675,7 +2100,7 @@ type AssumeRoleWithWebIdentityInput struct { // the application makes an AssumeRoleWithWebIdentity call. // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -1718,6 +2143,16 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error { if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1737,6 +2172,12 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI return s } +// SetPolicyArns sets the PolicyArns field's value. 
+func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + // SetProviderId sets the ProviderId field's value. func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { s.ProviderId = &v @@ -1781,19 +2222,18 @@ type AssumeRoleWithWebIdentityOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The issuing authority of the web identity token presented. For OpenID Connect - // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed // in the AssumeRoleWithWebIdentity request. Provider *string `type:"string"` @@ -1860,8 +2300,8 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2028,8 +2468,8 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the credentials. // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2063,6 +2503,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. 
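Callers are expected to treat the returned Credentials as opaque, per the reworded size note above. One hedged way to feed them back into the SDK is a static provider, sketched below with dummy values.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// sessionFromSTSCredentials wraps the temporary credentials in a static
// provider. The session token is treated as an opaque string with no
// assumed maximum size, matching the guidance in the comments above.
func sessionFromSTSCredentials(c *sts.Credentials) (*session.Session, error) {
	return session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder region
		Credentials: credentials.NewStaticCredentials(
			aws.StringValue(c.AccessKeyId),
			aws.StringValue(c.SecretAccessKey),
			aws.StringValue(c.SessionToken),
		),
	})
}

func main() {
	// Dummy values stand in for a real AssumeRole* output.
	demo := &sts.Credentials{
		AccessKeyId:     aws.String("ASIAEXAMPLEKEYID"),
		SecretAccessKey: aws.String("example-secret"),
		SessionToken:    aws.String("example-token"),
	}
	if _, err := sessionFromSTSCredentials(demo); err != nil {
		log.Fatal(err)
	}
}
```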
+ // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2090,8 +2597,8 @@ type GetCallerIdentityOutput struct { Arn *string `min:"20" type:"string"` // The unique identifier of the calling entity. The exact value depends on the - // type of entity making the call. The values returned are those listed in the - // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) // found on the Policy Variables reference page in the IAM User Guide. UserId *string `type:"string"` } @@ -2128,12 +2635,11 @@ type GetFederationTokenInput struct { _ struct{} `type:"structure"` // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds - // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained - // using AWS account (root) credentials are restricted to a maximum of 3600 + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using AWS account (root) credentials defaults to one - // hour. + // session obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. 
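The GetAccessKeyInfo operation added here maps an access key ID to the owning account. A minimal sketch; the key ID is the documented AWS example value, long enough to pass the 16-character minimum enforced by Validate above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // AWS's documented example key ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("owning account:", aws.StringValue(out.Account))
}
```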
The name is used as an identifier for the @@ -2148,36 +2654,107 @@ type GetFederationTokenInput struct { // Name is a required field Name *string `min:"2" type:"string" required:"true"` - // An IAM policy in JSON format that is passed with the GetFederationToken call - // and evaluated along with the policy or policies that are attached to the - // IAM user whose credentials are used to call GetFederationToken. The passed - // policy is used to scope down the permissions that are available to the IAM - // user, by allowing only a subset of the permissions that are granted to the - // IAM user. The passed policy cannot grant more permissions than those granted - // to the IAM user. The final permissions for the federated user are the most - // restrictive set based on the intersection of the passed policy and the IAM - // user policy. - // - // If you do not pass a policy, the resulting temporary security credentials - // have no effective permissions. The only exception is when the temporary security - // credentials are used to access a resource that has a resource-based policy - // that specifically allows the federated user to access the resource. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. 
The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. // - // For more information about how permissions work, see Permissions for GetFederationToken - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. 
For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the user has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the user tag. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -2205,6 +2782,26 @@ func (s *GetFederationTokenInput) Validate() error { if s.Policy != nil && len(*s.Policy) < 1 { invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2230,6 +2827,18 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -2238,10 +2847,8 @@ type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - strongly recommend that you make no assumptions about the maximum size. As - of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes.
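Putting the new GetFederationTokenInput fields together, a call might look like the sketch below. The name, policy document, policy ARN, and tag are all placeholders; note the documented rule that a federated session without any session policy has no permissions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name: aws.String("example-user"), // placeholder federated user name
		// Inline session policy; without any session policy the federated
		// session would have no permissions.
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
		// Managed session policies (up to 10 ARNs); placeholder ARN.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
		},
		// Session tags; keys up to 128 characters, values up to 256.
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("engineering")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("packed size: %d%% of the limit\n", aws.Int64Value(out.PackedPolicySize))
}
```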
+ // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // Identifiers for the federated user associated with the credentials (such @@ -2250,9 +2857,10 @@ type GetFederationTokenOutput struct { // an Amazon S3 bucket policy. FederatedUser *FederatedUser `type:"structure"` - // A percentage value indicating the size of the policy in packed form. The - // service rejects policies for which the packed size is greater than 100 percent - // of the allowed value. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -2288,11 +2896,11 @@ type GetSessionTokenInput struct { _ struct{} `type:"structure"` // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 - // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). - // If the duration is longer than one hour, the session for AWS account owners - // defaults to one hour. + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The identification number of the MFA device that is associated with the IAM @@ -2303,16 +2911,16 @@ type GetSessionTokenInput struct { // You can find the device for an IAM user by going to the AWS Management Console // and viewing the user's security credentials. // - // The regex used to validated this parameter is a string of characters consisting + // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can // also include underscores or any of the following characters: =,.@:/- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if MFA is required. If any policy requires // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, and the user does not provide a code when requesting a set of - // temporary security credentials, the user will receive an "access denied" - // response when requesting resources that require MFA authentication. + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. // // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. @@ -2374,10 +2982,8 @@ type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. 
We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` } @@ -2396,3 +3002,114 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO s.Credentials = v return s } + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go index 4010cc7fa..d5307fcaa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -3,10 +3,9 @@ package sts import "github.com/aws/aws-sdk-go/aws/request" func init() { - initRequest = func(r *request.Request) { - switch r.Operation.Name { - case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: - r.Handlers.Sign.Clear() // these operations are unsigned - } - } + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index ef681ab0c..fcb720dca 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -7,22 +7,14 @@ // request temporary, limited-privilege credentials for AWS Identity and Access // Management (IAM) users or for users that you authenticate (federated users). // This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) // in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) // in Using IAM. 
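For completeness, the GetSessionTokenInput fields documented earlier (DurationSeconds, SerialNumber, TokenCode) combine as follows. A sketch; the MFA serial number and the six-digit code are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(43200), // the documented default of 12 hours
		// MFA details are only needed when a policy requires MFA; both
		// values below are placeholders.
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:    aws.String("123456"), // six digits from the MFA device
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session token expires:", aws.TimeValue(out.Credentials.Expiration))
}
```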
For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) // in the IAM User Guide. // // If you're new to AWS and need additional technical information about a specific @@ -31,14 +23,38 @@ // // Endpoints // -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) // in the IAM User Guide. // -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. +// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. 
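The endpoint guidance above translates to client configuration roughly as follows: pin both the Region and the Regional endpoint rather than relying on the global sts.amazonaws.com. A sketch; the Region is only an example, and later SDK releases also expose a dedicated STS regional-endpoint config setting that this diff does not touch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Provide both the Region and the Regional endpoint, as the package
	// documentation above recommends.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithRegion("us-east-2").
		WithEndpoint("https://sts.us-east-2.amazonaws.com")))

	out, err := sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("caller:", aws.StringValue(out.Arn))
}
```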
// // Recording API requests // @@ -46,8 +62,28 @@ // your AWS account and delivers log files to an Amazon S3 bucket. By using // information collected by CloudTrail, you can determine what requests were // successfully made to STS, who made the request, when it was made, and so -// on. To learn more about CloudTrail, including how to turn it on and find -// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// on. +// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index e24884ef3..a233f542e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -14,11 +14,11 @@ const ( // ErrCodeIDPCommunicationErrorException for service response error code // "IDPCommunicationError". // - // The request could not be fulfilled because the non-AWS identity provider - // (IDP) that was asked to verify the incoming identity token could not be reached. - // This is often a transient error caused by network conditions. Retry the request + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the - // error persists, the non-AWS identity provider might be down or not responding. + // error persists, the identity provider might be down or not responding. ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" // ErrCodeIDPRejectedClaimException for service response error code @@ -56,9 +56,18 @@ const ( // ErrCodePackedPolicyTooLargeException for service response error code // "PackedPolicyTooLarge". // - // The request was rejected because the policy document was too large. 
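Together with the retry customization added in customizations.go, the reworded error constants below surface to callers roughly like this. A sketch of branching on STS error codes via awserr; none of this handling is mandated by the change itself.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sts"
)

// classifySTSError sketches how a caller might branch on the error codes
// defined in errors.go after an STS call fails.
func classifySTSError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return
	}
	switch aerr.Code() {
	case sts.ErrCodePackedPolicyTooLargeException:
		// Compressed session policies plus tags exceeded the packed limit;
		// the message reports how close the request was, as a percentage.
		fmt.Println("trim session policies or tags:", aerr.Message())
	case sts.ErrCodeIDPCommunicationErrorException:
		// Transient by definition; with this change the SDK also retries
		// this code automatically (see the customizations.go hunk above).
		fmt.Println("identity provider unreachable:", aerr.Message())
	case sts.ErrCodeRegionDisabledException:
		fmt.Println("activate STS in this region first:", aerr.Message())
	}
}

func main() {
	classifySTSError(nil) // no-op placeholder so the sketch compiles standalone
}
```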
The error - // message describes how big the policy document is, in packed form, as a percentage - // of what the API allows. + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" // ErrCodeRegionDisabledException for service response error code @@ -67,7 +76,7 @@ const ( // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. ErrCodeRegionDisabledException = "RegionDisabledException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d1..d34a68553 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "sts" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifer of a specific service. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the STS client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a STS client from just a session. // svc := sts.New(mySession) // @@ -46,11 +48,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 000000000..e2e1d6efe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,96 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI provides an interface to enable mocking the +// sts.STS service client's API operation, +// paginators, and waiters. This makes unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } +// +// func main() { +// sess := session.New() +// svc := sts.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. It's suggested to use the pattern above for testing, or to use +// tooling to generate mocks to satisfy the interfaces.
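As a companion to the AssumeRole pattern in the package comment above, a compiling test double for the new GetAccessKeyInfo operation could look like the following (in a _test.go file; all names and values are illustrative).

```go
package example

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
	"github.com/aws/aws-sdk-go/service/sts/stsiface"
)

// mockSTSClient embeds the interface so any method a test does not
// implement panics loudly if it is accidentally called.
type mockSTSClient struct {
	stsiface.STSAPI
}

func (m *mockSTSClient) GetAccessKeyInfo(in *sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) {
	return &sts.GetAccessKeyInfoOutput{Account: aws.String("123456789012")}, nil
}

func TestAccountLookup(t *testing.T) {
	var svc stsiface.STSAPI = &mockSTSClient{}
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err != nil || aws.StringValue(out.Account) != "123456789012" {
		t.Fatalf("unexpected result: %v, %v", out, err)
	}
}
```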
+type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbcdf..000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. 
-For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index af27ff076..000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -race -test.bench=. -test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 988dceaba..000000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,44 +0,0 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -To use a tagged revision: - -```sh -$ go get gopkg.in/ini.v1 -``` - -To use with latest changes: - -```sh -$ go get github.com/go-ini/ini -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index 80afe7431..000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
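The README removed above lists the package's features but no usage snippet. Purely for orientation while reading the deleted sources that follow, here is a minimal sketch of the basic read path; the section and key names are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// Sources can be file names or raw []byte, per the feature list above.
	cfg, err := ini.Load([]byte("app_mode = development\n\n[paths]\ndata = /home/git"))
	if err != nil {
		panic(err)
	}

	// The empty string addresses the unnamed default section.
	fmt.Println(cfg.Section("").Key("app_mode").String())  // development
	fmt.Println(cfg.Section("paths").Key("data").String()) // /home/git
}
```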
- -package ini - -import ( - "fmt" -) - -type ErrDelimiterNotFound struct { - Line string -} - -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index d7982c323..000000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of a or more INI file(s) in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // Actual data is stored here. - sections map[string]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. 
-func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } - if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := "=" - if PrettyFormat || PrettyEqual { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. 
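The section-management methods deleted above (`Empty`, `NewSection`, `GetSection`, `SectionStrings`, `DeleteSection`) compose as in this small sketch; names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	f := ini.Empty() // starts with just the DEFAULT section

	sec, _ := f.NewSection("server") // idempotent: re-creating returns the existing section
	sec.Comment = "HTTP server settings"

	fmt.Println(f.SectionStrings()) // [DEFAULT server]

	f.DeleteSection("server")
	if _, err := f.GetSection("server"); err != nil {
		fmt.Println(err) // section 'server' does not exist
	}
}
```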
- buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } else { - sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:]) - } - if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { - return nil, err - } - } - - if i > 0 || DefaultHeader { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. - alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } else { - key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:]) - } - - // Support multiline comments - key.Comment = strings.Replace(key.Comment, "\n", "\n; ", -1) - - if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { - return nil, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err := buf.WriteString(kname); err != nil { - return nil, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return nil, err - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. 
-// If PrettyFormat has been set to be true,
-// it will align "=" sign with spaces under each section.
-func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
-	buf, err := f.writeToBuffer(indent)
-	if err != nil {
-		return 0, err
-	}
-	return buf.WriteTo(w)
-}
-
-// WriteTo writes file content into io.Writer.
-func (f *File) WriteTo(w io.Writer) (int64, error) {
-	return f.WriteToIndent(w, "")
-}
-
-// SaveToIndent writes content to file system with given value indentation.
-func (f *File) SaveToIndent(filename, indent string) error {
-	// Note: Because we are truncating with os.Create, it is safer to save
-	// to a temporary file location and rename after done.
-	buf, err := f.writeToBuffer(indent)
-	if err != nil {
-		return err
-	}
-
-	return ioutil.WriteFile(filename, buf.Bytes(), 0666)
-}
-
-// SaveTo writes content to file system.
-func (f *File) SaveTo(filename string) error {
-	return f.SaveToIndent(filename, "")
-}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
deleted file mode 100644
index 595f6002f..000000000
--- a/vendor/github.com/go-ini/ini/ini.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// +build go1.6
-
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package ini provides INI file read and write functionality in Go.
-package ini
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"regexp"
-	"runtime"
-)
-
-const (
-	// Name for the default section. You can use this constant or the string literal.
-	// In most cases, an empty string is all you need to access the section.
-	DEFAULT_SECTION = "DEFAULT"
-
-	// Maximum allowed depth when recursively substituting variable names.
-	_DEPTH_VALUES = 99
-	_VERSION      = "1.38.1"
-)
-
-// Version returns the current package version literal.
-func Version() string {
-	return _VERSION
-}
-
-var (
-	// Delimiter to determine or compose a new line.
-	// This variable will be changed to "\r\n" automatically on Windows
-	// at package init time.
-	LineBreak = "\n"
-
-	// Variable regexp pattern: %(variable)s
-	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
-
-	// Indicate whether to align "=" sign with spaces to produce pretty output
-	// or reduce all possible spaces for compact format.
-	PrettyFormat = true
-
-	// Place spaces around "=" sign even when PrettyFormat is false
-	PrettyEqual = false
-
-	// Explicitly write DEFAULT section header
-	DefaultHeader = false
-
-	// Indicate whether to put a line between sections
-	PrettySection = true
-)
-
-func init() {
-	if runtime.GOOS == "windows" {
-		LineBreak = "\r\n"
-	}
-}
-
-func inSlice(str string, s []string) bool {
-	for _, v := range s {
-		if str == v {
-			return true
-		}
-	}
-	return false
-}
-
-// dataSource is an interface that returns an object which can be read and closed.
-type dataSource interface {
-	ReadCloser() (io.ReadCloser, error)
-}
-
-// sourceFile represents an object that contains content on the local file system.
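The serialization path deleted above (`writeToBuffer` feeding `WriteTo`/`SaveTo`) is driven by the package-level `PrettyFormat` flag. A small sketch of the round trip, with invented section and key names:

```go
package main

import (
	"os"

	"github.com/go-ini/ini"
)

func main() {
	f := ini.Empty()
	sec, _ := f.NewSection("database")
	sec.NewKey("host", "127.0.0.1")
	sec.NewKey("port", "5432")

	// PrettyFormat (the default) pads keys so "=" aligns per section,
	// as implemented by writeToBuffer above.
	ini.PrettyFormat = true
	f.WriteTo(os.Stdout)
	// [database]
	// host = 127.0.0.1
	// port = 5432
}
```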
-type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. -type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. 
-	UnparseableSections []string
-}
-
-func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
-	sources := make([]dataSource, len(others)+1)
-	sources[0], err = parseDataSource(source)
-	if err != nil {
-		return nil, err
-	}
-	for i := range others {
-		sources[i+1], err = parseDataSource(others[i])
-		if err != nil {
-			return nil, err
-		}
-	}
-	f := newFile(sources, opts)
-	if err = f.Reload(); err != nil {
-		return nil, err
-	}
-	return f, nil
-}
-
-// Load loads and parses from INI data sources.
-// Arguments can be a mix of file names (string type) and raw data in []byte.
-// It will return an error if the list contains nonexistent files.
-func Load(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{}, source, others...)
-}
-
-// LooseLoad has exactly the same functionality as the Load function,
-// except it ignores nonexistent files instead of returning an error.
-func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{Loose: true}, source, others...)
-}
-
-// InsensitiveLoad has exactly the same functionality as the Load function,
-// except it forces all section and key names to be lowercased.
-func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
-}
-
-// ShadowLoad has exactly the same functionality as the Load function,
-// except it allows having shadow keys.
-func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
-}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
deleted file mode 100644
index 7c8566a1b..000000000
--- a/vendor/github.com/go-ini/ini/key.go
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Key represents a key under a section.
-type Key struct {
-	s               *Section
-	Comment         string
-	name            string
-	value           string
-	isAutoIncrement bool
-	isBooleanType   bool
-
-	isShadow bool
-	shadows  []*Key
-
-	nestedValues []string
-}
-
-// newKey simply returns a key object with given values.
-func newKey(s *Section, name, val string) *Key {
-	return &Key{
-		s:     s,
-		name:  name,
-		value: val,
-	}
-}
-
-func (k *Key) addShadow(val string) error {
-	if k.isShadow {
-		return errors.New("cannot add shadow to another shadow key")
-	} else if k.isAutoIncrement || k.isBooleanType {
-		return errors.New("cannot add shadow to auto-increment or boolean key")
-	}
-
-	shadow := newKey(k.s, k.name, val)
-	shadow.isShadow = true
-	k.shadows = append(k.shadows, shadow)
-	return nil
-}
-
-// AddShadow adds a new shadow key to itself.
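`ShadowLoad` above is the convenience wrapper for `LoadOptions.AllowShadows`, which keeps every occurrence of a repeated key rather than overwriting. A sketch with invented data:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[stage]\nserver = 10.0.0.1\nserver = 10.0.0.2\n")

	// Equivalent to ini.LoadSources(ini.LoadOptions{AllowShadows: true}, data).
	cfg, err := ini.ShadowLoad(data)
	if err != nil {
		panic(err)
	}

	// ValueWithShadows (defined below) returns the primary value plus shadows.
	fmt.Println(cfg.Section("stage").Key("server").ValueWithShadows())
	// [10.0.0.1 10.0.0.2]
}
```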
-func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - return []string{k.value} - } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. 
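`transformValue` above implements the package's recursive `%(name)s` substitution: it resolves against the key's own section first, then falls back to the default section, up to `_DEPTH_VALUES` levels. A sketch with invented keys:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte(`NAME = ini

[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s
`)
	cfg, _ := ini.Load(data)

	// %(NAME)s resolves to the author section's own NAME, not the default one.
	fmt.Println(cfg.Section("author").Key("GITHUB").String())
	// https://github.com/Unknwon
}
```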
-func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. 
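Note the side effect in the `Must*` family above: when parsing fails and a default is supplied, the default is also written back into the key. A sketch with invented values:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("[server]\nport = not-a-number\n"))
	key := cfg.Section("server").Key("port")

	// Int surfaces the strconv error; MustInt swallows it and, because a
	// default is supplied, stores "8080" back into the key.
	if _, err := key.Int(); err != nil {
		fmt.Println("Int:", err)
	}
	fmt.Println(key.MustInt(8080)) // 8080

	// A missing key parses as empty, so the default duration wins.
	fmt.Println(cfg.Section("server").Key("timeout").MustDuration(5 * time.Second)) // 5s
}
```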
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. 
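The `In*` helpers above are whitelist lookups: the parsed value is returned only if it matches one of the candidates, otherwise the default is returned. An illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("level = verbose\nworkers = 7\n"))
	sec := cfg.Section("")

	// "verbose" is not among the candidates, so the default wins.
	fmt.Println(sec.Key("level").In("info", []string{"debug", "info", "warn"})) // info

	// 7 is not a candidate either.
	fmt.Println(sec.Key("workers").InInt(4, []int{1, 2, 4, 8})) // 4
}
```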
-func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx += 1 - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. 
-func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
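The three list families above differ only in how they treat unparseable entries: the plain getters keep them as zero values, the `Valid*` getters drop them, and the `Strict*` getters fail on the first one. A sketch with an invented key:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("ports = 8080, 8081, oops, 8083\n"))
	key := cfg.Section("").Key("ports")

	fmt.Println(key.Ints(","))      // invalid entry kept as zero: [8080 8081 0 8083]
	fmt.Println(key.ValidInts(",")) // invalid entry dropped:      [8080 8081 8083]

	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict:", err) // errors on "oops"
	}
}
```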
-func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - for _, str := range strs { - val, err := strconv.Atoi(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. 
-func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 3daf54c38..000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
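`readContinuationLines` and `readMultilines` above are the two multi-line mechanisms the parser supports: a trailing backslash joins the next line onto the value, while triple-quoted values keep their literal newlines. A sketch of both, with invented keys:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("args = -v \\\n--trace\nbanner = \"\"\"line one\nline two\"\"\"\n")
	cfg, _ := ini.Load(data)

	fmt.Println(cfg.Section("").Key("args").String())      // -v --trace
	fmt.Printf("%q\n", cfg.Section("").Key("banner").String()) // "line one\nline two"
}
```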
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, - parserBufferSize int, - ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if unescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if unescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !ignoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !ignoreInlineComment { - var i int - if spaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && unescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } - } else if allowPythonMultilines && lastChar == '\n' { - parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - identSize := -1 - val := line - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil { - if peekErr == io.EOF { - return val, nil - } - return "", peekErr - } - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - if len(peekMatches) != 3 { - return val, nil - } - - currentIdentSize := len(peekMatches[1]) - // NOTE: Return if not a python-ini multi-line value. - if currentIdentSize < 0 { - return val, nil - } - identSize = currentIdentSize - - // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.readUntil('\n') - if err != nil { - return "", err - } - - val += fmt.Sprintf("\n%s", peekMatches[2]) - } - - // NOTE: If it was a Python multi-line value, - // return the appended value. - if identSize > 0 { - return val, nil - } - } - - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. 
- name := DEFAULT_SECTION - if f.options.Insensitive { - name = strings.ToLower(DEFAULT_SECTION) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 1kb at a time. - currentPeekSize := 1024 - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - closeIdx := bytes.LastIndexByte(line, ']') - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset aotu-counter and comments - p.comment.Reset() - p.count = 1 - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(line) - if err != nil { - // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) { - switch { - case f.options.AllowBooleanKeys: - kname, err := p.readValue(line, - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - - case f.options.SkipUnrecognizableLines: - continue - } - } - return err - } - - // Auto increment. 
- isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index 340a1efad..000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. 
-func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -// TODO: delete me in v2 -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + "." 
- children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index 9719dc698..000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. - TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= ('A' - 'a') - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to strcut. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetUint(uintVal) - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { - opts := strings.SplitN(tag, ",", 3) - rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" - } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - return rawName, omitEmpty, allowShadow -} - -func (s *Section) mapTo(val reflect.Value, isStrict bool) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. 
-func (s *Section) MapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, false) -} - -// MapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// MapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapTo maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - sliceOf := field.Type().Elem().Kind() - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. 
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - opts := strings.SplitN(tag, ",", 2) - if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { - continue - } - - fieldName := s.parseFieldName(tpField.Name, opts[0]) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - continue - } - - // Note: Same reason as secion. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - - // Add comment from comment tag - if len(key.Comment) == 0 { - key.Comment = tpField.Tag.Get("comment") - } - - if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects secion from given struct. 
-func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. -func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index fbe4ec442..ad5989800 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -27,6 +27,7 @@ Daniël van Eeden Dave Protasowski DisposaBoy Egor Smolyakov +Erwan Martin Evan Shaw Frederick Mayle Gustavo Kristic @@ -34,12 +35,16 @@ Hajime Nakagami Hanno Braun Henri Yandell Hirotaka Yamamoto +Huyiguang ICHINOSE Shogo +Ilia Cimpoes INADA Naoki Jacek Szwec James Harr Jeff Hodges Jeffrey Charles +Jerome Meyer +Jiajia Zhong Jian Zhen Joshua Prunier Julien Lefevre @@ -58,6 +63,7 @@ Lucas Liu Luke Scott Maciej Zimnoch Michael Woolnough +Nathanial Murphy Nicola Peduzzi Olivier Mengué oscarzhao @@ -69,10 +75,15 @@ Richard Wilkes Robert Russell Runrioter Wung Shuode Li +Simon J Mudd Soroush Pour Stan Putrya Stanley Gunawan +Steven Hartland Thomas Wodarek +Tim Ruffles +Tom Jenkinson +Vladimir Kovpak Xiangyu Hu Xiaobing Jiang Xiuming Chen @@ -82,9 +93,13 @@ Zhenye Xie Barracuda Networks, Inc. Counting Ltd. +DigitalOcean Inc. +Facebook Inc. +GitHub Inc. Google Inc. InfoSum Ltd. Keybase Inc. +Multiplay Ltd. Percona LLC Pivotal Inc. Stripe Inc. 
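For readers tracking what this change removes: the `struct.go` file deleted above implemented go-ini's reflection-based struct mapping (`MapTo`, `ReflectFrom`, and tag options such as `omitempty` and `allowshadow`). A minimal sketch of how that API was typically used before the dependency was dropped — the config content and struct names here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini" // dependency removed from the vendor tree by this change
)

// Server is a hypothetical config struct; the `ini` tags name the keys,
// matching the tag handling in the deleted struct.go above.
type Server struct {
	Host string `ini:"host"`
	Port int    `ini:"port"`
}

func main() {
	cfg, err := ini.Load([]byte("[server]\nhost = example.com\nport = 3306\n"))
	if err != nil {
		panic(err)
	}

	var s Server
	// MapTo walks the struct fields via reflection and fills them from the
	// section's keys, as implemented by setWithProperType above.
	if err := cfg.Section("server").MapTo(&s); err != nil {
		panic(err)
	}
	fmt.Println(s.Host, s.Port) // example.com 3306
}
```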
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md index 2d87d74c9..9cb97b38d 100644 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -1,3 +1,42 @@ +## Version 1.5 (2020-01-07) + +Changes: + + - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017) + - Improve buffer handling (#890) + - Document potentially insecure TLS configs (#901) + - Use a double-buffering scheme to prevent data races (#943) + - Pass uint64 values without converting them to string (#838, #955) + - Update collations and make utf8mb4 default (#877, #1054) + - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995) + - Removed CloudSQL support (#993, #1007) + - Add Go Module support (#1003) + +New Features: + + - Implement support of optional TLS (#900) + - Check connection liveness (#934, #964, #997, #1048, #1051, #1052) + - Implement Connector Interface (#941, #958, #1020, #1035) + +Bugfixes: + + - Mark connections as bad on error during ping (#875) + - Mark connections as bad on error during dial (#867) + - Fix connection leak caused by rapid context cancellation (#1024) + - Mark connections as bad on error during Conn.Prepare (#1030) + + +## Version 1.4.1 (2018-11-14) + +Bugfixes: + + - Fix TIME format for binary columns (#818) + - Fix handling of empty auth plugin names (#835) + - Fix caching_sha2_password with empty password (#826) + - Fix canceled context broke mysqlConn (#862) + - Fix OldAuthSwitchRequest support (#870) + - Fix Auth Response packet for cleartext password (#887) + ## Version 1.4 (2018-06-03) Changes: diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md deleted file mode 100644 index 8fe16bcb4..000000000 --- a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing Guidelines - -## Reporting Issues - -Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). - -## Contributing Code - -By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. -Don't forget to add yourself to the AUTHORS file. - -### Code Review - -Everyone is invited to review and comment on pull requests. -If it looks fine to you, comment with "LGTM" (Looks good to me). - -If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. - -Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". - -## Development Ideas - -If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 7e7df1a3d..d2627a41a 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -40,7 +40,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * Optional placeholder interpolation ## Requirements - * Go 1.8 or higher. We aim to support the 3 latest versions of Go. + * Go 1.10 or higher. 
We aim to support the 3 latest versions of Go. * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) --------------------------------------- @@ -166,18 +166,34 @@ Sets the charset used for client-server interaction (`"SET NAMES "`). If Usage of the `charset` parameter is discouraged because it issues additional queries to the server. Unless you need the fallback behavior, please use `collation` instead. +##### `checkConnLiveness` + +``` +Type: bool +Valid Values: true, false +Default: true +``` + +On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection. +`checkConnLiveness=false` disables this liveness check of connections. + ##### `collation` ``` Type: string Valid Values: -Default: utf8_general_ci +Default: utf8mb4_general_ci ``` Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. A list of valid charsets for a server is retrievable with `SHOW COLLATION`. +The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL. + +Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)). + + ##### `clientFoundRows` ``` @@ -328,11 +344,11 @@ Timeout for establishing connections, aka dial timeout. The value must be a deci ``` Type: bool / string -Valid Values: true, false, skip-verify, +Valid Values: true, false, skip-verify, preferred, Default: false ``` -`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). ##### `writeTimeout` @@ -391,14 +407,9 @@ TCP on a remote host, e.g. 
 Amazon RDS:
 id:password@tcp(your-amazonaws-uri.com:3306)/dbname
 ```
 
-Google Cloud SQL on App Engine (First Generation MySQL Server):
-```
-user@cloudsql(project-id:instance-name)/dbname
-```
-
-Google Cloud SQL on App Engine (Second Generation MySQL Server):
+Google Cloud SQL on App Engine:
 ```
-user@cloudsql(project-id:regionname:instance-name)/dbname
+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
 ```
 
 TCP using default port (3306) on localhost:
@@ -444,7 +455,7 @@ See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/my
 ### `time.Time` support
 
 The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
 
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
 
 **Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
 
@@ -452,13 +463,13 @@ Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-d
 ### Unicode support
 
-Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
+Since version 1.5 Go-MySQL-Driver automatically uses the collation `utf8mb4_general_ci` by default.
 
 Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
 
 Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
 
-See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
+See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
 
 ## Testing / Development
 To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
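The README changes above introduce several DSN knobs at once: `checkConnLiveness` (on by default), the new `utf8mb4_general_ci` default collation, `tls=preferred`, and `parseTime`/`loc` for `time.Time` scanning. A minimal sketch tying them together against the updated driver; the host, credentials, and database name are placeholders:

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	// parseTime=true scans DATE/DATETIME into time.Time, loc sets its default
	// location, collation pins the connection collation explicitly, and
	// tls=preferred upgrades to TLS only when the server advertises it
	// (no reliable security, as the README notes). checkConnLiveness is
	// left at its default (true).
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=UTC&collation=utf8mb4_general_ci&tls=preferred"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var now time.Time
	// NOW() returns a DATETIME, so with parseTime=true it scans into time.Time.
	if err := db.QueryRow("SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println("server time:", now)
}
```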
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go index 2f61ecd4f..fec7040d4 100644 --- a/vendor/github.com/go-sql-driver/mysql/auth.go +++ b/vendor/github.com/go-sql-driver/mysql/auth.go @@ -234,64 +234,64 @@ func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) erro if err != nil { return err } - return mc.writeAuthSwitchPacket(enc, false) + return mc.writeAuthSwitchPacket(enc) } -func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, bool, error) { +func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) { switch plugin { case "caching_sha2_password": authResp := scrambleSHA256Password(authData, mc.cfg.Passwd) - return authResp, false, nil + return authResp, nil case "mysql_old_password": if !mc.cfg.AllowOldPasswords { - return nil, false, ErrOldPassword + return nil, ErrOldPassword } // Note: there are edge cases where this should work but doesn't; // this is currently "wontfix": // https://github.com/go-sql-driver/mysql/issues/184 - authResp := scrambleOldPassword(authData[:8], mc.cfg.Passwd) - return authResp, true, nil + authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0) + return authResp, nil case "mysql_clear_password": if !mc.cfg.AllowCleartextPasswords { - return nil, false, ErrCleartextPassword + return nil, ErrCleartextPassword } // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html - return []byte(mc.cfg.Passwd), true, nil + return append([]byte(mc.cfg.Passwd), 0), nil case "mysql_native_password": if !mc.cfg.AllowNativePasswords { - return nil, false, ErrNativePassword + return nil, ErrNativePassword } // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html // Native password authentication only need and will need 20-byte challenge. 
authResp := scramblePassword(authData[:20], mc.cfg.Passwd) - return authResp, false, nil + return authResp, nil case "sha256_password": if len(mc.cfg.Passwd) == 0 { - return nil, true, nil + return []byte{0}, nil } if mc.cfg.tls != nil || mc.cfg.Net == "unix" { // write cleartext auth packet - return []byte(mc.cfg.Passwd), true, nil + return append([]byte(mc.cfg.Passwd), 0), nil } pubKey := mc.cfg.pubKey if pubKey == nil { // request public key from server - return []byte{1}, false, nil + return []byte{1}, nil } // encrypted password enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) - return enc, false, err + return enc, err default: errLog.Print("unknown auth plugin:", plugin) - return nil, false, ErrUnknownPlugin + return nil, ErrUnknownPlugin } } @@ -315,11 +315,11 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { plugin = newPlugin - authResp, addNUL, err := mc.auth(authData, plugin) + authResp, err := mc.auth(authData, plugin) if err != nil { return err } - if err = mc.writeAuthSwitchPacket(authResp, addNUL); err != nil { + if err = mc.writeAuthSwitchPacket(authResp); err != nil { return err } @@ -352,7 +352,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { case cachingSha2PasswordPerformFullAuthentication: if mc.cfg.tls != nil || mc.cfg.Net == "unix" { // write cleartext auth packet - err = mc.writeAuthSwitchPacket([]byte(mc.cfg.Passwd), true) + err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0)) if err != nil { return err } @@ -360,13 +360,15 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { pubKey := mc.cfg.pubKey if pubKey == nil { // request public key from server - data := mc.buf.takeSmallBuffer(4 + 1) + data, err := mc.buf.takeSmallBuffer(4 + 1) + if err != nil { + return err + } data[4] = cachingSha2PasswordRequestPublicKey mc.writePacket(data) // parse public key - data, err := mc.readPacket() - if err != nil { + if data, err = mc.readPacket(); err != nil { return err } diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go index eb4748bf4..0774c5c8c 100644 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -15,47 +15,69 @@ import ( ) const defaultBufSize = 4096 +const maxCachedBufSize = 256 * 1024 // A buffer which is used for both reading and writing. // This is possible since communication on each connection is synchronous. // In other words, we can't write and read simultaneously on the same connection. // The buffer is similar to bufio.Reader / Writer but zero-copy-ish // Also highly optimized for this particular use case. +// This buffer is backed by two byte slices in a double-buffering scheme type buffer struct { - buf []byte + buf []byte // buf is a byte buffer who's length and capacity are equal. nc net.Conn idx int length int timeout time.Duration + dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer + flipcnt uint // flipccnt is the current buffer counter for double-buffering } +// newBuffer allocates and returns a new buffer. 
func newBuffer(nc net.Conn) buffer { - var b [defaultBufSize]byte + fg := make([]byte, defaultBufSize) return buffer{ - buf: b[:], - nc: nc, + buf: fg, + nc: nc, + dbuf: [2][]byte{fg, nil}, } } +// flip replaces the active buffer with the background buffer +// this is a delayed flip that simply increases the buffer counter; +// the actual flip will be performed the next time we call `buffer.fill` +func (b *buffer) flip() { + b.flipcnt += 1 +} + // fill reads into the buffer until at least _need_ bytes are in it func (b *buffer) fill(need int) error { n := b.length + // fill data into its double-buffering target: if we've called + // flip on this buffer, we'll be copying to the background buffer, + // and then filling it with network data; otherwise we'll just move + // the contents of the current buffer to the front before filling it + dest := b.dbuf[b.flipcnt&1] + + // grow buffer if necessary to fit the whole packet. + if need > len(dest) { + // Round up to the next multiple of the default size + dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) - // move existing data to the beginning - if n > 0 && b.idx > 0 { - copy(b.buf[0:n], b.buf[b.idx:]) + // if the allocated buffer is not too large, move it to backing storage + // to prevent extra allocations on applications that perform large reads + if len(dest) <= maxCachedBufSize { + b.dbuf[b.flipcnt&1] = dest + } } - // grow buffer if necessary - // TODO: let the buffer shrink again at some point - // Maybe keep the org buf slice and swap back? - if need > len(b.buf) { - // Round up to the next multiple of the default size - newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) - copy(newBuf, b.buf) - b.buf = newBuf + // if we're filling the fg buffer, move the existing data to the start of it. + // if we're filling the bg buffer, copy over the data + if n > 0 { + copy(dest[:n], b.buf[b.idx:]) } + b.buf = dest b.idx = 0 for { @@ -105,43 +127,56 @@ func (b *buffer) readNext(need int) ([]byte, error) { return b.buf[offset:b.idx], nil } -// returns a buffer with the requested size. +// takeBuffer returns a buffer with the requested size. // If possible, a slice from the existing buffer is returned. // Otherwise a bigger buffer is made. // Only one buffer (total) can be used at a time. -func (b *buffer) takeBuffer(length int) []byte { +func (b *buffer) takeBuffer(length int) ([]byte, error) { if b.length > 0 { - return nil + return nil, ErrBusyBuffer } // test (cheap) general case first - if length <= defaultBufSize || length <= cap(b.buf) { - return b.buf[:length] + if length <= cap(b.buf) { + return b.buf[:length], nil } if length < maxPacketSize { b.buf = make([]byte, length) - return b.buf + return b.buf, nil } - return make([]byte, length) + + // buffer is larger than we want to store. + return make([]byte, length), nil } -// shortcut which can be used if the requested buffer is guaranteed to be -// smaller than defaultBufSize +// takeSmallBuffer is shortcut which can be used if length is +// known to be smaller than defaultBufSize. // Only one buffer (total) can be used at a time. -func (b *buffer) takeSmallBuffer(length int) []byte { +func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { if b.length > 0 { - return nil + return nil, ErrBusyBuffer } - return b.buf[:length] + return b.buf[:length], nil } // takeCompleteBuffer returns the complete existing buffer. // This can be used if the necessary buffer size is unknown. +// cap and len of the returned buffer will be equal. 
// Only one buffer (total) can be used at a time. -func (b *buffer) takeCompleteBuffer() []byte { +func (b *buffer) takeCompleteBuffer() ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + return b.buf, nil +} + +// store stores buf, an updated buffer, if its suitable to do so. +func (b *buffer) store(buf []byte) error { if b.length > 0 { - return nil + return ErrBusyBuffer + } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) { + b.buf = buf[:cap(buf)] } - return b.buf + return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go index 136c9e4d1..8d2b55676 100644 --- a/vendor/github.com/go-sql-driver/mysql/collations.go +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -8,183 +8,190 @@ package mysql -const defaultCollation = "utf8_general_ci" +const defaultCollation = "utf8mb4_general_ci" const binaryCollation = "binary" // A list of available collations mapped to the internal ID. // To update this map use the following MySQL query: -// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID +// +// Handshake packet have only 1 byte for collation_id. So we can't use collations with ID > 255. +// +// ucs2, utf16, and utf32 can't be used for connection charset. +// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset +// They are commented out to reduce this map. var collations = map[string]byte{ - "big5_chinese_ci": 1, - "latin2_czech_cs": 2, - "dec8_swedish_ci": 3, - "cp850_general_ci": 4, - "latin1_german1_ci": 5, - "hp8_english_ci": 6, - "koi8r_general_ci": 7, - "latin1_swedish_ci": 8, - "latin2_general_ci": 9, - "swe7_swedish_ci": 10, - "ascii_general_ci": 11, - "ujis_japanese_ci": 12, - "sjis_japanese_ci": 13, - "cp1251_bulgarian_ci": 14, - "latin1_danish_ci": 15, - "hebrew_general_ci": 16, - "tis620_thai_ci": 18, - "euckr_korean_ci": 19, - "latin7_estonian_cs": 20, - "latin2_hungarian_ci": 21, - "koi8u_general_ci": 22, - "cp1251_ukrainian_ci": 23, - "gb2312_chinese_ci": 24, - "greek_general_ci": 25, - "cp1250_general_ci": 26, - "latin2_croatian_ci": 27, - "gbk_chinese_ci": 28, - "cp1257_lithuanian_ci": 29, - "latin5_turkish_ci": 30, - "latin1_german2_ci": 31, - "armscii8_general_ci": 32, - "utf8_general_ci": 33, - "cp1250_czech_cs": 34, - "ucs2_general_ci": 35, - "cp866_general_ci": 36, - "keybcs2_general_ci": 37, - "macce_general_ci": 38, - "macroman_general_ci": 39, - "cp852_general_ci": 40, - "latin7_general_ci": 41, - "latin7_general_cs": 42, - "macce_bin": 43, - "cp1250_croatian_ci": 44, - "utf8mb4_general_ci": 45, - "utf8mb4_bin": 46, - "latin1_bin": 47, - "latin1_general_ci": 48, - "latin1_general_cs": 49, - "cp1251_bin": 50, - "cp1251_general_ci": 51, - "cp1251_general_cs": 52, - "macroman_bin": 53, - "utf16_general_ci": 54, - "utf16_bin": 55, - "utf16le_general_ci": 56, - "cp1256_general_ci": 57, - "cp1257_bin": 58, - "cp1257_general_ci": 59, - "utf32_general_ci": 60, - "utf32_bin": 61, - "utf16le_bin": 62, - "binary": 63, - "armscii8_bin": 64, - "ascii_bin": 65, - "cp1250_bin": 66, - "cp1256_bin": 67, - "cp866_bin": 68, - "dec8_bin": 69, - "greek_bin": 70, - "hebrew_bin": 71, - "hp8_bin": 72, - "keybcs2_bin": 73, - "koi8r_bin": 74, - "koi8u_bin": 75, - "latin2_bin": 77, - "latin5_bin": 78, - "latin7_bin": 79, - "cp850_bin": 80, - "cp852_bin": 81, - "swe7_bin": 82, - "utf8_bin": 83, - "big5_bin": 84, - 
"euckr_bin": 85, - "gb2312_bin": 86, - "gbk_bin": 87, - "sjis_bin": 88, - "tis620_bin": 89, - "ucs2_bin": 90, - "ujis_bin": 91, - "geostd8_general_ci": 92, - "geostd8_bin": 93, - "latin1_spanish_ci": 94, - "cp932_japanese_ci": 95, - "cp932_bin": 96, - "eucjpms_japanese_ci": 97, - "eucjpms_bin": 98, - "cp1250_polish_ci": 99, - "utf16_unicode_ci": 101, - "utf16_icelandic_ci": 102, - "utf16_latvian_ci": 103, - "utf16_romanian_ci": 104, - "utf16_slovenian_ci": 105, - "utf16_polish_ci": 106, - "utf16_estonian_ci": 107, - "utf16_spanish_ci": 108, - "utf16_swedish_ci": 109, - "utf16_turkish_ci": 110, - "utf16_czech_ci": 111, - "utf16_danish_ci": 112, - "utf16_lithuanian_ci": 113, - "utf16_slovak_ci": 114, - "utf16_spanish2_ci": 115, - "utf16_roman_ci": 116, - "utf16_persian_ci": 117, - "utf16_esperanto_ci": 118, - "utf16_hungarian_ci": 119, - "utf16_sinhala_ci": 120, - "utf16_german2_ci": 121, - "utf16_croatian_ci": 122, - "utf16_unicode_520_ci": 123, - "utf16_vietnamese_ci": 124, - "ucs2_unicode_ci": 128, - "ucs2_icelandic_ci": 129, - "ucs2_latvian_ci": 130, - "ucs2_romanian_ci": 131, - "ucs2_slovenian_ci": 132, - "ucs2_polish_ci": 133, - "ucs2_estonian_ci": 134, - "ucs2_spanish_ci": 135, - "ucs2_swedish_ci": 136, - "ucs2_turkish_ci": 137, - "ucs2_czech_ci": 138, - "ucs2_danish_ci": 139, - "ucs2_lithuanian_ci": 140, - "ucs2_slovak_ci": 141, - "ucs2_spanish2_ci": 142, - "ucs2_roman_ci": 143, - "ucs2_persian_ci": 144, - "ucs2_esperanto_ci": 145, - "ucs2_hungarian_ci": 146, - "ucs2_sinhala_ci": 147, - "ucs2_german2_ci": 148, - "ucs2_croatian_ci": 149, - "ucs2_unicode_520_ci": 150, - "ucs2_vietnamese_ci": 151, - "ucs2_general_mysql500_ci": 159, - "utf32_unicode_ci": 160, - "utf32_icelandic_ci": 161, - "utf32_latvian_ci": 162, - "utf32_romanian_ci": 163, - "utf32_slovenian_ci": 164, - "utf32_polish_ci": 165, - "utf32_estonian_ci": 166, - "utf32_spanish_ci": 167, - "utf32_swedish_ci": 168, - "utf32_turkish_ci": 169, - "utf32_czech_ci": 170, - "utf32_danish_ci": 171, - "utf32_lithuanian_ci": 172, - "utf32_slovak_ci": 173, - "utf32_spanish2_ci": 174, - "utf32_roman_ci": 175, - "utf32_persian_ci": 176, - "utf32_esperanto_ci": 177, - "utf32_hungarian_ci": 178, - "utf32_sinhala_ci": 179, - "utf32_german2_ci": 180, - "utf32_croatian_ci": 181, - "utf32_unicode_520_ci": 182, - "utf32_vietnamese_ci": 183, + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + //"ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + 
"latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + //"utf16_general_ci": 54, + //"utf16_bin": 55, + //"utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + //"utf32_general_ci": 60, + //"utf32_bin": 61, + //"utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "utf8_tolower_ci": 76, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + //"ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + //"utf16_unicode_ci": 101, + //"utf16_icelandic_ci": 102, + //"utf16_latvian_ci": 103, + //"utf16_romanian_ci": 104, + //"utf16_slovenian_ci": 105, + //"utf16_polish_ci": 106, + //"utf16_estonian_ci": 107, + //"utf16_spanish_ci": 108, + //"utf16_swedish_ci": 109, + //"utf16_turkish_ci": 110, + //"utf16_czech_ci": 111, + //"utf16_danish_ci": 112, + //"utf16_lithuanian_ci": 113, + //"utf16_slovak_ci": 114, + //"utf16_spanish2_ci": 115, + //"utf16_roman_ci": 116, + //"utf16_persian_ci": 117, + //"utf16_esperanto_ci": 118, + //"utf16_hungarian_ci": 119, + //"utf16_sinhala_ci": 120, + //"utf16_german2_ci": 121, + //"utf16_croatian_ci": 122, + //"utf16_unicode_520_ci": 123, + //"utf16_vietnamese_ci": 124, + //"ucs2_unicode_ci": 128, + //"ucs2_icelandic_ci": 129, + //"ucs2_latvian_ci": 130, + //"ucs2_romanian_ci": 131, + //"ucs2_slovenian_ci": 132, + //"ucs2_polish_ci": 133, + //"ucs2_estonian_ci": 134, + //"ucs2_spanish_ci": 135, + //"ucs2_swedish_ci": 136, + //"ucs2_turkish_ci": 137, + //"ucs2_czech_ci": 138, + //"ucs2_danish_ci": 139, + //"ucs2_lithuanian_ci": 140, + //"ucs2_slovak_ci": 141, + //"ucs2_spanish2_ci": 142, + //"ucs2_roman_ci": 143, + //"ucs2_persian_ci": 144, + //"ucs2_esperanto_ci": 145, + //"ucs2_hungarian_ci": 146, + //"ucs2_sinhala_ci": 147, + //"ucs2_german2_ci": 148, + //"ucs2_croatian_ci": 149, + //"ucs2_unicode_520_ci": 150, + //"ucs2_vietnamese_ci": 151, + //"ucs2_general_mysql500_ci": 159, + //"utf32_unicode_ci": 160, + //"utf32_icelandic_ci": 161, + //"utf32_latvian_ci": 162, + //"utf32_romanian_ci": 163, + //"utf32_slovenian_ci": 164, + //"utf32_polish_ci": 165, + //"utf32_estonian_ci": 166, + //"utf32_spanish_ci": 167, + //"utf32_swedish_ci": 168, + //"utf32_turkish_ci": 169, + //"utf32_czech_ci": 170, + //"utf32_danish_ci": 171, + //"utf32_lithuanian_ci": 172, + //"utf32_slovak_ci": 173, + //"utf32_spanish2_ci": 174, + //"utf32_roman_ci": 175, + //"utf32_persian_ci": 176, + //"utf32_esperanto_ci": 177, + //"utf32_hungarian_ci": 178, + //"utf32_sinhala_ci": 179, + //"utf32_german2_ci": 180, + //"utf32_croatian_ci": 181, + //"utf32_unicode_520_ci": 182, + //"utf32_vietnamese_ci": 183, "utf8_unicode_ci": 192, "utf8_icelandic_ci": 193, "utf8_latvian_ci": 194, @@ -234,18 +241,25 @@ var collations = map[string]byte{ "utf8mb4_croatian_ci": 245, "utf8mb4_unicode_520_ci": 246, "utf8mb4_vietnamese_ci": 247, + "gb18030_chinese_ci": 248, + "gb18030_bin": 249, + "gb18030_unicode_520_ci": 250, + 
"utf8mb4_0900_ai_ci": 255, } // A blacklist of collations which is unsafe to interpolate parameters. // These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. var unsafeCollations = map[string]bool{ - "big5_chinese_ci": true, - "sjis_japanese_ci": true, - "gbk_chinese_ci": true, - "big5_bin": true, - "gb2312_bin": true, - "gbk_bin": true, - "sjis_bin": true, - "cp932_japanese_ci": true, - "cp932_bin": true, + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, + "gb18030_chinese_ci": true, + "gb18030_bin": true, + "gb18030_unicode_520_ci": true, } diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go new file mode 100644 index 000000000..024eb2858 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go @@ -0,0 +1,54 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos + +package mysql + +import ( + "errors" + "io" + "net" + "syscall" +) + +var errUnexpectedRead = errors.New("unexpected read from socket") + +func connCheck(conn net.Conn) error { + var sysErr error + + sysConn, ok := conn.(syscall.Conn) + if !ok { + return nil + } + rawConn, err := sysConn.SyscallConn() + if err != nil { + return err + } + + err = rawConn.Read(func(fd uintptr) bool { + var buf [1]byte + n, err := syscall.Read(int(fd), buf[:]) + switch { + case n == 0 && err == nil: + sysErr = io.EOF + case n > 0: + sysErr = errUnexpectedRead + case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK: + sysErr = nil + default: + sysErr = err + } + return true + }) + if err != nil { + return err + } + + return sysErr +} diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go similarity index 59% rename from vendor/github.com/go-sql-driver/mysql/appengine.go rename to vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go index be41f2ee6..ea7fb607a 100644 --- a/vendor/github.com/go-sql-driver/mysql/appengine.go +++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go @@ -1,19 +1,17 @@ // Go MySQL Driver - A MySQL-Driver for Go's database/sql package // -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at http://mozilla.org/MPL/2.0/. 
-// +build appengine +// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos package mysql -import ( - "google.golang.org/appengine/cloudsql" -) +import "net" -func init() { - RegisterDial("cloudsql", cloudsql.Dial) +func connCheck(conn net.Conn) error { + return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go index 911be2060..e4bb59e67 100644 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -19,19 +19,10 @@ import ( "time" ) -// a copy of context.Context for Go 1.7 and earlier -type mysqlContext interface { - Done() <-chan struct{} - Err() error - - // defined in context.Context, but not used in this driver: - // Deadline() (deadline time.Time, ok bool) - // Value(key interface{}) interface{} -} - type mysqlConn struct { buf buffer netConn net.Conn + rawConn net.Conn // underlying connection when netConn is TLS connection. affectedRows uint64 insertId uint64 cfg *Config @@ -42,10 +33,11 @@ type mysqlConn struct { status statusFlag sequence uint8 parseTime bool + reset bool // set when the Go SQL package calls ResetSession // for context support (Go 1.8+) watching bool - watcher chan<- mysqlContext + watcher chan<- context.Context closech chan struct{} finished chan<- struct{} canceled atomicError // set non-nil if conn is canceled @@ -162,7 +154,9 @@ func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { // Send command err := mc.writeCommandPacketStr(comStmtPrepare, query) if err != nil { - return nil, mc.markBadConn(err) + // STMT_PREPARE is safe to retry. So we can return ErrBadConn here. + errLog.Print(err) + return nil, driver.ErrBadConn } stmt := &mysqlStmt{ @@ -192,10 +186,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin return "", driver.ErrSkip } - buf := mc.buf.takeCompleteBuffer() - if buf == nil { + buf, err := mc.buf.takeCompleteBuffer() + if err != nil { // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return "", ErrInvalidConn } buf = buf[:0] @@ -221,6 +215,9 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin switch v := arg.(type) { case int64: buf = strconv.AppendInt(buf, v, 10) + case uint64: + // Handle uint64 explicitly because our custom ConvertValue emits unsigned values + buf = strconv.AppendUint(buf, v, 10) case float64: buf = strconv.AppendFloat(buf, v, 'g', -1, 64) case bool: @@ -475,7 +472,7 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) { defer mc.finish() if err = mc.writeCommandPacket(comPing); err != nil { - return + return mc.markBadConn(err) } return mc.readResultOK() @@ -595,33 +592,32 @@ func (mc *mysqlConn) watchCancel(ctx context.Context) error { mc.cleanup() return nil } + // When ctx is already cancelled, don't watch it. + if err := ctx.Err(); err != nil { + return err + } + // When ctx is not cancellable, don't watch it. if ctx.Done() == nil { return nil } - - mc.watching = true - select { - default: - case <-ctx.Done(): - return ctx.Err() - } + // When watcher is not alive, can't watch it. 
if mc.watcher == nil { return nil } + mc.watching = true mc.watcher <- ctx - return nil } func (mc *mysqlConn) startWatcher() { - watcher := make(chan mysqlContext, 1) + watcher := make(chan context.Context, 1) mc.watcher = watcher finished := make(chan struct{}) mc.finished = finished go func() { for { - var ctx mysqlContext + var ctx context.Context select { case ctx = <-watcher: case <-mc.closech: @@ -650,5 +646,6 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error { if mc.closed.IsSet() { return driver.ErrBadConn } + mc.reset = true return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go new file mode 100644 index 000000000..d567b4e4f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connector.go @@ -0,0 +1,146 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "context" + "database/sql/driver" + "net" +) + +type connector struct { + cfg *Config // immutable private copy. +} + +// Connect implements driver.Connector interface. +// Connect returns a connection to the database. +func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + closech: make(chan struct{}), + cfg: c.cfg, + } + mc.parseTime = mc.cfg.ParseTime + + // Connect to Server + dialsLock.RLock() + dial, ok := dials[mc.cfg.Net] + dialsLock.RUnlock() + if ok { + dctx := ctx + if mc.cfg.Timeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) + defer cancel() + } + mc.netConn, err = dial(dctx, mc.cfg.Addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.Timeout} + mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) + } + + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. 
+ mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + // Call startWatcher for context support (From Go 1.8) + mc.startWatcher() + if err := mc.watchCancel(ctx); err != nil { + mc.cleanup() + return nil, err + } + defer mc.finish() + + mc.buf = newBuffer(mc.netConn) + + // Set I/O timeouts + mc.buf.timeout = mc.cfg.ReadTimeout + mc.writeTimeout = mc.cfg.WriteTimeout + + // Reading Handshake Initialization Packet + authData, plugin, err := mc.readHandshakePacket() + if err != nil { + mc.cleanup() + return nil, err + } + + if plugin == "" { + plugin = defaultAuthPlugin + } + + // Send Client Authentication Packet + authResp, err := mc.auth(authData, plugin) + if err != nil { + // try the default auth plugin, if using the requested plugin failed + errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) + plugin = defaultAuthPlugin + authResp, err = mc.auth(authData, plugin) + if err != nil { + mc.cleanup() + return nil, err + } + } + if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil { + mc.cleanup() + return nil, err + } + + // Handle response to auth packet, switch methods if possible + if err = mc.handleAuthResult(authData, plugin); err != nil { + // Authentication failed and MySQL has already closed the connection + // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). + // Do not send COM_QUIT, just cleanup and return the error. + mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +// Driver implements driver.Connector interface. +// Driver returns &MySQLDriver{}. +func (c *connector) Driver() driver.Driver { + return &MySQLDriver{} +} diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go index ba1297825..c1bdf1199 100644 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -17,6 +17,7 @@ package mysql import ( + "context" "database/sql" "database/sql/driver" "net" @@ -29,137 +30,78 @@ type MySQLDriver struct{} // DialFunc is a function which can be used to establish the network connection. // Custom dial functions must be registered with RegisterDial +// +// Deprecated: users should register a DialContextFunc instead type DialFunc func(addr string) (net.Conn, error) +// DialContextFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDialContext +type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error) + var ( dialsLock sync.RWMutex - dials map[string]DialFunc + dials map[string]DialContextFunc ) -// RegisterDial registers a custom dial function. It can then be used by the +// RegisterDialContext registers a custom dial function. It can then be used by the // network address mynet(addr), where mynet is the registered new network. -// addr is passed as a parameter to the dial function. -func RegisterDial(net string, dial DialFunc) { +// The current context for the connection and its address is passed to the dial function. 
+func RegisterDialContext(net string, dial DialContextFunc) { dialsLock.Lock() defer dialsLock.Unlock() if dials == nil { - dials = make(map[string]DialFunc) + dials = make(map[string]DialContextFunc) } dials[net] = dial } +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +// +// Deprecated: users should call RegisterDialContext instead +func RegisterDial(network string, dial DialFunc) { + RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) { + return dial(addr) + }) +} + // Open new Connection. // See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how -// the DSN string is formated +// the DSN string is formatted func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { - var err error - - // New mysqlConn - mc := &mysqlConn{ - maxAllowedPacket: maxPacketSize, - maxWriteSize: maxPacketSize - 1, - closech: make(chan struct{}), - } - mc.cfg, err = ParseDSN(dsn) + cfg, err := ParseDSN(dsn) if err != nil { return nil, err } - mc.parseTime = mc.cfg.ParseTime - - // Connect to Server - dialsLock.RLock() - dial, ok := dials[mc.cfg.Net] - dialsLock.RUnlock() - if ok { - mc.netConn, err = dial(mc.cfg.Addr) - } else { - nd := net.Dialer{Timeout: mc.cfg.Timeout} - mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) - } - if err != nil { - return nil, err - } - - // Enable TCP Keepalives on TCP connections - if tc, ok := mc.netConn.(*net.TCPConn); ok { - if err := tc.SetKeepAlive(true); err != nil { - // Don't send COM_QUIT before handshake. - mc.netConn.Close() - mc.netConn = nil - return nil, err - } - } - - // Call startWatcher for context support (From Go 1.8) - mc.startWatcher() - - mc.buf = newBuffer(mc.netConn) - - // Set I/O timeouts - mc.buf.timeout = mc.cfg.ReadTimeout - mc.writeTimeout = mc.cfg.WriteTimeout - - // Reading Handshake Initialization Packet - authData, plugin, err := mc.readHandshakePacket() - if err != nil { - mc.cleanup() - return nil, err - } - if plugin == "" { - plugin = defaultAuthPlugin + c := &connector{ + cfg: cfg, } + return c.Connect(context.Background()) +} - // Send Client Authentication Packet - authResp, addNUL, err := mc.auth(authData, plugin) - if err != nil { - // try the default auth plugin, if using the requested plugin failed - errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) - plugin = defaultAuthPlugin - authResp, addNUL, err = mc.auth(authData, plugin) - if err != nil { - mc.cleanup() - return nil, err - } - } - if err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin); err != nil { - mc.cleanup() - return nil, err - } +func init() { + sql.Register("mysql", &MySQLDriver{}) +} - // Handle response to auth packet, switch methods if possible - if err = mc.handleAuthResult(authData, plugin); err != nil { - // Authentication failed and MySQL has already closed the connection - // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). - // Do not send COM_QUIT, just cleanup and return the error. - mc.cleanup() +// NewConnector returns new driver.Connector. 
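For illustration: NewConnector (just below) pairs with sql.OpenDB for a DSN-free entry point, and because the config is cloned and normalized up front, later mutations of cfg cannot leak into the pool. A hedged sketch with hypothetical credentials:

    package demo

    import (
        "database/sql"
        "time"

        "github.com/go-sql-driver/mysql"
    )

    func openDB() (*sql.DB, error) {
        cfg := mysql.NewConfig() // defaults include CheckConnLiveness = true
        cfg.Net = "tcp"
        cfg.Addr = "127.0.0.1:3306"
        cfg.User = "app"
        cfg.Passwd = "secret"
        cfg.DBName = "appdb"
        cfg.Timeout = 5 * time.Second // bounds the dial inside Connect
        c, err := mysql.NewConnector(cfg)
        if err != nil {
            return nil, err
        }
        return sql.OpenDB(c), nil // the pool builds connections via c.Connect(ctx)
    }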
+func NewConnector(cfg *Config) (driver.Connector, error) { + cfg = cfg.Clone() + // normalize the contents of cfg so calls to NewConnector have the same + // behavior as MySQLDriver.OpenConnector + if err := cfg.normalize(); err != nil { return nil, err } + return &connector{cfg: cfg}, nil +} - if mc.cfg.MaxAllowedPacket > 0 { - mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket - } else { - // Get max allowed packet size - maxap, err := mc.getSystemVar("max_allowed_packet") - if err != nil { - mc.Close() - return nil, err - } - mc.maxAllowedPacket = stringToInt(maxap) - 1 - } - if mc.maxAllowedPacket < maxPacketSize { - mc.maxWriteSize = mc.maxAllowedPacket - } - - // Handle DSN Params - err = mc.handleParams() +// OpenConnector implements driver.DriverContext. +func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) { + cfg, err := ParseDSN(dsn) if err != nil { - mc.Close() return nil, err } - - return mc, nil -} - -func init() { - sql.Register("mysql", &MySQLDriver{}) + return &connector{ + cfg: cfg, + }, nil } diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index be014babe..75c8c2489 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -14,6 +14,7 @@ import ( "crypto/tls" "errors" "fmt" + "math/big" "net" "net/url" "sort" @@ -54,6 +55,7 @@ type Config struct { AllowCleartextPasswords bool // Allows the cleartext client side plugin AllowNativePasswords bool // Allows the native password authentication method AllowOldPasswords bool // Allows the old insecure password method + CheckConnLiveness bool // Check connections for liveness before using them ClientFoundRows bool // Return number of matching rows instead of rows changed ColumnsWithAlias bool // Prepend table alias to column names InterpolateParams bool // Interpolate placeholders into query string @@ -69,9 +71,30 @@ func NewConfig() *Config { Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, + CheckConnLiveness: true, } } +func (cfg *Config) Clone() *Config { + cp := *cfg + if cp.tls != nil { + cp.tls = cfg.tls.Clone() + } + if len(cp.Params) > 0 { + cp.Params = make(map[string]string, len(cfg.Params)) + for k, v := range cfg.Params { + cp.Params[k] = v + } + } + if cfg.pubKey != nil { + cp.pubKey = &rsa.PublicKey{ + N: new(big.Int).Set(cfg.pubKey.N), + E: cfg.pubKey.E, + } + } + return &cp +} + func (cfg *Config) normalize() error { if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { return errInvalidDSNUnsafeCollation @@ -92,23 +115,54 @@ func (cfg *Config) normalize() error { default: return errors.New("default addr for network '" + cfg.Net + "' unknown") } - } else if cfg.Net == "tcp" { cfg.Addr = ensureHavePort(cfg.Addr) } - if cfg.tls != nil { - if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { - host, _, err := net.SplitHostPort(cfg.Addr) - if err == nil { - cfg.tls.ServerName = host - } + switch cfg.TLSConfig { + case "false", "": + // don't set anything + case "true": + cfg.tls = &tls.Config{} + case "skip-verify", "preferred": + cfg.tls = &tls.Config{InsecureSkipVerify: true} + default: + cfg.tls = getTLSConfigClone(cfg.TLSConfig) + if cfg.tls == nil { + return errors.New("invalid value / unknown config name: " + cfg.TLSConfig) + } + } + + if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + cfg.tls.ServerName = host + } + } + + if cfg.ServerPubKey 
!= "" { + cfg.pubKey = getServerPubKey(cfg.ServerPubKey) + if cfg.pubKey == nil { + return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey) } } return nil } +func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) { + buf.Grow(1 + len(name) + 1 + len(value)) + if !*hasParam { + *hasParam = true + buf.WriteByte('?') + } else { + buf.WriteByte('&') + } + buf.WriteString(name) + buf.WriteByte('=') + buf.WriteString(value) +} + // FormatDSN formats the given Config into a DSN string which can be passed to // the driver. func (cfg *Config) FormatDSN() string { @@ -147,165 +201,75 @@ func (cfg *Config) FormatDSN() string { } if cfg.AllowCleartextPasswords { - if hasParam { - buf.WriteString("&allowCleartextPasswords=true") - } else { - hasParam = true - buf.WriteString("?allowCleartextPasswords=true") - } + writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true") } if !cfg.AllowNativePasswords { - if hasParam { - buf.WriteString("&allowNativePasswords=false") - } else { - hasParam = true - buf.WriteString("?allowNativePasswords=false") - } + writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false") } if cfg.AllowOldPasswords { - if hasParam { - buf.WriteString("&allowOldPasswords=true") - } else { - hasParam = true - buf.WriteString("?allowOldPasswords=true") - } + writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true") + } + + if !cfg.CheckConnLiveness { + writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false") } if cfg.ClientFoundRows { - if hasParam { - buf.WriteString("&clientFoundRows=true") - } else { - hasParam = true - buf.WriteString("?clientFoundRows=true") - } + writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") } if col := cfg.Collation; col != defaultCollation && len(col) > 0 { - if hasParam { - buf.WriteString("&collation=") - } else { - hasParam = true - buf.WriteString("?collation=") - } - buf.WriteString(col) + writeDSNParam(&buf, &hasParam, "collation", col) } if cfg.ColumnsWithAlias { - if hasParam { - buf.WriteString("&columnsWithAlias=true") - } else { - hasParam = true - buf.WriteString("?columnsWithAlias=true") - } + writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true") } if cfg.InterpolateParams { - if hasParam { - buf.WriteString("&interpolateParams=true") - } else { - hasParam = true - buf.WriteString("?interpolateParams=true") - } + writeDSNParam(&buf, &hasParam, "interpolateParams", "true") } if cfg.Loc != time.UTC && cfg.Loc != nil { - if hasParam { - buf.WriteString("&loc=") - } else { - hasParam = true - buf.WriteString("?loc=") - } - buf.WriteString(url.QueryEscape(cfg.Loc.String())) + writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String())) } if cfg.MultiStatements { - if hasParam { - buf.WriteString("&multiStatements=true") - } else { - hasParam = true - buf.WriteString("?multiStatements=true") - } + writeDSNParam(&buf, &hasParam, "multiStatements", "true") } if cfg.ParseTime { - if hasParam { - buf.WriteString("&parseTime=true") - } else { - hasParam = true - buf.WriteString("?parseTime=true") - } + writeDSNParam(&buf, &hasParam, "parseTime", "true") } if cfg.ReadTimeout > 0 { - if hasParam { - buf.WriteString("&readTimeout=") - } else { - hasParam = true - buf.WriteString("?readTimeout=") - } - buf.WriteString(cfg.ReadTimeout.String()) + writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String()) } if cfg.RejectReadOnly { - if hasParam { - buf.WriteString("&rejectReadOnly=true") - } else { - hasParam = true - buf.WriteString("?rejectReadOnly=true") - 
} + writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true") } if len(cfg.ServerPubKey) > 0 { - if hasParam { - buf.WriteString("&serverPubKey=") - } else { - hasParam = true - buf.WriteString("?serverPubKey=") - } - buf.WriteString(url.QueryEscape(cfg.ServerPubKey)) + writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey)) } if cfg.Timeout > 0 { - if hasParam { - buf.WriteString("&timeout=") - } else { - hasParam = true - buf.WriteString("?timeout=") - } - buf.WriteString(cfg.Timeout.String()) + writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String()) } if len(cfg.TLSConfig) > 0 { - if hasParam { - buf.WriteString("&tls=") - } else { - hasParam = true - buf.WriteString("?tls=") - } - buf.WriteString(url.QueryEscape(cfg.TLSConfig)) + writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig)) } if cfg.WriteTimeout > 0 { - if hasParam { - buf.WriteString("&writeTimeout=") - } else { - hasParam = true - buf.WriteString("?writeTimeout=") - } - buf.WriteString(cfg.WriteTimeout.String()) + writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String()) } if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { - if hasParam { - buf.WriteString("&maxAllowedPacket=") - } else { - hasParam = true - buf.WriteString("?maxAllowedPacket=") - } - buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket)) - + writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket)) } // other params @@ -316,16 +280,7 @@ func (cfg *Config) FormatDSN() string { } sort.Strings(params) for _, param := range params { - if hasParam { - buf.WriteByte('&') - } else { - hasParam = true - buf.WriteByte('?') - } - - buf.WriteString(param) - buf.WriteByte('=') - buf.WriteString(url.QueryEscape(cfg.Params[param])) + writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param])) } } @@ -452,6 +407,14 @@ func parseDSNParams(cfg *Config, params string) (err error) { return errors.New("invalid bool value: " + value) } + // Check connections for Liveness before using them + case "checkConnLiveness": + var isBool bool + cfg.CheckConnLiveness, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + // Switch "rowsAffected" mode case "clientFoundRows": var isBool bool @@ -531,13 +494,7 @@ func parseDSNParams(cfg *Config, params string) (err error) { if err != nil { return fmt.Errorf("invalid value for server pub key name: %v", err) } - - if pubKey := getServerPubKey(name); pubKey != nil { - cfg.ServerPubKey = name - cfg.pubKey = pubKey - } else { - return errors.New("invalid value / unknown server pub key name: " + name) - } + cfg.ServerPubKey = name // Strict mode case "strict": @@ -556,25 +513,17 @@ func parseDSNParams(cfg *Config, params string) (err error) { if isBool { if boolValue { cfg.TLSConfig = "true" - cfg.tls = &tls.Config{} } else { cfg.TLSConfig = "false" } - } else if vl := strings.ToLower(value); vl == "skip-verify" { + } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" { cfg.TLSConfig = vl - cfg.tls = &tls.Config{InsecureSkipVerify: true} } else { name, err := url.QueryUnescape(value) if err != nil { return fmt.Errorf("invalid value for TLS config name: %v", err) } - - if tlsConfig := getTLSConfigClone(name); tlsConfig != nil { - cfg.TLSConfig = name - cfg.tls = tlsConfig - } else { - return errors.New("invalid value / unknown config name: " + name) - } + cfg.TLSConfig = name } // I/O write Timeout diff --git a/vendor/github.com/go-sql-driver/mysql/go.mod 
b/vendor/github.com/go-sql-driver/mysql/go.mod new file mode 100644 index 000000000..fffbf6a90 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/go.mod @@ -0,0 +1,3 @@ +module github.com/go-sql-driver/mysql + +go 1.10 diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go new file mode 100644 index 000000000..afa8a89e9 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go @@ -0,0 +1,50 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "time" +) + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go new file mode 100644 index 000000000..c392594dd --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.13 + +package mysql + +import ( + "database/sql" +) + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime sql.NullTime diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go new file mode 100644 index 000000000..86d159d44 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go @@ -0,0 +1,34 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
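For illustration: NullTime's behavior is unchanged; only its definition moved. The shared Scan/Value methods now live in nulltime.go above, while two build-constrained files select the backing type (sql.NullTime on go1.13 and later, the old struct otherwise), so scanning code like this sketch (schema hypothetical) compiles either way:

    package demo

    import (
        "database/sql"
        "time"

        "github.com/go-sql-driver/mysql"
    )

    func lastSeen(db *sql.DB, id int64) (time.Time, bool, error) {
        var nt mysql.NullTime // sql.NullTime underneath on Go 1.13+
        err := db.QueryRow("SELECT last_seen FROM users WHERE id = ?", id).Scan(&nt)
        if err != nil {
            return time.Time{}, false, err
        }
        return nt.Time, nt.Valid, nil // Valid == false means the column was NULL
    }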
+ +// +build !go1.13 + +package mysql + +import ( + "time" +) + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index 170aaa02b..82ad7a200 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -51,7 +51,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { mc.sequence++ // packets with length 0 terminate a previous packet which is a - // multiple of (2^24)−1 bytes long + // multiple of (2^24)-1 bytes long if pktLen == 0 { // there was no previous packet if prevData == nil { @@ -96,6 +96,35 @@ func (mc *mysqlConn) writePacket(data []byte) error { return ErrPktTooLarge } + // Perform a stale connection check. We only perform this check for + // the first query on a connection that has been checked out of the + // connection pool: a fresh connection from the pool is more likely + // to be stale, and it has not performed any previous writes that + // could cause data corruption, so it's safe to return ErrBadConn + // if the check fails. + if mc.reset { + mc.reset = false + conn := mc.netConn + if mc.rawConn != nil { + conn = mc.rawConn + } + var err error + // If this connection has a ReadTimeout which we've been setting on + // reads, reset it to its default value before we attempt a non-blocking + // read, otherwise the scheduler will just time us out before we can read + if mc.cfg.ReadTimeout != 0 { + err = conn.SetReadDeadline(time.Time{}) + } + if err == nil && mc.cfg.CheckConnLiveness { + err = connCheck(conn) + } + if err != nil { + errLog.Print("closing bad idle connection: ", err) + mc.Close() + return driver.ErrBadConn + } + } + for { var size int if pktLen >= maxPacketSize { @@ -194,7 +223,11 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro return nil, "", ErrOldProtocol } if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { - return nil, "", ErrNoTLS + if mc.cfg.TLSConfig == "preferred" { + mc.cfg.tls = nil + } else { + return nil, "", ErrNoTLS + } } pos += 2 @@ -243,7 +276,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro // Client Authentication Packet // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse -func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, plugin string) error { +func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error { // Adjust client flags based on server support clientFlags := clientProtocol41 | clientSecureConn | @@ -269,7 +302,8 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, // encode length of the auth plugin data var authRespLEIBuf [9]byte - authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(len(authResp))) + authRespLen := len(authResp) + authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen)) if len(authRespLEI) > 1 { // if the length can not be written in 1 byte, it must be written as a // length 
encoded integer @@ -277,9 +311,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, } pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1 - if addNUL { - pktLen++ - } // To specify a db name if n := len(mc.cfg.DBName); n > 0 { @@ -288,10 +319,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, } // Calculate packet length and get buffer with that size - data := mc.buf.takeSmallBuffer(pktLen + 4) - if data == nil { + data, err := mc.buf.takeSmallBuffer(pktLen + 4) + if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return errBadConnNoWrite } @@ -330,6 +361,7 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, if err := tlsConn.Handshake(); err != nil { return err } + mc.rawConn = mc.netConn mc.netConn = tlsConn mc.buf.nc = tlsConn } @@ -350,10 +382,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, // Auth Data [length encoded integer] pos += copy(data[pos:], authRespLEI) pos += copy(data[pos:], authResp) - if addNUL { - data[pos] = 0x00 - pos++ - } // Databasename [null terminated string] if len(mc.cfg.DBName) > 0 { @@ -364,30 +392,24 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, pos += copy(data[pos:], plugin) data[pos] = 0x00 + pos++ // Send Auth packet - return mc.writePacket(data) + return mc.writePacket(data[:pos]) } // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte, addNUL bool) error { +func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { pktLen := 4 + len(authData) - if addNUL { - pktLen++ - } - data := mc.buf.takeSmallBuffer(pktLen) - if data == nil { + data, err := mc.buf.takeSmallBuffer(pktLen) + if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return errBadConnNoWrite } // Add the auth data [EOF] copy(data[4:], authData) - if addNUL { - data[pktLen-1] = 0x00 - } - return mc.writePacket(data) } @@ -399,10 +421,10 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error { // Reset Packet Sequence mc.sequence = 0 - data := mc.buf.takeSmallBuffer(4 + 1) - if data == nil { + data, err := mc.buf.takeSmallBuffer(4 + 1) + if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return errBadConnNoWrite } @@ -418,10 +440,10 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { mc.sequence = 0 pktLen := 1 + len(arg) - data := mc.buf.takeBuffer(pktLen + 4) - if data == nil { + data, err := mc.buf.takeBuffer(pktLen + 4) + if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return errBadConnNoWrite } @@ -439,10 +461,10 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { // Reset Packet Sequence mc.sequence = 0 - data := mc.buf.takeSmallBuffer(4 + 1 + 4) - if data == nil { + data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) + if err != nil { // cannot take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return errBadConnNoWrite } @@ -479,7 +501,7 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { return data[1:], "", err case iEOF: - if len(data) < 1 { + if len(data) == 1 { // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest return nil, "mysql_old_password", nil } @@ -895,7 +917,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { const minPktLen = 4 + 1 + 4 + 1 + 4 mc := stmt.mc - // Determine threshould dynamically to avoid packet size shortage. + // Determine threshold dynamically to avoid packet size shortage. longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1) if longDataSize < 64 { longDataSize = 64 @@ -905,15 +927,17 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { mc.sequence = 0 var data []byte + var err error if len(args) == 0 { - data = mc.buf.takeBuffer(minPktLen) + data, err = mc.buf.takeBuffer(minPktLen) } else { - data = mc.buf.takeCompleteBuffer() + data, err = mc.buf.takeCompleteBuffer() + // In this case the len(data) == cap(data) which is used to optimise the flow below. } - if data == nil { + if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) + errLog.Print(err) return errBadConnNoWrite } @@ -939,7 +963,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { pos := minPktLen var nullMask []byte - if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) { // buffer has to be extended but we don't know by how much so // we depend on append after all data with known sizes fit. // We stop at that because we deal with a lot of columns here @@ -948,10 +972,11 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { copy(tmp[:pos], data[:pos]) data = tmp nullMask = data[pos : pos+maskLen] + // No need to clean nullMask as make ensures that. pos += maskLen } else { nullMask = data[pos : pos+maskLen] - for i := 0; i < maskLen; i++ { + for i := range nullMask { nullMask[i] = 0 } pos += maskLen @@ -996,6 +1021,22 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { ) } + case uint64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x80 // type is unsigned + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + case float64: paramTypes[i+i] = byte(fieldTypeDouble) paramTypes[i+i+1] = 0x00 @@ -1088,7 +1129,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // In that case we must build the data packet with the new values buffer if valuesCap != cap(paramValues) { data = append(data[:pos], paramValues...) 
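For illustration: the new case uint64 above (note the 0x80 unsigned flag on the type byte), together with the converter change in statement.go further down, lets full-range unsigned arguments travel natively instead of as decimal strings. A sketch with a hypothetical table:

    package demo

    import (
        "database/sql"
        "math"
    )

    func insertMaxID(db *sql.DB) error {
        // Before this diff, values above math.MaxInt64 were stringified by the
        // converter; now they are sent as an unsigned LONGLONG parameter.
        _, err := db.Exec("INSERT INTO t (id) VALUES (?)", uint64(math.MaxUint64))
        return err
    }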
- mc.buf.buf = data + if err = mc.buf.store(data); err != nil { + errLog.Print(err) + return errBadConnNoWrite + } } pos += len(paramValues) diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go index d3b1e2822..888bdb5f0 100644 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -111,6 +111,13 @@ func (rows *mysqlRows) Close() (err error) { return err } + // flip the buffer for this connection if we need to drain it. + // note that for a successful query (i.e. one where rows.next() + // has been called until it returns false), `rows.mc` will be nil + // by the time the user calls `(*Rows).Close`, so we won't reach this + // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47 + mc.buf.flip() + // Remove unread packets from stream if !rows.rs.done { err = mc.readUntilEOF() diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go index ce7fe4cd0..f7e370939 100644 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -13,7 +13,6 @@ import ( "fmt" "io" "reflect" - "strconv" ) type mysqlStmt struct { @@ -164,14 +163,8 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) { } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return rv.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(rv.Uint()), nil - case reflect.Uint64: - u64 := rv.Uint() - if u64 >= 1<<63 { - return strconv.FormatUint(u64, 10), nil - } - return int64(u64), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint(), nil case reflect.Float32, reflect.Float64: return rv.Float(), nil case reflect.Bool: diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go index cb3650bb9..9552e80b5 100644 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -56,7 +56,7 @@ var ( // db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") // func RegisterTLSConfig(key string, config *tls.Config) error { - if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" { return fmt.Errorf("key '%s' is reserved", key) } @@ -106,60 +106,6 @@ func readBool(input string) (value bool, valid bool) { * Time related utils * ******************************************************************************/ -// NullTime represents a time.Time that may be NULL. -// NullTime implements the Scanner interface so -// it can be used as a scan destination: -// -// var nt NullTime -// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) -// ... -// if nt.Valid { -// // use nt.Time -// } else { -// // NULL value -// } -// -// This NullTime implementation is not driver-specific -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. -// The value type must be time.Time or string / []byte (formatted time-string), -// otherwise Scan fails. 
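For illustration: "preferred" is now reserved end to end — parsed in dsn.go, honored during the handshake in packets.go, and rejected as a custom key by RegisterTLSConfig above. It upgrades to TLS when the server offers it and continues in cleartext when it does not; like skip-verify it performs no certificate validation. Sketch, host hypothetical:

    package demo

    import (
        "database/sql"

        _ "github.com/go-sql-driver/mysql"
    )

    func openPreferred() (*sql.DB, error) {
        // Servers without clientSSL no longer fail with ErrNoTLS; cfg.tls is
        // simply dropped for that connection (see readHandshakePacket above).
        return sql.Open("mysql", "user:pass@tcp(db.example.com:3306)/app?tls=preferred")
    }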
-func (nt *NullTime) Scan(value interface{}) (err error) { - if value == nil { - nt.Time, nt.Valid = time.Time{}, false - return - } - - switch v := value.(type) { - case time.Time: - nt.Time, nt.Valid = v, true - return - case []byte: - nt.Time, err = parseDateTime(string(v), time.UTC) - nt.Valid = (err == nil) - return - case string: - nt.Time, err = parseDateTime(v, time.UTC) - nt.Valid = (err == nil) - return - } - - nt.Valid = false - return fmt.Errorf("Can't convert %T to time.Time", value) -} - -// Value implements the driver Valuer interface. -func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} - func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { base := "0000-00-00 00:00:00.0000000" switch len(str) { @@ -684,7 +630,7 @@ type atomicBool struct { value uint32 } -// IsSet returns wether the current boolean value is true +// IsSet returns whether the current boolean value is true func (ab *atomicBool) IsSet() bool { return atomic.LoadUint32(&ab.value) > 0 } @@ -698,7 +644,7 @@ func (ab *atomicBool) Set(value bool) { } } -// TrySet sets the value of the bool and returns wether the value changed +// TrySet sets the value of the bool and returns whether the value changed func (ab *atomicBool) TrySet(value bool) bool { if value { return atomic.SwapUint32(&ab.value, 1) == 0 diff --git a/vendor/github.com/gopherjs/gopherjs/build/build.go b/vendor/github.com/gopherjs/gopherjs/build/build.go deleted file mode 100644 index f2513ba5b..000000000 --- a/vendor/github.com/gopherjs/gopherjs/build/build.go +++ /dev/null @@ -1,875 +0,0 @@ -package build - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - - "github.com/fsnotify/fsnotify" - "github.com/gopherjs/gopherjs/compiler" - "github.com/gopherjs/gopherjs/compiler/gopherjspkg" - "github.com/gopherjs/gopherjs/compiler/natives" - "github.com/neelance/sourcemap" - "github.com/shurcooL/httpfs/vfsutil" - "golang.org/x/tools/go/buildutil" -) - -type ImportCError struct { - pkgPath string -} - -func (e *ImportCError) Error() string { - return e.pkgPath + `: importing "C" is not supported by GopherJS` -} - -// NewBuildContext creates a build context for building Go packages -// with GopherJS compiler. -// -// Core GopherJS packages (i.e., "github.com/gopherjs/gopherjs/js", "github.com/gopherjs/gopherjs/nosync") -// are loaded from gopherjspkg.FS virtual filesystem rather than GOPATH. -func NewBuildContext(installSuffix string, buildTags []string) *build.Context { - gopherjsRoot := filepath.Join(build.Default.GOROOT, "src", "github.com", "gopherjs", "gopherjs") - return &build.Context{ - GOROOT: build.Default.GOROOT, - GOPATH: build.Default.GOPATH, - GOOS: build.Default.GOOS, - GOARCH: "js", - InstallSuffix: installSuffix, - Compiler: "gc", - BuildTags: append(buildTags, - "netgo", // See https://godoc.org/net#hdr-Name_Resolution. - "purego", // See https://golang.org/issues/23172. 
- ), - ReleaseTags: build.Default.ReleaseTags, - CgoEnabled: true, // detect `import "C"` to throw proper error - - IsDir: func(path string) bool { - if strings.HasPrefix(path, gopherjsRoot+string(filepath.Separator)) { - path = filepath.ToSlash(path[len(gopherjsRoot):]) - if fi, err := vfsutil.Stat(gopherjspkg.FS, path); err == nil { - return fi.IsDir() - } - } - fi, err := os.Stat(path) - return err == nil && fi.IsDir() - }, - ReadDir: func(path string) ([]os.FileInfo, error) { - if strings.HasPrefix(path, gopherjsRoot+string(filepath.Separator)) { - path = filepath.ToSlash(path[len(gopherjsRoot):]) - if fis, err := vfsutil.ReadDir(gopherjspkg.FS, path); err == nil { - return fis, nil - } - } - return ioutil.ReadDir(path) - }, - OpenFile: func(path string) (io.ReadCloser, error) { - if strings.HasPrefix(path, gopherjsRoot+string(filepath.Separator)) { - path = filepath.ToSlash(path[len(gopherjsRoot):]) - if f, err := gopherjspkg.FS.Open(path); err == nil { - return f, nil - } - } - return os.Open(path) - }, - } -} - -// statFile returns an os.FileInfo describing the named file. -// For files in "$GOROOT/src/github.com/gopherjs/gopherjs" directory, -// gopherjspkg.FS is consulted first. -func statFile(path string) (os.FileInfo, error) { - gopherjsRoot := filepath.Join(build.Default.GOROOT, "src", "github.com", "gopherjs", "gopherjs") - if strings.HasPrefix(path, gopherjsRoot+string(filepath.Separator)) { - path = filepath.ToSlash(path[len(gopherjsRoot):]) - if fi, err := vfsutil.Stat(gopherjspkg.FS, path); err == nil { - return fi, nil - } - } - return os.Stat(path) -} - -// Import returns details about the Go package named by the import path. If the -// path is a local import path naming a package that can be imported using -// a standard import path, the returned package will set p.ImportPath to -// that path. -// -// In the directory containing the package, .go and .inc.js files are -// considered part of the package except for: -// -// - .go files in package documentation -// - files starting with _ or . (likely editor temporary files) -// - files with build constraints not satisfied by the context -// -// If an error occurs, Import returns a non-nil error and a nil -// *PackageData. -func Import(path string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) { - wd, err := os.Getwd() - if err != nil { - // Getwd may fail if we're in GOARCH=js mode. That's okay, handle - // it by falling back to empty working directory. It just means - // Import will not be able to resolve relative import paths. - wd = "" - } - bctx := NewBuildContext(installSuffix, buildTags) - return importWithSrcDir(*bctx, path, wd, mode, installSuffix) -} - -func importWithSrcDir(bctx build.Context, path string, srcDir string, mode build.ImportMode, installSuffix string) (*PackageData, error) { - // bctx is passed by value, so it can be modified here. - var isVirtual bool - switch path { - case "syscall": - // syscall needs to use a typical GOARCH like amd64 to pick up definitions for _Socklen, BpfInsn, IFNAMSIZ, Timeval, BpfStat, SYS_FCNTL, Flock_t, etc. - bctx.GOARCH = runtime.GOARCH - bctx.InstallSuffix = "js" - if installSuffix != "" { - bctx.InstallSuffix += "_" + installSuffix - } - case "math/big": - // Use pure Go version of math/big; we don't want non-Go assembly versions. - bctx.BuildTags = append(bctx.BuildTags, "math_big_pure_go") - case "crypto/x509", "os/user": - // These stdlib packages have cgo and non-cgo versions (via build tags); we want the latter. 
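For context on this deletion: the GopherJS build package worked by hijacking go/build's filesystem hooks so that virtual packages resolve without touching GOPATH. The bare technique, reduced to a hypothetical sketch:

    package demo

    import (
        "go/build"
        "io"
        "io/ioutil"
        "os"
        "strings"
    )

    // virtualFiles stands in for gopherjspkg.FS.
    var virtualFiles = map[string]string{
        "/virtual/hello.go": "package hello\n",
    }

    func newCtx() *build.Context {
        bctx := build.Default // a copy; build.Default itself stays untouched
        bctx.OpenFile = func(path string) (io.ReadCloser, error) {
            if src, ok := virtualFiles[path]; ok {
                return ioutil.NopCloser(strings.NewReader(src)), nil
            }
            return os.Open(path) // fall through to the real filesystem
        }
        return &bctx
    }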
- bctx.CgoEnabled = false - case "github.com/gopherjs/gopherjs/js", "github.com/gopherjs/gopherjs/nosync": - // These packages are already embedded via gopherjspkg.FS virtual filesystem (which can be - // safely vendored). Don't try to use vendor directory to resolve them. - mode |= build.IgnoreVendor - isVirtual = true - } - pkg, err := bctx.Import(path, srcDir, mode) - if err != nil { - return nil, err - } - - switch path { - case "os": - pkg.GoFiles = excludeExecutable(pkg.GoFiles) // Need to exclude executable implementation files, because some of them contain package scope variables that perform (indirectly) syscalls on init. - case "runtime": - pkg.GoFiles = []string{"error.go"} - case "runtime/internal/sys": - pkg.GoFiles = []string{fmt.Sprintf("zgoos_%s.go", bctx.GOOS), "zversion.go"} - case "runtime/pprof": - pkg.GoFiles = nil - case "internal/poll": - pkg.GoFiles = exclude(pkg.GoFiles, "fd_poll_runtime.go") - case "crypto/rand": - pkg.GoFiles = []string{"rand.go", "util.go"} - pkg.TestGoFiles = exclude(pkg.TestGoFiles, "rand_linux_test.go") // Don't want linux-specific tests (since linux-specific package files are excluded too). - } - - if len(pkg.CgoFiles) > 0 { - return nil, &ImportCError{path} - } - - if pkg.IsCommand() { - pkg.PkgObj = filepath.Join(pkg.BinDir, filepath.Base(pkg.ImportPath)+".js") - } - - if _, err := os.Stat(pkg.PkgObj); os.IsNotExist(err) && strings.HasPrefix(pkg.PkgObj, build.Default.GOROOT) { - // fall back to GOPATH - firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0] // TODO: Need to check inside all GOPATH workspaces. - gopathPkgObj := filepath.Join(firstGopathWorkspace, pkg.PkgObj[len(build.Default.GOROOT):]) - if _, err := os.Stat(gopathPkgObj); err == nil { - pkg.PkgObj = gopathPkgObj - } - } - - jsFiles, err := jsFilesFromDir(&bctx, pkg.Dir) - if err != nil { - return nil, err - } - - return &PackageData{Package: pkg, JSFiles: jsFiles, IsVirtual: isVirtual}, nil -} - -// excludeExecutable excludes all executable implementation .go files. -// They have "executable_" prefix. -func excludeExecutable(goFiles []string) []string { - var s []string - for _, f := range goFiles { - if strings.HasPrefix(f, "executable_") { - continue - } - s = append(s, f) - } - return s -} - -// exclude returns files, excluding specified files. -func exclude(files []string, exclude ...string) []string { - var s []string -Outer: - for _, f := range files { - for _, e := range exclude { - if f == e { - continue Outer - } - } - s = append(s, f) - } - return s -} - -// ImportDir is like Import but processes the Go package found in the named -// directory. -func ImportDir(dir string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) { - bctx := NewBuildContext(installSuffix, buildTags) - pkg, err := bctx.ImportDir(dir, mode) - if err != nil { - return nil, err - } - - jsFiles, err := jsFilesFromDir(bctx, pkg.Dir) - if err != nil { - return nil, err - } - - return &PackageData{Package: pkg, JSFiles: jsFiles}, nil -} - -// parseAndAugment parses and returns all .go files of given pkg. -// Standard Go library packages are augmented with files in compiler/natives folder. -// If isTest is true and pkg.ImportPath has no _test suffix, package is built for running internal tests. -// If isTest is true and pkg.ImportPath has _test suffix, package is built for running external tests. -// -// The native packages are augmented by the contents of natives.FS in the following way. 
-// The file names do not matter except the usual `_test` suffix. The files for -// native overrides get added to the package (even if they have the same name -// as an existing file from the standard library). For all identifiers that exist -// in the original AND the overrides, the original identifier in the AST gets -// replaced by `_`. New identifiers that don't exist in original package get added. -func parseAndAugment(bctx *build.Context, pkg *build.Package, isTest bool, fileSet *token.FileSet) ([]*ast.File, error) { - var files []*ast.File - replacedDeclNames := make(map[string]bool) - funcName := func(d *ast.FuncDecl) string { - if d.Recv == nil || len(d.Recv.List) == 0 { - return d.Name.Name - } - recv := d.Recv.List[0].Type - if star, ok := recv.(*ast.StarExpr); ok { - recv = star.X - } - return recv.(*ast.Ident).Name + "." + d.Name.Name - } - isXTest := strings.HasSuffix(pkg.ImportPath, "_test") - importPath := pkg.ImportPath - if isXTest { - importPath = importPath[:len(importPath)-5] - } - - nativesContext := &build.Context{ - GOROOT: "/", - GOOS: build.Default.GOOS, - GOARCH: "js", - Compiler: "gc", - JoinPath: path.Join, - SplitPathList: func(list string) []string { - if list == "" { - return nil - } - return strings.Split(list, "/") - }, - IsAbsPath: path.IsAbs, - IsDir: func(name string) bool { - dir, err := natives.FS.Open(name) - if err != nil { - return false - } - defer dir.Close() - info, err := dir.Stat() - if err != nil { - return false - } - return info.IsDir() - }, - HasSubdir: func(root, name string) (rel string, ok bool) { - panic("not implemented") - }, - ReadDir: func(name string) (fi []os.FileInfo, err error) { - dir, err := natives.FS.Open(name) - if err != nil { - return nil, err - } - defer dir.Close() - return dir.Readdir(0) - }, - OpenFile: func(name string) (r io.ReadCloser, err error) { - return natives.FS.Open(name) - }, - } - - // reflect needs to tell Go 1.11 apart from Go 1.11.1 for https://github.com/gopherjs/gopherjs/issues/862, - // so provide it with the custom go1.11.1 build tag whenever we're on Go 1.11.1 or later. - // TODO: Remove this ad hoc special behavior in GopherJS 1.12. - if runtime.Version() != "go1.11" { - nativesContext.ReleaseTags = append(nativesContext.ReleaseTags, "go1.11.1") - } - - if nativesPkg, err := nativesContext.Import(importPath, "", 0); err == nil { - names := nativesPkg.GoFiles - if isTest { - names = append(names, nativesPkg.TestGoFiles...) - } - if isXTest { - names = nativesPkg.XTestGoFiles - } - for _, name := range names { - fullPath := path.Join(nativesPkg.Dir, name) - r, err := nativesContext.OpenFile(fullPath) - if err != nil { - panic(err) - } - file, err := parser.ParseFile(fileSet, fullPath, r, parser.ParseComments) - if err != nil { - panic(err) - } - r.Close() - for _, decl := range file.Decls { - switch d := decl.(type) { - case *ast.FuncDecl: - replacedDeclNames[funcName(d)] = true - case *ast.GenDecl: - switch d.Tok { - case token.TYPE: - for _, spec := range d.Specs { - replacedDeclNames[spec.(*ast.TypeSpec).Name.Name] = true - } - case token.VAR, token.CONST: - for _, spec := range d.Specs { - for _, name := range spec.(*ast.ValueSpec).Names { - replacedDeclNames[name.Name] = true - } - } - } - } - } - files = append(files, file) - } - } - delete(replacedDeclNames, "init") - - var errList compiler.ErrorList - for _, name := range pkg.GoFiles { - if !filepath.IsAbs(name) { // name might be absolute if specified directly. E.g., `gopherjs build /abs/file.go`. 
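For context: parseAndAugment's override rule — keep the native file's declaration, rename the original to "_" — is plain AST surgery. A self-contained sketch of the same move, limited to top-level functions:

    package demo

    import (
        "go/ast"
        "go/parser"
        "go/token"
    )

    // neutralize masks the named top-level functions by renaming them to "_",
    // so an override with the same name wins at compile time.
    func neutralize(src string, replaced map[string]bool) (*ast.File, error) {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "src.go", src, parser.ParseComments)
        if err != nil {
            return nil, err
        }
        for _, decl := range f.Decls {
            if fd, ok := decl.(*ast.FuncDecl); ok && replaced[fd.Name.Name] {
                fd.Name = ast.NewIdent("_") // still compiled, no longer reachable
            }
        }
        return f, nil
    }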
- name = filepath.Join(pkg.Dir, name) - } - r, err := buildutil.OpenFile(bctx, name) - if err != nil { - return nil, err - } - file, err := parser.ParseFile(fileSet, name, r, parser.ParseComments) - r.Close() - if err != nil { - if list, isList := err.(scanner.ErrorList); isList { - if len(list) > 10 { - list = append(list[:10], &scanner.Error{Pos: list[9].Pos, Msg: "too many errors"}) - } - for _, entry := range list { - errList = append(errList, entry) - } - continue - } - errList = append(errList, err) - continue - } - - switch pkg.ImportPath { - case "crypto/rand", "encoding/gob", "encoding/json", "expvar", "go/token", "log", "math/big", "math/rand", "regexp", "testing", "time": - for _, spec := range file.Imports { - path, _ := strconv.Unquote(spec.Path.Value) - if path == "sync" { - if spec.Name == nil { - spec.Name = ast.NewIdent("sync") - } - spec.Path.Value = `"github.com/gopherjs/gopherjs/nosync"` - } - } - } - - for _, decl := range file.Decls { - switch d := decl.(type) { - case *ast.FuncDecl: - if replacedDeclNames[funcName(d)] { - d.Name = ast.NewIdent("_") - } - case *ast.GenDecl: - switch d.Tok { - case token.TYPE: - for _, spec := range d.Specs { - s := spec.(*ast.TypeSpec) - if replacedDeclNames[s.Name.Name] { - s.Name = ast.NewIdent("_") - } - } - case token.VAR, token.CONST: - for _, spec := range d.Specs { - s := spec.(*ast.ValueSpec) - for i, name := range s.Names { - if replacedDeclNames[name.Name] { - s.Names[i] = ast.NewIdent("_") - } - } - } - } - } - } - files = append(files, file) - } - if errList != nil { - return nil, errList - } - return files, nil -} - -type Options struct { - GOROOT string - GOPATH string - Verbose bool - Quiet bool - Watch bool - CreateMapFile bool - MapToLocalDisk bool - Minify bool - Color bool - BuildTags []string -} - -func (o *Options) PrintError(format string, a ...interface{}) { - if o.Color { - format = "\x1B[31m" + format + "\x1B[39m" - } - fmt.Fprintf(os.Stderr, format, a...) -} - -func (o *Options) PrintSuccess(format string, a ...interface{}) { - if o.Color { - format = "\x1B[32m" + format + "\x1B[39m" - } - fmt.Fprintf(os.Stderr, format, a...) -} - -type PackageData struct { - *build.Package - JSFiles []string - IsTest bool // IsTest is true if the package is being built for running tests. - SrcModTime time.Time - UpToDate bool - IsVirtual bool // If true, the package does not have a corresponding physical directory on disk. -} - -type Session struct { - options *Options - bctx *build.Context - Archives map[string]*compiler.Archive - Types map[string]*types.Package - Watcher *fsnotify.Watcher -} - -func NewSession(options *Options) *Session { - if options.GOROOT == "" { - options.GOROOT = build.Default.GOROOT - } - if options.GOPATH == "" { - options.GOPATH = build.Default.GOPATH - } - options.Verbose = options.Verbose || options.Watch - - s := &Session{ - options: options, - Archives: make(map[string]*compiler.Archive), - } - s.bctx = NewBuildContext(s.InstallSuffix(), s.options.BuildTags) - s.Types = make(map[string]*types.Package) - if options.Watch { - if out, err := exec.Command("ulimit", "-n").Output(); err == nil { - if n, err := strconv.Atoi(strings.TrimSpace(string(out))); err == nil && n < 1024 { - fmt.Printf("Warning: The maximum number of open file descriptors is very low (%d). Change it with 'ulimit -n 8192'.\n", n) - } - } - - var err error - s.Watcher, err = fsnotify.NewWatcher() - if err != nil { - panic(err) - } - } - return s -} - -// BuildContext returns the session's build context. 
-func (s *Session) BuildContext() *build.Context { return s.bctx } - -func (s *Session) InstallSuffix() string { - if s.options.Minify { - return "min" - } - return "" -} - -func (s *Session) BuildDir(packagePath string, importPath string, pkgObj string) error { - if s.Watcher != nil { - s.Watcher.Add(packagePath) - } - buildPkg, err := s.bctx.ImportDir(packagePath, 0) - if err != nil { - return err - } - pkg := &PackageData{Package: buildPkg} - jsFiles, err := jsFilesFromDir(s.bctx, pkg.Dir) - if err != nil { - return err - } - pkg.JSFiles = jsFiles - archive, err := s.BuildPackage(pkg) - if err != nil { - return err - } - if pkgObj == "" { - pkgObj = filepath.Base(packagePath) + ".js" - } - if pkg.IsCommand() && !pkg.UpToDate { - if err := s.WriteCommandPackage(archive, pkgObj); err != nil { - return err - } - } - return nil -} - -func (s *Session) BuildFiles(filenames []string, pkgObj string, packagePath string) error { - pkg := &PackageData{ - Package: &build.Package{ - Name: "main", - ImportPath: "main", - Dir: packagePath, - }, - } - - for _, file := range filenames { - if strings.HasSuffix(file, ".inc.js") { - pkg.JSFiles = append(pkg.JSFiles, file) - continue - } - pkg.GoFiles = append(pkg.GoFiles, file) - } - - archive, err := s.BuildPackage(pkg) - if err != nil { - return err - } - if s.Types["main"].Name() != "main" { - return fmt.Errorf("cannot build/run non-main package") - } - return s.WriteCommandPackage(archive, pkgObj) -} - -func (s *Session) BuildImportPath(path string) (*compiler.Archive, error) { - _, archive, err := s.buildImportPathWithSrcDir(path, "") - return archive, err -} - -func (s *Session) buildImportPathWithSrcDir(path string, srcDir string) (*PackageData, *compiler.Archive, error) { - pkg, err := importWithSrcDir(*s.bctx, path, srcDir, 0, s.InstallSuffix()) - if s.Watcher != nil && pkg != nil { // add watch even on error - s.Watcher.Add(pkg.Dir) - } - if err != nil { - return nil, nil, err - } - - archive, err := s.BuildPackage(pkg) - if err != nil { - return nil, nil, err - } - - return pkg, archive, nil -} - -func (s *Session) BuildPackage(pkg *PackageData) (*compiler.Archive, error) { - if archive, ok := s.Archives[pkg.ImportPath]; ok { - return archive, nil - } - - if pkg.PkgObj != "" { - var fileInfo os.FileInfo - gopherjsBinary, err := os.Executable() - if err == nil { - fileInfo, err = os.Stat(gopherjsBinary) - if err == nil { - pkg.SrcModTime = fileInfo.ModTime() - } - } - if err != nil { - os.Stderr.WriteString("Could not get GopherJS binary's modification timestamp. Please report issue.\n") - pkg.SrcModTime = time.Now() - } - - for _, importedPkgPath := range pkg.Imports { - // Ignore all imports that aren't mentioned in import specs of pkg. - // For example, this ignores imports such as runtime/internal/sys and runtime/internal/atomic. - ignored := true - for _, pos := range pkg.ImportPos[importedPkgPath] { - importFile := filepath.Base(pos.Filename) - for _, file := range pkg.GoFiles { - if importFile == file { - ignored = false - break - } - } - if !ignored { - break - } - } - - if importedPkgPath == "unsafe" || ignored { - continue - } - importedPkg, _, err := s.buildImportPathWithSrcDir(importedPkgPath, pkg.Dir) - if err != nil { - return nil, err - } - impModTime := importedPkg.SrcModTime - if impModTime.After(pkg.SrcModTime) { - pkg.SrcModTime = impModTime - } - } - - for _, name := range append(pkg.GoFiles, pkg.JSFiles...) 
{ - fileInfo, err := statFile(filepath.Join(pkg.Dir, name)) - if err != nil { - return nil, err - } - if fileInfo.ModTime().After(pkg.SrcModTime) { - pkg.SrcModTime = fileInfo.ModTime() - } - } - - pkgObjFileInfo, err := os.Stat(pkg.PkgObj) - if err == nil && !pkg.SrcModTime.After(pkgObjFileInfo.ModTime()) { - // package object is up to date, load from disk if library - pkg.UpToDate = true - if pkg.IsCommand() { - return nil, nil - } - - objFile, err := os.Open(pkg.PkgObj) - if err != nil { - return nil, err - } - defer objFile.Close() - - archive, err := compiler.ReadArchive(pkg.PkgObj, pkg.ImportPath, objFile, s.Types) - if err != nil { - return nil, err - } - - s.Archives[pkg.ImportPath] = archive - return archive, err - } - } - - fileSet := token.NewFileSet() - files, err := parseAndAugment(s.bctx, pkg.Package, pkg.IsTest, fileSet) - if err != nil { - return nil, err - } - - localImportPathCache := make(map[string]*compiler.Archive) - importContext := &compiler.ImportContext{ - Packages: s.Types, - Import: func(path string) (*compiler.Archive, error) { - if archive, ok := localImportPathCache[path]; ok { - return archive, nil - } - _, archive, err := s.buildImportPathWithSrcDir(path, pkg.Dir) - if err != nil { - return nil, err - } - localImportPathCache[path] = archive - return archive, nil - }, - } - archive, err := compiler.Compile(pkg.ImportPath, files, fileSet, importContext, s.options.Minify) - if err != nil { - return nil, err - } - - for _, jsFile := range pkg.JSFiles { - code, err := ioutil.ReadFile(filepath.Join(pkg.Dir, jsFile)) - if err != nil { - return nil, err - } - archive.IncJSCode = append(archive.IncJSCode, []byte("\t(function() {\n")...) - archive.IncJSCode = append(archive.IncJSCode, code...) - archive.IncJSCode = append(archive.IncJSCode, []byte("\n\t}).call($global);\n")...) 
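For context: the cache rule in BuildPackage above is a pure mtime comparison — an archive is reused only if nothing feeding it (sources, .inc.js files, imports, or the gopherjs binary itself) is newer. The rule in isolation:

    package demo

    import (
        "os"
        "time"
    )

    // upToDate reports whether obj is at least as new as every input file.
    func upToDate(obj string, inputs ...string) bool {
        objInfo, err := os.Stat(obj)
        if err != nil {
            return false // no object yet: rebuild
        }
        var newest time.Time
        for _, in := range inputs {
            fi, err := os.Stat(in)
            if err != nil {
                return false
            }
            if fi.ModTime().After(newest) {
                newest = fi.ModTime()
            }
        }
        return !newest.After(objInfo.ModTime())
    }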
- } - - if s.options.Verbose { - fmt.Println(pkg.ImportPath) - } - - s.Archives[pkg.ImportPath] = archive - - if pkg.PkgObj == "" || pkg.IsCommand() { - return archive, nil - } - - if err := s.writeLibraryPackage(archive, pkg.PkgObj); err != nil { - if strings.HasPrefix(pkg.PkgObj, s.options.GOROOT) { - // fall back to first GOPATH workspace - firstGopathWorkspace := filepath.SplitList(s.options.GOPATH)[0] - if err := s.writeLibraryPackage(archive, filepath.Join(firstGopathWorkspace, pkg.PkgObj[len(s.options.GOROOT):])); err != nil { - return nil, err - } - return archive, nil - } - return nil, err - } - - return archive, nil -} - -func (s *Session) writeLibraryPackage(archive *compiler.Archive, pkgObj string) error { - if err := os.MkdirAll(filepath.Dir(pkgObj), 0777); err != nil { - return err - } - - objFile, err := os.Create(pkgObj) - if err != nil { - return err - } - defer objFile.Close() - - return compiler.WriteArchive(archive, objFile) -} - -func (s *Session) WriteCommandPackage(archive *compiler.Archive, pkgObj string) error { - if err := os.MkdirAll(filepath.Dir(pkgObj), 0777); err != nil { - return err - } - codeFile, err := os.Create(pkgObj) - if err != nil { - return err - } - defer codeFile.Close() - - sourceMapFilter := &compiler.SourceMapFilter{Writer: codeFile} - if s.options.CreateMapFile { - m := &sourcemap.Map{File: filepath.Base(pkgObj)} - mapFile, err := os.Create(pkgObj + ".map") - if err != nil { - return err - } - - defer func() { - m.WriteTo(mapFile) - mapFile.Close() - fmt.Fprintf(codeFile, "//# sourceMappingURL=%s.map\n", filepath.Base(pkgObj)) - }() - - sourceMapFilter.MappingCallback = NewMappingCallback(m, s.options.GOROOT, s.options.GOPATH, s.options.MapToLocalDisk) - } - - deps, err := compiler.ImportDependencies(archive, func(path string) (*compiler.Archive, error) { - if archive, ok := s.Archives[path]; ok { - return archive, nil - } - _, archive, err := s.buildImportPathWithSrcDir(path, "") - return archive, err - }) - if err != nil { - return err - } - return compiler.WriteProgramCode(deps, sourceMapFilter) -} - -func NewMappingCallback(m *sourcemap.Map, goroot, gopath string, localMap bool) func(generatedLine, generatedColumn int, originalPos token.Position) { - return func(generatedLine, generatedColumn int, originalPos token.Position) { - if !originalPos.IsValid() { - m.AddMapping(&sourcemap.Mapping{GeneratedLine: generatedLine, GeneratedColumn: generatedColumn}) - return - } - - file := originalPos.Filename - - switch hasGopathPrefix, prefixLen := hasGopathPrefix(file, gopath); { - case localMap: - // no-op: keep file as-is - case hasGopathPrefix: - file = filepath.ToSlash(file[prefixLen+4:]) - case strings.HasPrefix(file, goroot): - file = filepath.ToSlash(file[len(goroot)+4:]) - default: - file = filepath.Base(file) - } - - m.AddMapping(&sourcemap.Mapping{GeneratedLine: generatedLine, GeneratedColumn: generatedColumn, OriginalFile: file, OriginalLine: originalPos.Line, OriginalColumn: originalPos.Column}) - } -} - -func jsFilesFromDir(bctx *build.Context, dir string) ([]string, error) { - files, err := buildutil.ReadDir(bctx, dir) - if err != nil { - return nil, err - } - var jsFiles []string - for _, file := range files { - if strings.HasSuffix(file.Name(), ".inc.js") && file.Name()[0] != '_' && file.Name()[0] != '.' { - jsFiles = append(jsFiles, file.Name()) - } - } - return jsFiles, nil -} - -// hasGopathPrefix returns true and the length of the matched GOPATH workspace, -// iff file has a prefix that matches one of the GOPATH workspaces. 
-func hasGopathPrefix(file, gopath string) (hasGopathPrefix bool, prefixLen int) { - gopathWorkspaces := filepath.SplitList(gopath) - for _, gopathWorkspace := range gopathWorkspaces { - gopathWorkspace = filepath.Clean(gopathWorkspace) - if strings.HasPrefix(file, gopathWorkspace) { - return true, len(gopathWorkspace) - } - } - return false, 0 -} - -func (s *Session) WaitForChange() { - s.options.PrintSuccess("watching for changes...\n") - for { - select { - case ev := <-s.Watcher.Events: - if ev.Op&(fsnotify.Create|fsnotify.Write|fsnotify.Remove|fsnotify.Rename) == 0 || filepath.Base(ev.Name)[0] == '.' { - continue - } - if !strings.HasSuffix(ev.Name, ".go") && !strings.HasSuffix(ev.Name, ".inc.js") { - continue - } - s.options.PrintSuccess("change detected: %s\n", ev.Name) - case err := <-s.Watcher.Errors: - s.options.PrintError("watcher error: %s\n", err.Error()) - } - break - } - - go func() { - for range s.Watcher.Events { - // consume, else Close() may deadlock - } - }() - s.Watcher.Close() -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/bool.go b/vendor/github.com/gopherjs/gopherjs/compiler/analysis/bool.go deleted file mode 100644 index cba7c1c14..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/bool.go +++ /dev/null @@ -1,43 +0,0 @@ -package analysis - -import ( - "go/ast" - "go/constant" - "go/token" - "go/types" -) - -func BoolValue(expr ast.Expr, info *types.Info) (bool, bool) { - v := info.Types[expr].Value - if v != nil && v.Kind() == constant.Bool { - return constant.BoolVal(v), true - } - switch e := expr.(type) { - case *ast.BinaryExpr: - switch e.Op { - case token.LAND: - if b, ok := BoolValue(e.X, info); ok { - if !b { - return false, true - } - return BoolValue(e.Y, info) - } - case token.LOR: - if b, ok := BoolValue(e.X, info); ok { - if b { - return true, true - } - return BoolValue(e.Y, info) - } - } - case *ast.UnaryExpr: - if e.Op == token.NOT { - if b, ok := BoolValue(e.X, info); ok { - return !b, true - } - } - case *ast.ParenExpr: - return BoolValue(e.X, info) - } - return false, false -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/break.go b/vendor/github.com/gopherjs/gopherjs/compiler/analysis/break.go deleted file mode 100644 index 579815d92..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/break.go +++ /dev/null @@ -1,32 +0,0 @@ -package analysis - -import ( - "go/ast" - "go/token" -) - -func HasBreak(n ast.Node) bool { - v := hasBreakVisitor{} - ast.Walk(&v, n) - return v.hasBreak -} - -type hasBreakVisitor struct { - hasBreak bool -} - -func (v *hasBreakVisitor) Visit(node ast.Node) (w ast.Visitor) { - if v.hasBreak { - return nil - } - switch n := node.(type) { - case *ast.BranchStmt: - if n.Tok == token.BREAK && n.Label == nil { - v.hasBreak = true - return nil - } - case *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt, ast.Expr: - return nil - } - return v -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/escape.go b/vendor/github.com/gopherjs/gopherjs/compiler/analysis/escape.go deleted file mode 100644 index 2807ecf64..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/escape.go +++ /dev/null @@ -1,70 +0,0 @@ -package analysis - -import ( - "go/ast" - "go/token" - "go/types" -) - -func EscapingObjects(n ast.Node, info *types.Info) []*types.Var { - v := escapeAnalysis{ - info: info, - escaping: make(map[*types.Var]bool), - topScope: info.Scopes[n], - bottomScopes: make(map[*types.Scope]bool), 
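For context: EscapingObjects, which begins here and continues on the next line, is by its own comment a "huge overapproximation": taking the address of an identifier, or referencing an outer variable inside a func literal, marks the variable escaping. In source terms it flags both of these:

    package demo

    func leak() *int {
        x := 0
        return &x // &ident: x is recorded as escaping
    }

    func capture() func() int {
        y := 1
        return func() int { return y } // top-scope y used under a FuncLit: escaping
    }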
- } - ast.Walk(&v, n) - var list []*types.Var - for obj := range v.escaping { - list = append(list, obj) - } - return list -} - -type escapeAnalysis struct { - info *types.Info - escaping map[*types.Var]bool - topScope *types.Scope - bottomScopes map[*types.Scope]bool -} - -func (v *escapeAnalysis) Visit(node ast.Node) (w ast.Visitor) { - // huge overapproximation - switch n := node.(type) { - case *ast.UnaryExpr: - if n.Op == token.AND { - if _, ok := n.X.(*ast.Ident); ok { - return &escapingObjectCollector{v} - } - } - case *ast.FuncLit: - v.bottomScopes[v.info.Scopes[n.Type]] = true - return &escapingObjectCollector{v} - case *ast.ForStmt: - v.bottomScopes[v.info.Scopes[n.Body]] = true - case *ast.RangeStmt: - v.bottomScopes[v.info.Scopes[n.Body]] = true - } - return v -} - -type escapingObjectCollector struct { - analysis *escapeAnalysis -} - -func (v *escapingObjectCollector) Visit(node ast.Node) (w ast.Visitor) { - if id, ok := node.(*ast.Ident); ok { - if obj, ok := v.analysis.info.Uses[id].(*types.Var); ok { - for s := obj.Parent(); s != nil; s = s.Parent() { - if s == v.analysis.topScope { - v.analysis.escaping[obj] = true - break - } - if v.analysis.bottomScopes[s] { - break - } - } - } - } - return v -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/info.go b/vendor/github.com/gopherjs/gopherjs/compiler/analysis/info.go deleted file mode 100644 index a8181615f..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/info.go +++ /dev/null @@ -1,254 +0,0 @@ -package analysis - -import ( - "go/ast" - "go/token" - "go/types" - - "github.com/gopherjs/gopherjs/compiler/astutil" - "github.com/gopherjs/gopherjs/compiler/typesutil" -) - -type continueStmt struct { - forStmt *ast.ForStmt - analyzeStack []ast.Node -} - -type Info struct { - *types.Info - Pkg *types.Package - IsBlocking func(*types.Func) bool - HasPointer map[*types.Var]bool - FuncDeclInfos map[*types.Func]*FuncInfo - FuncLitInfos map[*ast.FuncLit]*FuncInfo - InitFuncInfo *FuncInfo - allInfos []*FuncInfo - comments ast.CommentMap -} - -type FuncInfo struct { - HasDefer bool - Flattened map[ast.Node]bool - Blocking map[ast.Node]bool - GotoLabel map[*types.Label]bool - LocalCalls map[*types.Func][][]ast.Node - ContinueStmts []continueStmt - p *Info - analyzeStack []ast.Node -} - -func (info *Info) newFuncInfo() *FuncInfo { - funcInfo := &FuncInfo{ - p: info, - Flattened: make(map[ast.Node]bool), - Blocking: make(map[ast.Node]bool), - GotoLabel: make(map[*types.Label]bool), - LocalCalls: make(map[*types.Func][][]ast.Node), - } - info.allInfos = append(info.allInfos, funcInfo) - return funcInfo -} - -func AnalyzePkg(files []*ast.File, fileSet *token.FileSet, typesInfo *types.Info, typesPkg *types.Package, isBlocking func(*types.Func) bool) *Info { - info := &Info{ - Info: typesInfo, - Pkg: typesPkg, - HasPointer: make(map[*types.Var]bool), - comments: make(ast.CommentMap), - IsBlocking: isBlocking, - FuncDeclInfos: make(map[*types.Func]*FuncInfo), - FuncLitInfos: make(map[*ast.FuncLit]*FuncInfo), - } - info.InitFuncInfo = info.newFuncInfo() - - for _, file := range files { - for k, v := range ast.NewCommentMap(fileSet, file, file.Comments) { - info.comments[k] = v - } - ast.Walk(info.InitFuncInfo, file) - } - - for { - done := true - for _, funcInfo := range info.allInfos { - for obj, calls := range funcInfo.LocalCalls { - if len(info.FuncDeclInfos[obj].Blocking) != 0 { - for _, call := range calls { - funcInfo.markBlocking(call) - } - delete(funcInfo.LocalCalls, obj) - done = false - } - } - } 
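// Hedged sketch (names hypothetical, not from the vendored source): the loop
// above is a fixed-point iteration — any function whose LocalCalls reach a
// known-blocking function becomes blocking itself, which may in turn affect
// its own callers, so sweeps repeat until nothing changes. The same idiom in
// miniature:

package sketch

// propagate marks every function that transitively calls a blocking one.
// calls maps a function to its callees; blocking is seeded with known
// blocking primitives and grows until a fixed point is reached.
func propagate(calls map[string][]string, blocking map[string]bool) {
	for changed := true; changed; {
		changed = false
		for fn, callees := range calls {
			if blocking[fn] {
				continue
			}
			for _, callee := range callees {
				if blocking[callee] {
					blocking[fn] = true
					changed = true
					break
				}
			}
		}
	}
}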
- if done { - break - } - } - - for _, funcInfo := range info.allInfos { - for _, continueStmt := range funcInfo.ContinueStmts { - if funcInfo.Blocking[continueStmt.forStmt.Post] { - funcInfo.markBlocking(continueStmt.analyzeStack) - } - } - } - - return info -} - -func (c *FuncInfo) Visit(node ast.Node) ast.Visitor { - if node == nil { - if len(c.analyzeStack) != 0 { - c.analyzeStack = c.analyzeStack[:len(c.analyzeStack)-1] - } - return nil - } - c.analyzeStack = append(c.analyzeStack, node) - - switch n := node.(type) { - case *ast.FuncDecl: - newInfo := c.p.newFuncInfo() - c.p.FuncDeclInfos[c.p.Defs[n.Name].(*types.Func)] = newInfo - return newInfo - case *ast.FuncLit: - newInfo := c.p.newFuncInfo() - c.p.FuncLitInfos[n] = newInfo - return newInfo - case *ast.BranchStmt: - switch n.Tok { - case token.GOTO: - for _, n2 := range c.analyzeStack { - c.Flattened[n2] = true - } - c.GotoLabel[c.p.Uses[n.Label].(*types.Label)] = true - case token.CONTINUE: - if n.Label != nil { - label := c.p.Uses[n.Label].(*types.Label) - for i := len(c.analyzeStack) - 1; i >= 0; i-- { - if labelStmt, ok := c.analyzeStack[i].(*ast.LabeledStmt); ok && c.p.Defs[labelStmt.Label] == label { - if _, ok := labelStmt.Stmt.(*ast.RangeStmt); ok { - return nil - } - stack := make([]ast.Node, len(c.analyzeStack)) - copy(stack, c.analyzeStack) - c.ContinueStmts = append(c.ContinueStmts, continueStmt{labelStmt.Stmt.(*ast.ForStmt), stack}) - return nil - } - } - return nil - } - for i := len(c.analyzeStack) - 1; i >= 0; i-- { - if _, ok := c.analyzeStack[i].(*ast.RangeStmt); ok { - return nil - } - if forStmt, ok := c.analyzeStack[i].(*ast.ForStmt); ok { - stack := make([]ast.Node, len(c.analyzeStack)) - copy(stack, c.analyzeStack) - c.ContinueStmts = append(c.ContinueStmts, continueStmt{forStmt, stack}) - return nil - } - } - } - case *ast.CallExpr: - callTo := func(obj types.Object) { - switch o := obj.(type) { - case *types.Func: - if recv := o.Type().(*types.Signature).Recv(); recv != nil { - if _, ok := recv.Type().Underlying().(*types.Interface); ok { - c.markBlocking(c.analyzeStack) - return - } - } - if o.Pkg() != c.p.Pkg { - if c.p.IsBlocking(o) { - c.markBlocking(c.analyzeStack) - } - return - } - stack := make([]ast.Node, len(c.analyzeStack)) - copy(stack, c.analyzeStack) - c.LocalCalls[o] = append(c.LocalCalls[o], stack) - case *types.Var: - c.markBlocking(c.analyzeStack) - } - } - switch f := astutil.RemoveParens(n.Fun).(type) { - case *ast.Ident: - callTo(c.p.Uses[f]) - case *ast.SelectorExpr: - if sel := c.p.Selections[f]; sel != nil && typesutil.IsJsObject(sel.Recv()) { - break - } - callTo(c.p.Uses[f.Sel]) - case *ast.FuncLit: - ast.Walk(c, n.Fun) - for _, arg := range n.Args { - ast.Walk(c, arg) - } - if len(c.p.FuncLitInfos[f].Blocking) != 0 { - c.markBlocking(c.analyzeStack) - } - return nil - default: - if !astutil.IsTypeExpr(f, c.p.Info) { - c.markBlocking(c.analyzeStack) - } - } - case *ast.SendStmt: - c.markBlocking(c.analyzeStack) - case *ast.UnaryExpr: - switch n.Op { - case token.AND: - if id, ok := astutil.RemoveParens(n.X).(*ast.Ident); ok { - c.p.HasPointer[c.p.Uses[id].(*types.Var)] = true - } - case token.ARROW: - c.markBlocking(c.analyzeStack) - } - case *ast.RangeStmt: - if _, ok := c.p.TypeOf(n.X).Underlying().(*types.Chan); ok { - c.markBlocking(c.analyzeStack) - } - case *ast.SelectStmt: - for _, s := range n.Body.List { - if s.(*ast.CommClause).Comm == nil { // default clause - return c - } - } - c.markBlocking(c.analyzeStack) - case *ast.CommClause: - switch comm := n.Comm.(type) { - 
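// Illustrative example (hypothetical names): in the SelectStmt case above, a
// select containing a default clause is deliberately not marked blocking —
// it can always proceed immediately — while a bare channel receive is. The
// two shapes the analysis distinguishes:

package sketch

func tryRecv(ch chan int) (int, bool) {
	select {
	case v := <-ch:
		return v, true
	default: // default clause: the select never suspends
		return 0, false
	}
}

func mustRecv(ch chan int) int {
	return <-ch // plain receive: always treated as blocking
}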
case *ast.SendStmt: - ast.Walk(c, comm.Chan) - ast.Walk(c, comm.Value) - case *ast.ExprStmt: - ast.Walk(c, comm.X.(*ast.UnaryExpr).X) - case *ast.AssignStmt: - ast.Walk(c, comm.Rhs[0].(*ast.UnaryExpr).X) - } - for _, s := range n.Body { - ast.Walk(c, s) - } - return nil - case *ast.GoStmt: - ast.Walk(c, n.Call.Fun) - for _, arg := range n.Call.Args { - ast.Walk(c, arg) - } - return nil - case *ast.DeferStmt: - c.HasDefer = true - if funcLit, ok := n.Call.Fun.(*ast.FuncLit); ok { - ast.Walk(c, funcLit.Body) - } - } - return c -} - -func (c *FuncInfo) markBlocking(stack []ast.Node) { - for _, n := range stack { - c.Blocking[n] = true - c.Flattened[n] = true - } -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/sideeffect.go b/vendor/github.com/gopherjs/gopherjs/compiler/analysis/sideeffect.go deleted file mode 100644 index a94d92b67..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/analysis/sideeffect.go +++ /dev/null @@ -1,37 +0,0 @@ -package analysis - -import ( - "go/ast" - "go/token" - "go/types" -) - -func HasSideEffect(n ast.Node, info *types.Info) bool { - v := hasSideEffectVisitor{info: info} - ast.Walk(&v, n) - return v.hasSideEffect -} - -type hasSideEffectVisitor struct { - info *types.Info - hasSideEffect bool -} - -func (v *hasSideEffectVisitor) Visit(node ast.Node) (w ast.Visitor) { - if v.hasSideEffect { - return nil - } - switch n := node.(type) { - case *ast.CallExpr: - if _, isSig := v.info.TypeOf(n.Fun).(*types.Signature); isSig { // skip conversions - v.hasSideEffect = true - return nil - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - v.hasSideEffect = true - return nil - } - } - return v -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/astutil/astutil.go b/vendor/github.com/gopherjs/gopherjs/compiler/astutil/astutil.go deleted file mode 100644 index 7cd93b3dd..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/astutil/astutil.go +++ /dev/null @@ -1,48 +0,0 @@ -package astutil - -import ( - "go/ast" - "go/types" -) - -func RemoveParens(e ast.Expr) ast.Expr { - for { - p, isParen := e.(*ast.ParenExpr) - if !isParen { - return e - } - e = p.X - } -} - -func SetType(info *types.Info, t types.Type, e ast.Expr) ast.Expr { - info.Types[e] = types.TypeAndValue{Type: t} - return e -} - -func NewIdent(name string, t types.Type, info *types.Info, pkg *types.Package) *ast.Ident { - ident := ast.NewIdent(name) - info.Types[ident] = types.TypeAndValue{Type: t} - obj := types.NewVar(0, pkg, name, t) - info.Uses[ident] = obj - return ident -} - -func IsTypeExpr(expr ast.Expr, info *types.Info) bool { - switch e := expr.(type) { - case *ast.ArrayType, *ast.ChanType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.StructType: - return true - case *ast.StarExpr: - return IsTypeExpr(e.X, info) - case *ast.Ident: - _, ok := info.Uses[e].(*types.TypeName) - return ok - case *ast.SelectorExpr: - _, ok := info.Uses[e.Sel].(*types.TypeName) - return ok - case *ast.ParenExpr: - return IsTypeExpr(e.X, info) - default: - return false - } -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/compiler.go b/vendor/github.com/gopherjs/gopherjs/compiler/compiler.go deleted file mode 100644 index 36ec91a8e..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/compiler.go +++ /dev/null @@ -1,297 +0,0 @@ -package compiler - -import ( - "bytes" - "encoding/binary" - "encoding/gob" - "encoding/json" - "fmt" - "go/token" - "go/types" - "io" - "strings" - - "github.com/gopherjs/gopherjs/compiler/prelude" - 
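// Illustrative example (hypothetical names): in FuncInfo.Visit above, a go
// statement only walks the call's Fun and Args — work done inside the new
// goroutine cannot make its spawner blocking — whereas a deferred func
// literal's body is walked under the same FuncInfo, since deferred code runs
// as part of the enclosing function:

package sketch

func spawn(ch chan int) {
	go func() { ch <- 1 }() // the send blocks the goroutine, not spawn itself
}

func withDefer(ch chan int) {
	defer func() { ch <- 1 }() // analyzed as part of withDefer: it is blocking
}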
"golang.org/x/tools/go/gcexportdata" -) - -var sizes32 = &types.StdSizes{WordSize: 4, MaxAlign: 8} -var reservedKeywords = make(map[string]bool) -var _ = ___GOPHERJS_REQUIRES_GO_VERSION_1_12___ // Compile error on other Go versions, because they're not supported. - -func init() { - for _, keyword := range []string{"abstract", "arguments", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "eval", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "let", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "undefined", "var", "void", "volatile", "while", "with", "yield"} { - reservedKeywords[keyword] = true - } -} - -type ErrorList []error - -func (err ErrorList) Error() string { - return err[0].Error() -} - -type Archive struct { - ImportPath string - Name string - Imports []string - ExportData []byte - Declarations []*Decl - IncJSCode []byte - FileSet []byte - Minified bool -} - -type Decl struct { - FullName string - Vars []string - DeclCode []byte - MethodListCode []byte - TypeInitCode []byte - InitCode []byte - DceObjectFilter string - DceMethodFilter string - DceDeps []string - Blocking bool -} - -type Dependency struct { - Pkg string - Type string - Method string -} - -func ImportDependencies(archive *Archive, importPkg func(string) (*Archive, error)) ([]*Archive, error) { - var deps []*Archive - paths := make(map[string]bool) - var collectDependencies func(path string) error - collectDependencies = func(path string) error { - if paths[path] { - return nil - } - dep, err := importPkg(path) - if err != nil { - return err - } - for _, imp := range dep.Imports { - if err := collectDependencies(imp); err != nil { - return err - } - } - deps = append(deps, dep) - paths[dep.ImportPath] = true - return nil - } - - if err := collectDependencies("runtime"); err != nil { - return nil, err - } - for _, imp := range archive.Imports { - if err := collectDependencies(imp); err != nil { - return nil, err - } - } - - deps = append(deps, archive) - return deps, nil -} - -type dceInfo struct { - decl *Decl - objectFilter string - methodFilter string -} - -func WriteProgramCode(pkgs []*Archive, w *SourceMapFilter) error { - mainPkg := pkgs[len(pkgs)-1] - minify := mainPkg.Minified - - byFilter := make(map[string][]*dceInfo) - var pendingDecls []*Decl - for _, pkg := range pkgs { - for _, d := range pkg.Declarations { - if d.DceObjectFilter == "" && d.DceMethodFilter == "" { - pendingDecls = append(pendingDecls, d) - continue - } - info := &dceInfo{decl: d} - if d.DceObjectFilter != "" { - info.objectFilter = pkg.ImportPath + "." + d.DceObjectFilter - byFilter[info.objectFilter] = append(byFilter[info.objectFilter], info) - } - if d.DceMethodFilter != "" { - info.methodFilter = pkg.ImportPath + "." 
+ d.DceMethodFilter - byFilter[info.methodFilter] = append(byFilter[info.methodFilter], info) - } - } - } - - dceSelection := make(map[*Decl]struct{}) - for len(pendingDecls) != 0 { - d := pendingDecls[len(pendingDecls)-1] - pendingDecls = pendingDecls[:len(pendingDecls)-1] - - dceSelection[d] = struct{}{} - - for _, dep := range d.DceDeps { - if infos, ok := byFilter[dep]; ok { - delete(byFilter, dep) - for _, info := range infos { - if info.objectFilter == dep { - info.objectFilter = "" - } - if info.methodFilter == dep { - info.methodFilter = "" - } - if info.objectFilter == "" && info.methodFilter == "" { - pendingDecls = append(pendingDecls, info.decl) - } - } - } - } - } - - if _, err := w.Write([]byte("\"use strict\";\n(function() {\n\n")); err != nil { - return err - } - preludeJS := prelude.Prelude - if minify { - preludeJS = prelude.Minified - } - if _, err := io.WriteString(w, preludeJS); err != nil { - return err - } - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - - // write packages - for _, pkg := range pkgs { - if err := WritePkgCode(pkg, dceSelection, minify, w); err != nil { - return err - } - } - - if _, err := w.Write([]byte("$synthesizeMethods();\nvar $mainPkg = $packages[\"" + string(mainPkg.ImportPath) + "\"];\n$packages[\"runtime\"].$init();\n$go($mainPkg.$init, []);\n$flushConsole();\n\n}).call(this);\n")); err != nil { - return err - } - - return nil -} - -func WritePkgCode(pkg *Archive, dceSelection map[*Decl]struct{}, minify bool, w *SourceMapFilter) error { - if w.MappingCallback != nil && pkg.FileSet != nil { - w.fileSet = token.NewFileSet() - if err := w.fileSet.Read(json.NewDecoder(bytes.NewReader(pkg.FileSet)).Decode); err != nil { - panic(err) - } - } - if _, err := w.Write(pkg.IncJSCode); err != nil { - return err - } - if _, err := w.Write(removeWhitespace([]byte(fmt.Sprintf("$packages[\"%s\"] = (function() {\n", pkg.ImportPath)), minify)); err != nil { - return err - } - vars := []string{"$pkg = {}", "$init"} - var filteredDecls []*Decl - for _, d := range pkg.Declarations { - if _, ok := dceSelection[d]; ok { - vars = append(vars, d.Vars...) 
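// Hedged sketch (hypothetical types and names): the dead-code elimination in
// WriteProgramCode above is a mark phase — declarations without a DCE filter
// are roots, and each selected declaration's DceDeps unlock the filtered
// declarations registered under those names. The same scheme in miniature,
// with a single filter per declaration instead of the object/method pair:

package sketch

type decl struct {
	filter string   // "" marks an unconditional root
	deps   []string // filters this declaration keeps alive
}

func liveSet(decls []*decl) map[*decl]bool {
	byFilter := map[string][]*decl{}
	var pending []*decl
	for _, d := range decls {
		if d.filter == "" {
			pending = append(pending, d) // root: always emitted
		} else {
			byFilter[d.filter] = append(byFilter[d.filter], d)
		}
	}
	live := map[*decl]bool{}
	for len(pending) > 0 {
		d := pending[len(pending)-1]
		pending = pending[:len(pending)-1]
		live[d] = true
		for _, dep := range d.deps {
			pending = append(pending, byFilter[dep]...) // unlock dependents
			delete(byFilter, dep)
		}
	}
	return live
}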
- filteredDecls = append(filteredDecls, d) - } - } - if _, err := w.Write(removeWhitespace([]byte(fmt.Sprintf("\tvar %s;\n", strings.Join(vars, ", "))), minify)); err != nil { - return err - } - for _, d := range filteredDecls { - if _, err := w.Write(d.DeclCode); err != nil { - return err - } - } - for _, d := range filteredDecls { - if _, err := w.Write(d.MethodListCode); err != nil { - return err - } - } - for _, d := range filteredDecls { - if _, err := w.Write(d.TypeInitCode); err != nil { - return err - } - } - - if _, err := w.Write(removeWhitespace([]byte("\t$init = function() {\n\t\t$pkg.$init = function() {};\n\t\t/* */ var $f, $c = false, $s = 0, $r; if (this !== undefined && this.$blk !== undefined) { $f = this; $c = true; $s = $f.$s; $r = $f.$r; } s: while (true) { switch ($s) { case 0:\n"), minify)); err != nil { - return err - } - for _, d := range filteredDecls { - if _, err := w.Write(d.InitCode); err != nil { - return err - } - } - if _, err := w.Write(removeWhitespace([]byte("\t\t/* */ } return; } if ($f === undefined) { $f = { $blk: $init }; } $f.$s = $s; $f.$r = $r; return $f;\n\t};\n\t$pkg.$init = $init;\n\treturn $pkg;\n})();"), minify)); err != nil { - return err - } - if _, err := w.Write([]byte("\n")); err != nil { // keep this \n even when minified - return err - } - return nil -} - -func ReadArchive(filename, path string, r io.Reader, packages map[string]*types.Package) (*Archive, error) { - var a Archive - if err := gob.NewDecoder(r).Decode(&a); err != nil { - return nil, err - } - - var err error - packages[path], err = gcexportdata.Read(bytes.NewReader(a.ExportData), token.NewFileSet(), packages, path) - if err != nil { - return nil, err - } - - return &a, nil -} - -func WriteArchive(a *Archive, w io.Writer) error { - return gob.NewEncoder(w).Encode(a) -} - -type SourceMapFilter struct { - Writer io.Writer - MappingCallback func(generatedLine, generatedColumn int, originalPos token.Position) - line int - column int - fileSet *token.FileSet -} - -func (f *SourceMapFilter) Write(p []byte) (n int, err error) { - var n2 int - for { - i := bytes.IndexByte(p, '\b') - w := p - if i != -1 { - w = p[:i] - } - - n2, err = f.Writer.Write(w) - n += n2 - for { - i := bytes.IndexByte(w, '\n') - if i == -1 { - f.column += len(w) - break - } - f.line++ - f.column = 0 - w = w[i+1:] - } - - if err != nil || i == -1 { - return - } - if f.MappingCallback != nil { - f.MappingCallback(f.line+1, f.column, f.fileSet.Position(token.Pos(binary.BigEndian.Uint32(p[i+1:i+5])))) - } - p = p[i+5:] - n += 5 - } -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/expressions.go b/vendor/github.com/gopherjs/gopherjs/compiler/expressions.go deleted file mode 100644 index 42fe624b6..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/expressions.go +++ /dev/null @@ -1,1373 +0,0 @@ -package compiler - -import ( - "bytes" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "sort" - "strconv" - "strings" - - "github.com/gopherjs/gopherjs/compiler/analysis" - "github.com/gopherjs/gopherjs/compiler/astutil" - "github.com/gopherjs/gopherjs/compiler/typesutil" -) - -type expression struct { - str string - parens bool -} - -func (e *expression) String() string { - return e.str -} - -func (e *expression) StringWithParens() string { - if e.parens { - return "(" + e.str + ")" - } - return e.str -} - -func (c *funcContext) translateExpr(expr ast.Expr) *expression { - exprType := c.p.TypeOf(expr) - if value := c.p.Types[expr].Value; value != nil { - basic := 
exprType.Underlying().(*types.Basic) - switch { - case isBoolean(basic): - return c.formatExpr("%s", strconv.FormatBool(constant.BoolVal(value))) - case isInteger(basic): - if is64Bit(basic) { - if basic.Kind() == types.Int64 { - d, ok := constant.Int64Val(constant.ToInt(value)) - if !ok { - panic("could not get exact uint") - } - return c.formatExpr("new %s(%s, %s)", c.typeName(exprType), strconv.FormatInt(d>>32, 10), strconv.FormatUint(uint64(d)&(1<<32-1), 10)) - } - d, ok := constant.Uint64Val(constant.ToInt(value)) - if !ok { - panic("could not get exact uint") - } - return c.formatExpr("new %s(%s, %s)", c.typeName(exprType), strconv.FormatUint(d>>32, 10), strconv.FormatUint(d&(1<<32-1), 10)) - } - d, ok := constant.Int64Val(constant.ToInt(value)) - if !ok { - panic("could not get exact int") - } - return c.formatExpr("%s", strconv.FormatInt(d, 10)) - case isFloat(basic): - f, _ := constant.Float64Val(value) - return c.formatExpr("%s", strconv.FormatFloat(f, 'g', -1, 64)) - case isComplex(basic): - r, _ := constant.Float64Val(constant.Real(value)) - i, _ := constant.Float64Val(constant.Imag(value)) - if basic.Kind() == types.UntypedComplex { - exprType = types.Typ[types.Complex128] - } - return c.formatExpr("new %s(%s, %s)", c.typeName(exprType), strconv.FormatFloat(r, 'g', -1, 64), strconv.FormatFloat(i, 'g', -1, 64)) - case isString(basic): - return c.formatExpr("%s", encodeString(constant.StringVal(value))) - default: - panic("Unhandled constant type: " + basic.String()) - } - } - - var obj types.Object - switch e := expr.(type) { - case *ast.SelectorExpr: - obj = c.p.Uses[e.Sel] - case *ast.Ident: - obj = c.p.Defs[e] - if obj == nil { - obj = c.p.Uses[e] - } - } - - if obj != nil && typesutil.IsJsPackage(obj.Pkg()) { - switch obj.Name() { - case "Global": - return c.formatExpr("$global") - case "Module": - return c.formatExpr("$module") - case "Undefined": - return c.formatExpr("undefined") - } - } - - switch e := expr.(type) { - case *ast.CompositeLit: - if ptrType, isPointer := exprType.(*types.Pointer); isPointer { - exprType = ptrType.Elem() - } - - collectIndexedElements := func(elementType types.Type) []string { - var elements []string - i := 0 - zero := c.translateExpr(c.zeroValue(elementType)).String() - for _, element := range e.Elts { - if kve, isKve := element.(*ast.KeyValueExpr); isKve { - key, ok := constant.Int64Val(constant.ToInt(c.p.Types[kve.Key].Value)) - if !ok { - panic("could not get exact int") - } - i = int(key) - element = kve.Value - } - for len(elements) <= i { - elements = append(elements, zero) - } - elements[i] = c.translateImplicitConversionWithCloning(element, elementType).String() - i++ - } - return elements - } - - switch t := exprType.Underlying().(type) { - case *types.Array: - elements := collectIndexedElements(t.Elem()) - if len(elements) == 0 { - return c.formatExpr("%s.zero()", c.typeName(t)) - } - zero := c.translateExpr(c.zeroValue(t.Elem())).String() - for len(elements) < int(t.Len()) { - elements = append(elements, zero) - } - return c.formatExpr(`$toNativeArray(%s, [%s])`, typeKind(t.Elem()), strings.Join(elements, ", ")) - case *types.Slice: - return c.formatExpr("new %s([%s])", c.typeName(exprType), strings.Join(collectIndexedElements(t.Elem()), ", ")) - case *types.Map: - entries := make([]string, len(e.Elts)) - for i, element := range e.Elts { - kve := element.(*ast.KeyValueExpr) - entries[i] = fmt.Sprintf("{ k: %s, v: %s }", c.translateImplicitConversionWithCloning(kve.Key, t.Key()), 
c.translateImplicitConversionWithCloning(kve.Value, t.Elem())) - } - return c.formatExpr("$makeMap(%s.keyFor, [%s])", c.typeName(t.Key()), strings.Join(entries, ", ")) - case *types.Struct: - elements := make([]string, t.NumFields()) - isKeyValue := true - if len(e.Elts) != 0 { - _, isKeyValue = e.Elts[0].(*ast.KeyValueExpr) - } - if !isKeyValue { - for i, element := range e.Elts { - elements[i] = c.translateImplicitConversionWithCloning(element, t.Field(i).Type()).String() - } - } - if isKeyValue { - for i := range elements { - elements[i] = c.translateExpr(c.zeroValue(t.Field(i).Type())).String() - } - for _, element := range e.Elts { - kve := element.(*ast.KeyValueExpr) - for j := range elements { - if kve.Key.(*ast.Ident).Name == t.Field(j).Name() { - elements[j] = c.translateImplicitConversionWithCloning(kve.Value, t.Field(j).Type()).String() - break - } - } - } - } - return c.formatExpr("new %s.ptr(%s)", c.typeName(exprType), strings.Join(elements, ", ")) - default: - panic(fmt.Sprintf("Unhandled CompositeLit type: %T\n", t)) - } - - case *ast.FuncLit: - _, fun := translateFunction(e.Type, nil, e.Body, c, exprType.(*types.Signature), c.p.FuncLitInfos[e], "") - if len(c.p.escapingVars) != 0 { - names := make([]string, 0, len(c.p.escapingVars)) - for obj := range c.p.escapingVars { - names = append(names, c.p.objectNames[obj]) - } - sort.Strings(names) - list := strings.Join(names, ", ") - return c.formatExpr("(function(%s) { return %s; })(%s)", list, fun, list) - } - return c.formatExpr("(%s)", fun) - - case *ast.UnaryExpr: - t := c.p.TypeOf(e.X) - switch e.Op { - case token.AND: - if typesutil.IsJsObject(exprType) { - return c.formatExpr("%e.object", e.X) - } - - switch t.Underlying().(type) { - case *types.Struct, *types.Array: - return c.translateExpr(e.X) - } - - switch x := astutil.RemoveParens(e.X).(type) { - case *ast.CompositeLit: - return c.formatExpr("$newDataPointer(%e, %s)", x, c.typeName(c.p.TypeOf(e))) - case *ast.Ident: - obj := c.p.Uses[x].(*types.Var) - if c.p.escapingVars[obj] { - return c.formatExpr("(%1s.$ptr || (%1s.$ptr = new %2s(function() { return this.$target[0]; }, function($v) { this.$target[0] = $v; }, %1s)))", c.p.objectNames[obj], c.typeName(exprType)) - } - return c.formatExpr(`(%1s || (%1s = new %2s(function() { return %3s; }, function($v) { %4s })))`, c.varPtrName(obj), c.typeName(exprType), c.objectName(obj), c.translateAssign(x, c.newIdent("$v", exprType), false)) - case *ast.SelectorExpr: - sel, ok := c.p.SelectionOf(x) - if !ok { - // qualified identifier - obj := c.p.Uses[x.Sel].(*types.Var) - return c.formatExpr(`(%1s || (%1s = new %2s(function() { return %3s; }, function($v) { %4s })))`, c.varPtrName(obj), c.typeName(exprType), c.objectName(obj), c.translateAssign(x, c.newIdent("$v", exprType), false)) - } - newSel := &ast.SelectorExpr{X: c.newIdent("this.$target", c.p.TypeOf(x.X)), Sel: x.Sel} - c.setType(newSel, exprType) - c.p.additionalSelections[newSel] = sel - return c.formatExpr("(%1e.$ptr_%2s || (%1e.$ptr_%2s = new %3s(function() { return %4e; }, function($v) { %5s }, %1e)))", x.X, x.Sel.Name, c.typeName(exprType), newSel, c.translateAssign(newSel, c.newIdent("$v", exprType), false)) - case *ast.IndexExpr: - if _, ok := c.p.TypeOf(x.X).Underlying().(*types.Slice); ok { - return c.formatExpr("$indexPtr(%1e.$array, %1e.$offset + %2e, %3s)", x.X, x.Index, c.typeName(exprType)) - } - return c.formatExpr("$indexPtr(%e, %e, %s)", x.X, x.Index, c.typeName(exprType)) - case *ast.StarExpr: - return c.translateExpr(x.X) - default: - 
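// Worked example (hypothetical name): the constant case of translateExpr
// above emits an int64/uint64 literal as a pair of 32-bit halves — JS numbers
// are float64 and cannot represent all 64-bit integers exactly — via
// d >> 32 and d & (1<<32 - 1). The same split in plain Go:

package sketch

func split64(d uint64) (high, low uint32) {
	return uint32(d >> 32), uint32(d & (1<<32 - 1))
}

// split64(0x100000001) == (1, 1); the emitted JS is roughly `new $Uint64(1, 1)`.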
panic(fmt.Sprintf("Unhandled: %T\n", x)) - } - - case token.ARROW: - call := &ast.CallExpr{ - Fun: c.newIdent("$recv", types.NewSignature(nil, types.NewTuple(types.NewVar(0, nil, "", t)), types.NewTuple(types.NewVar(0, nil, "", exprType), types.NewVar(0, nil, "", types.Typ[types.Bool])), false)), - Args: []ast.Expr{e.X}, - } - c.Blocking[call] = true - if _, isTuple := exprType.(*types.Tuple); isTuple { - return c.formatExpr("%e", call) - } - return c.formatExpr("%e[0]", call) - } - - basic := t.Underlying().(*types.Basic) - switch e.Op { - case token.ADD: - return c.translateExpr(e.X) - case token.SUB: - switch { - case is64Bit(basic): - return c.formatExpr("new %1s(-%2h, -%2l)", c.typeName(t), e.X) - case isComplex(basic): - return c.formatExpr("new %1s(-%2r, -%2i)", c.typeName(t), e.X) - case isUnsigned(basic): - return c.fixNumber(c.formatExpr("-%e", e.X), basic) - default: - return c.formatExpr("-%e", e.X) - } - case token.XOR: - if is64Bit(basic) { - return c.formatExpr("new %1s(~%2h, ~%2l >>> 0)", c.typeName(t), e.X) - } - return c.fixNumber(c.formatExpr("~%e", e.X), basic) - case token.NOT: - return c.formatExpr("!%e", e.X) - default: - panic(e.Op) - } - - case *ast.BinaryExpr: - if e.Op == token.NEQ { - return c.formatExpr("!(%s)", c.translateExpr(&ast.BinaryExpr{ - X: e.X, - Op: token.EQL, - Y: e.Y, - })) - } - - t := c.p.TypeOf(e.X) - t2 := c.p.TypeOf(e.Y) - _, isInterface := t2.Underlying().(*types.Interface) - if isInterface || types.Identical(t, types.Typ[types.UntypedNil]) { - t = t2 - } - - if basic, isBasic := t.Underlying().(*types.Basic); isBasic && isNumeric(basic) { - if is64Bit(basic) { - switch e.Op { - case token.MUL: - return c.formatExpr("$mul64(%e, %e)", e.X, e.Y) - case token.QUO: - return c.formatExpr("$div64(%e, %e, false)", e.X, e.Y) - case token.REM: - return c.formatExpr("$div64(%e, %e, true)", e.X, e.Y) - case token.SHL: - return c.formatExpr("$shiftLeft64(%e, %f)", e.X, e.Y) - case token.SHR: - return c.formatExpr("$shiftRight%s(%e, %f)", toJavaScriptType(basic), e.X, e.Y) - case token.EQL: - return c.formatExpr("(%1h === %2h && %1l === %2l)", e.X, e.Y) - case token.LSS: - return c.formatExpr("(%1h < %2h || (%1h === %2h && %1l < %2l))", e.X, e.Y) - case token.LEQ: - return c.formatExpr("(%1h < %2h || (%1h === %2h && %1l <= %2l))", e.X, e.Y) - case token.GTR: - return c.formatExpr("(%1h > %2h || (%1h === %2h && %1l > %2l))", e.X, e.Y) - case token.GEQ: - return c.formatExpr("(%1h > %2h || (%1h === %2h && %1l >= %2l))", e.X, e.Y) - case token.ADD, token.SUB: - return c.formatExpr("new %3s(%1h %4t %2h, %1l %4t %2l)", e.X, e.Y, c.typeName(t), e.Op) - case token.AND, token.OR, token.XOR: - return c.formatExpr("new %3s(%1h %4t %2h, (%1l %4t %2l) >>> 0)", e.X, e.Y, c.typeName(t), e.Op) - case token.AND_NOT: - return c.formatExpr("new %3s(%1h & ~%2h, (%1l & ~%2l) >>> 0)", e.X, e.Y, c.typeName(t)) - default: - panic(e.Op) - } - } - - if isComplex(basic) { - switch e.Op { - case token.EQL: - return c.formatExpr("(%1r === %2r && %1i === %2i)", e.X, e.Y) - case token.ADD, token.SUB: - return c.formatExpr("new %3s(%1r %4t %2r, %1i %4t %2i)", e.X, e.Y, c.typeName(t), e.Op) - case token.MUL: - return c.formatExpr("new %3s(%1r * %2r - %1i * %2i, %1r * %2i + %1i * %2r)", e.X, e.Y, c.typeName(t)) - case token.QUO: - return c.formatExpr("$divComplex(%e, %e)", e.X, e.Y) - default: - panic(e.Op) - } - } - - switch e.Op { - case token.EQL: - return c.formatParenExpr("%e === %e", e.X, e.Y) - case token.LSS, token.LEQ, token.GTR, token.GEQ: - return c.formatExpr("%e %t %e", e.X, 
e.Op, e.Y) - case token.ADD, token.SUB: - return c.fixNumber(c.formatExpr("%e %t %e", e.X, e.Op, e.Y), basic) - case token.MUL: - switch basic.Kind() { - case types.Int32, types.Int: - return c.formatParenExpr("$imul(%e, %e)", e.X, e.Y) - case types.Uint32, types.Uintptr: - return c.formatParenExpr("$imul(%e, %e) >>> 0", e.X, e.Y) - } - return c.fixNumber(c.formatExpr("%e * %e", e.X, e.Y), basic) - case token.QUO: - if isInteger(basic) { - // cut off decimals - shift := ">>" - if isUnsigned(basic) { - shift = ">>>" - } - return c.formatExpr(`(%1s = %2e / %3e, (%1s === %1s && %1s !== 1/0 && %1s !== -1/0) ? %1s %4s 0 : $throwRuntimeError("integer divide by zero"))`, c.newVariable("_q"), e.X, e.Y, shift) - } - if basic.Kind() == types.Float32 { - return c.fixNumber(c.formatExpr("%e / %e", e.X, e.Y), basic) - } - return c.formatExpr("%e / %e", e.X, e.Y) - case token.REM: - return c.formatExpr(`(%1s = %2e %% %3e, %1s === %1s ? %1s : $throwRuntimeError("integer divide by zero"))`, c.newVariable("_r"), e.X, e.Y) - case token.SHL, token.SHR: - op := e.Op.String() - if e.Op == token.SHR && isUnsigned(basic) { - op = ">>>" - } - if v := c.p.Types[e.Y].Value; v != nil { - i, _ := constant.Uint64Val(constant.ToInt(v)) - if i >= 32 { - return c.formatExpr("0") - } - return c.fixNumber(c.formatExpr("%e %s %s", e.X, op, strconv.FormatUint(i, 10)), basic) - } - if e.Op == token.SHR && !isUnsigned(basic) { - return c.fixNumber(c.formatParenExpr("%e >> $min(%f, 31)", e.X, e.Y), basic) - } - y := c.newVariable("y") - return c.fixNumber(c.formatExpr("(%s = %f, %s < 32 ? (%e %s %s) : 0)", y, e.Y, y, e.X, op, y), basic) - case token.AND, token.OR: - if isUnsigned(basic) { - return c.formatParenExpr("(%e %t %e) >>> 0", e.X, e.Op, e.Y) - } - return c.formatParenExpr("%e %t %e", e.X, e.Op, e.Y) - case token.AND_NOT: - return c.fixNumber(c.formatParenExpr("%e & ~%e", e.X, e.Y), basic) - case token.XOR: - return c.fixNumber(c.formatParenExpr("%e ^ %e", e.X, e.Y), basic) - default: - panic(e.Op) - } - } - - switch e.Op { - case token.ADD, token.LSS, token.LEQ, token.GTR, token.GEQ: - return c.formatExpr("%e %t %e", e.X, e.Op, e.Y) - case token.LAND: - if c.Blocking[e.Y] { - skipCase := c.caseCounter - c.caseCounter++ - resultVar := c.newVariable("_v") - c.Printf("if (!(%s)) { %s = false; $s = %d; continue s; }", c.translateExpr(e.X), resultVar, skipCase) - c.Printf("%s = %s; case %d:", resultVar, c.translateExpr(e.Y), skipCase) - return c.formatExpr("%s", resultVar) - } - return c.formatExpr("%e && %e", e.X, e.Y) - case token.LOR: - if c.Blocking[e.Y] { - skipCase := c.caseCounter - c.caseCounter++ - resultVar := c.newVariable("_v") - c.Printf("if (%s) { %s = true; $s = %d; continue s; }", c.translateExpr(e.X), resultVar, skipCase) - c.Printf("%s = %s; case %d:", resultVar, c.translateExpr(e.Y), skipCase) - return c.formatExpr("%s", resultVar) - } - return c.formatExpr("%e || %e", e.X, e.Y) - case token.EQL: - switch u := t.Underlying().(type) { - case *types.Array, *types.Struct: - return c.formatExpr("$equal(%e, %e, %s)", e.X, e.Y, c.typeName(t)) - case *types.Interface: - return c.formatExpr("$interfaceIsEqual(%s, %s)", c.translateImplicitConversion(e.X, t), c.translateImplicitConversion(e.Y, t)) - case *types.Pointer: - if _, ok := u.Elem().Underlying().(*types.Array); ok { - return c.formatExpr("$equal(%s, %s, %s)", c.translateImplicitConversion(e.X, t), c.translateImplicitConversion(e.Y, t), c.typeName(u.Elem())) - } - case *types.Basic: - if isBoolean(u) { - if b, ok := analysis.BoolValue(e.X, c.p.Info.Info); 
ok && b { - return c.translateExpr(e.Y) - } - if b, ok := analysis.BoolValue(e.Y, c.p.Info.Info); ok && b { - return c.translateExpr(e.X) - } - } - } - return c.formatExpr("%s === %s", c.translateImplicitConversion(e.X, t), c.translateImplicitConversion(e.Y, t)) - default: - panic(e.Op) - } - - case *ast.ParenExpr: - return c.formatParenExpr("%e", e.X) - - case *ast.IndexExpr: - switch t := c.p.TypeOf(e.X).Underlying().(type) { - case *types.Array, *types.Pointer: - pattern := rangeCheck("%1e[%2f]", c.p.Types[e.Index].Value != nil, true) - if _, ok := t.(*types.Pointer); ok { // check pointer for nix (attribute getter causes a panic) - pattern = `(%1e.nilCheck, ` + pattern + `)` - } - return c.formatExpr(pattern, e.X, e.Index) - case *types.Slice: - return c.formatExpr(rangeCheck("%1e.$array[%1e.$offset + %2f]", c.p.Types[e.Index].Value != nil, false), e.X, e.Index) - case *types.Map: - if typesutil.IsJsObject(c.p.TypeOf(e.Index)) { - c.p.errList = append(c.p.errList, types.Error{Fset: c.p.fileSet, Pos: e.Index.Pos(), Msg: "cannot use js.Object as map key"}) - } - key := fmt.Sprintf("%s.keyFor(%s)", c.typeName(t.Key()), c.translateImplicitConversion(e.Index, t.Key())) - if _, isTuple := exprType.(*types.Tuple); isTuple { - return c.formatExpr(`(%1s = %2e[%3s], %1s !== undefined ? [%1s.v, true] : [%4e, false])`, c.newVariable("_entry"), e.X, key, c.zeroValue(t.Elem())) - } - return c.formatExpr(`(%1s = %2e[%3s], %1s !== undefined ? %1s.v : %4e)`, c.newVariable("_entry"), e.X, key, c.zeroValue(t.Elem())) - case *types.Basic: - return c.formatExpr("%e.charCodeAt(%f)", e.X, e.Index) - default: - panic(fmt.Sprintf("Unhandled IndexExpr: %T\n", t)) - } - - case *ast.SliceExpr: - if b, isBasic := c.p.TypeOf(e.X).Underlying().(*types.Basic); isBasic && isString(b) { - switch { - case e.Low == nil && e.High == nil: - return c.translateExpr(e.X) - case e.Low == nil: - return c.formatExpr("$substring(%e, 0, %f)", e.X, e.High) - case e.High == nil: - return c.formatExpr("$substring(%e, %f)", e.X, e.Low) - default: - return c.formatExpr("$substring(%e, %f, %f)", e.X, e.Low, e.High) - } - } - slice := c.translateConversionToSlice(e.X, exprType) - switch { - case e.Low == nil && e.High == nil: - return c.formatExpr("%s", slice) - case e.Low == nil: - if e.Max != nil { - return c.formatExpr("$subslice(%s, 0, %f, %f)", slice, e.High, e.Max) - } - return c.formatExpr("$subslice(%s, 0, %f)", slice, e.High) - case e.High == nil: - return c.formatExpr("$subslice(%s, %f)", slice, e.Low) - default: - if e.Max != nil { - return c.formatExpr("$subslice(%s, %f, %f, %f)", slice, e.Low, e.High, e.Max) - } - return c.formatExpr("$subslice(%s, %f, %f)", slice, e.Low, e.High) - } - - case *ast.SelectorExpr: - sel, ok := c.p.SelectionOf(e) - if !ok { - // qualified identifier - return c.formatExpr("%s", c.objectName(obj)) - } - - switch sel.Kind() { - case types.FieldVal: - fields, jsTag := c.translateSelection(sel, e.Pos()) - if jsTag != "" { - if _, ok := sel.Type().(*types.Signature); ok { - return c.formatExpr("$internalize(%1e.%2s%3s, %4s, %1e.%2s)", e.X, strings.Join(fields, "."), formatJSStructTagVal(jsTag), c.typeName(sel.Type())) - } - return c.internalize(c.formatExpr("%e.%s%s", e.X, strings.Join(fields, "."), formatJSStructTagVal(jsTag)), sel.Type()) - } - return c.formatExpr("%e.%s", e.X, strings.Join(fields, ".")) - case types.MethodVal: - return c.formatExpr(`$methodVal(%s, "%s")`, c.makeReceiver(e), sel.Obj().(*types.Func).Name()) - case types.MethodExpr: - if !sel.Obj().Exported() { - 
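// Illustrative example (hypothetical names): for == on booleans above,
// analysis.BoolValue lets the compiler drop an operand that is statically
// true, so the comparison compiles to just the other operand instead of a
// redundant `=== true`:

package sketch

const debug = true

func enabled(x bool) bool {
	return debug == x // the constant-true side folds away; emits just `x`
}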
c.p.dependencies[sel.Obj()] = true - } - if _, ok := sel.Recv().Underlying().(*types.Interface); ok { - return c.formatExpr(`$ifaceMethodExpr("%s")`, sel.Obj().(*types.Func).Name()) - } - return c.formatExpr(`$methodExpr(%s, "%s")`, c.typeName(sel.Recv()), sel.Obj().(*types.Func).Name()) - default: - panic(fmt.Sprintf("unexpected sel.Kind(): %T", sel.Kind())) - } - - case *ast.CallExpr: - plainFun := astutil.RemoveParens(e.Fun) - - if astutil.IsTypeExpr(plainFun, c.p.Info.Info) { - return c.formatExpr("(%s)", c.translateConversion(e.Args[0], c.p.TypeOf(plainFun))) - } - - sig := c.p.TypeOf(plainFun).Underlying().(*types.Signature) - - switch f := plainFun.(type) { - case *ast.Ident: - obj := c.p.Uses[f] - if o, ok := obj.(*types.Builtin); ok { - return c.translateBuiltin(o.Name(), sig, e.Args, e.Ellipsis.IsValid()) - } - if typesutil.IsJsPackage(obj.Pkg()) && obj.Name() == "InternalObject" { - return c.translateExpr(e.Args[0]) - } - return c.translateCall(e, sig, c.translateExpr(f)) - - case *ast.SelectorExpr: - sel, ok := c.p.SelectionOf(f) - if !ok { - // qualified identifier - obj := c.p.Uses[f.Sel] - if typesutil.IsJsPackage(obj.Pkg()) { - switch obj.Name() { - case "Debugger": - return c.formatExpr("debugger") - case "InternalObject": - return c.translateExpr(e.Args[0]) - } - } - return c.translateCall(e, sig, c.translateExpr(f)) - } - - externalizeExpr := func(e ast.Expr) string { - t := c.p.TypeOf(e) - if types.Identical(t, types.Typ[types.UntypedNil]) { - return "null" - } - return c.externalize(c.translateExpr(e).String(), t) - } - externalizeArgs := func(args []ast.Expr) string { - s := make([]string, len(args)) - for i, arg := range args { - s[i] = externalizeExpr(arg) - } - return strings.Join(s, ", ") - } - - switch sel.Kind() { - case types.MethodVal: - recv := c.makeReceiver(f) - declaredFuncRecv := sel.Obj().(*types.Func).Type().(*types.Signature).Recv().Type() - if typesutil.IsJsObject(declaredFuncRecv) { - globalRef := func(id string) string { - if recv.String() == "$global" && id[0] == '$' && len(id) > 1 { - return id - } - return recv.String() + "." 
+ id - } - switch sel.Obj().Name() { - case "Get": - if id, ok := c.identifierConstant(e.Args[0]); ok { - return c.formatExpr("%s", globalRef(id)) - } - return c.formatExpr("%s[$externalize(%e, $String)]", recv, e.Args[0]) - case "Set": - if id, ok := c.identifierConstant(e.Args[0]); ok { - return c.formatExpr("%s = %s", globalRef(id), externalizeExpr(e.Args[1])) - } - return c.formatExpr("%s[$externalize(%e, $String)] = %s", recv, e.Args[0], externalizeExpr(e.Args[1])) - case "Delete": - return c.formatExpr("delete %s[$externalize(%e, $String)]", recv, e.Args[0]) - case "Length": - return c.formatExpr("$parseInt(%s.length)", recv) - case "Index": - return c.formatExpr("%s[%e]", recv, e.Args[0]) - case "SetIndex": - return c.formatExpr("%s[%e] = %s", recv, e.Args[0], externalizeExpr(e.Args[1])) - case "Call": - if id, ok := c.identifierConstant(e.Args[0]); ok { - if e.Ellipsis.IsValid() { - objVar := c.newVariable("obj") - return c.formatExpr("(%s = %s, %s.%s.apply(%s, %s))", objVar, recv, objVar, id, objVar, externalizeExpr(e.Args[1])) - } - return c.formatExpr("%s(%s)", globalRef(id), externalizeArgs(e.Args[1:])) - } - if e.Ellipsis.IsValid() { - objVar := c.newVariable("obj") - return c.formatExpr("(%s = %s, %s[$externalize(%e, $String)].apply(%s, %s))", objVar, recv, objVar, e.Args[0], objVar, externalizeExpr(e.Args[1])) - } - return c.formatExpr("%s[$externalize(%e, $String)](%s)", recv, e.Args[0], externalizeArgs(e.Args[1:])) - case "Invoke": - if e.Ellipsis.IsValid() { - return c.formatExpr("%s.apply(undefined, %s)", recv, externalizeExpr(e.Args[0])) - } - return c.formatExpr("%s(%s)", recv, externalizeArgs(e.Args)) - case "New": - if e.Ellipsis.IsValid() { - return c.formatExpr("new ($global.Function.prototype.bind.apply(%s, [undefined].concat(%s)))", recv, externalizeExpr(e.Args[0])) - } - return c.formatExpr("new (%s)(%s)", recv, externalizeArgs(e.Args)) - case "Bool": - return c.internalize(recv, types.Typ[types.Bool]) - case "String": - return c.internalize(recv, types.Typ[types.String]) - case "Int": - return c.internalize(recv, types.Typ[types.Int]) - case "Int64": - return c.internalize(recv, types.Typ[types.Int64]) - case "Uint64": - return c.internalize(recv, types.Typ[types.Uint64]) - case "Float": - return c.internalize(recv, types.Typ[types.Float64]) - case "Interface": - return c.internalize(recv, types.NewInterface(nil, nil)) - case "Unsafe": - return recv - default: - panic("Invalid js package object: " + sel.Obj().Name()) - } - } - - methodName := sel.Obj().Name() - if reservedKeywords[methodName] { - methodName += "$" - } - return c.translateCall(e, sig, c.formatExpr("%s.%s", recv, methodName)) - - case types.FieldVal: - fields, jsTag := c.translateSelection(sel, f.Pos()) - if jsTag != "" { - call := c.formatExpr("%e.%s%s(%s)", f.X, strings.Join(fields, "."), formatJSStructTagVal(jsTag), externalizeArgs(e.Args)) - switch sig.Results().Len() { - case 0: - return call - case 1: - return c.internalize(call, sig.Results().At(0).Type()) - default: - c.p.errList = append(c.p.errList, types.Error{Fset: c.p.fileSet, Pos: f.Pos(), Msg: "field with js tag can not have func type with multiple results"}) - } - } - return c.translateCall(e, sig, c.formatExpr("%e.%s", f.X, strings.Join(fields, "."))) - - case types.MethodExpr: - return c.translateCall(e, sig, c.translateExpr(f)) - - default: - panic(fmt.Sprintf("unexpected sel.Kind(): %T", sel.Kind())) - } - default: - return c.translateCall(e, sig, c.translateExpr(plainFun)) - } - - case *ast.StarExpr: - if 
typesutil.IsJsObject(c.p.TypeOf(e.X)) { - return c.formatExpr("new $jsObjectPtr(%e)", e.X) - } - if c1, isCall := e.X.(*ast.CallExpr); isCall && len(c1.Args) == 1 { - if c2, isCall := c1.Args[0].(*ast.CallExpr); isCall && len(c2.Args) == 1 && types.Identical(c.p.TypeOf(c2.Fun), types.Typ[types.UnsafePointer]) { - if unary, isUnary := c2.Args[0].(*ast.UnaryExpr); isUnary && unary.Op == token.AND { - return c.translateExpr(unary.X) // unsafe conversion - } - } - } - switch exprType.Underlying().(type) { - case *types.Struct, *types.Array: - return c.translateExpr(e.X) - } - return c.formatExpr("%e.$get()", e.X) - - case *ast.TypeAssertExpr: - if e.Type == nil { - return c.translateExpr(e.X) - } - t := c.p.TypeOf(e.Type) - if _, isTuple := exprType.(*types.Tuple); isTuple { - return c.formatExpr("$assertType(%e, %s, true)", e.X, c.typeName(t)) - } - return c.formatExpr("$assertType(%e, %s)", e.X, c.typeName(t)) - - case *ast.Ident: - if e.Name == "_" { - panic("Tried to translate underscore identifier.") - } - switch o := obj.(type) { - case *types.Var, *types.Const: - return c.formatExpr("%s", c.objectName(o)) - case *types.Func: - return c.formatExpr("%s", c.objectName(o)) - case *types.TypeName: - return c.formatExpr("%s", c.typeName(o.Type())) - case *types.Nil: - if typesutil.IsJsObject(exprType) { - return c.formatExpr("null") - } - switch t := exprType.Underlying().(type) { - case *types.Basic: - if t.Kind() != types.UnsafePointer { - panic("unexpected basic type") - } - return c.formatExpr("0") - case *types.Slice, *types.Pointer: - return c.formatExpr("%s.nil", c.typeName(exprType)) - case *types.Chan: - return c.formatExpr("$chanNil") - case *types.Map: - return c.formatExpr("false") - case *types.Interface: - return c.formatExpr("$ifaceNil") - case *types.Signature: - return c.formatExpr("$throwNilPointerError") - default: - panic(fmt.Sprintf("unexpected type: %T", t)) - } - default: - panic(fmt.Sprintf("Unhandled object: %T\n", o)) - } - - case nil: - return c.formatExpr("") - - default: - panic(fmt.Sprintf("Unhandled expression: %T\n", e)) - - } -} - -func (c *funcContext) translateCall(e *ast.CallExpr, sig *types.Signature, fun *expression) *expression { - args := c.translateArgs(sig, e.Args, e.Ellipsis.IsValid()) - if c.Blocking[e] { - resumeCase := c.caseCounter - c.caseCounter++ - returnVar := "$r" - if sig.Results().Len() != 0 { - returnVar = c.newVariable("_r") - } - c.Printf("%[1]s = %[2]s(%[3]s); /* */ $s = %[4]d; case %[4]d: if($c) { $c = false; %[1]s = %[1]s.$blk(); } if (%[1]s && %[1]s.$blk !== undefined) { break s; }", returnVar, fun, strings.Join(args, ", "), resumeCase) - if sig.Results().Len() != 0 { - return c.formatExpr("%s", returnVar) - } - return c.formatExpr("") - } - return c.formatExpr("%s(%s)", fun, strings.Join(args, ", ")) -} - -func (c *funcContext) makeReceiver(e *ast.SelectorExpr) *expression { - sel, _ := c.p.SelectionOf(e) - if !sel.Obj().Exported() { - c.p.dependencies[sel.Obj()] = true - } - - x := e.X - recvType := sel.Recv() - if len(sel.Index()) > 1 { - for _, index := range sel.Index()[:len(sel.Index())-1] { - if ptr, isPtr := recvType.(*types.Pointer); isPtr { - recvType = ptr.Elem() - } - s := recvType.Underlying().(*types.Struct) - recvType = s.Field(index).Type() - } - - fakeSel := &ast.SelectorExpr{X: x, Sel: ast.NewIdent("o")} - c.p.additionalSelections[fakeSel] = &fakeSelection{ - kind: types.FieldVal, - recv: sel.Recv(), - index: sel.Index()[:len(sel.Index())-1], - typ: recvType, - } - x = c.setType(fakeSel, recvType) - } - - _, 
isPointer := recvType.Underlying().(*types.Pointer) - methodsRecvType := sel.Obj().Type().(*types.Signature).Recv().Type() - _, pointerExpected := methodsRecvType.(*types.Pointer) - if !isPointer && pointerExpected { - recvType = types.NewPointer(recvType) - x = c.setType(&ast.UnaryExpr{Op: token.AND, X: x}, recvType) - } - if isPointer && !pointerExpected { - x = c.setType(x, methodsRecvType) - } - - recv := c.translateImplicitConversionWithCloning(x, methodsRecvType) - if isWrapped(recvType) { - recv = c.formatExpr("new %s(%s)", c.typeName(methodsRecvType), recv) - } - return recv -} - -func (c *funcContext) translateBuiltin(name string, sig *types.Signature, args []ast.Expr, ellipsis bool) *expression { - switch name { - case "new": - t := sig.Results().At(0).Type().(*types.Pointer) - if c.p.Pkg.Path() == "syscall" && types.Identical(t.Elem().Underlying(), types.Typ[types.Uintptr]) { - return c.formatExpr("new Uint8Array(8)") - } - switch t.Elem().Underlying().(type) { - case *types.Struct, *types.Array: - return c.formatExpr("%e", c.zeroValue(t.Elem())) - default: - return c.formatExpr("$newDataPointer(%e, %s)", c.zeroValue(t.Elem()), c.typeName(t)) - } - case "make": - switch argType := c.p.TypeOf(args[0]).Underlying().(type) { - case *types.Slice: - t := c.typeName(c.p.TypeOf(args[0])) - if len(args) == 3 { - return c.formatExpr("$makeSlice(%s, %f, %f)", t, args[1], args[2]) - } - return c.formatExpr("$makeSlice(%s, %f)", t, args[1]) - case *types.Map: - if len(args) == 2 && c.p.Types[args[1]].Value == nil { - return c.formatExpr(`((%1f < 0 || %1f > 2147483647) ? $throwRuntimeError("makemap: size out of range") : {})`, args[1]) - } - return c.formatExpr("{}") - case *types.Chan: - length := "0" - if len(args) == 2 { - length = c.formatExpr("%f", args[1]).String() - } - return c.formatExpr("new $Chan(%s, %s)", c.typeName(c.p.TypeOf(args[0]).Underlying().(*types.Chan).Elem()), length) - default: - panic(fmt.Sprintf("Unhandled make type: %T\n", argType)) - } - case "len": - switch argType := c.p.TypeOf(args[0]).Underlying().(type) { - case *types.Basic: - return c.formatExpr("%e.length", args[0]) - case *types.Slice: - return c.formatExpr("%e.$length", args[0]) - case *types.Pointer: - return c.formatExpr("(%e, %d)", args[0], argType.Elem().(*types.Array).Len()) - case *types.Map: - return c.formatExpr("$keys(%e).length", args[0]) - case *types.Chan: - return c.formatExpr("%e.$buffer.length", args[0]) - // length of array is constant - default: - panic(fmt.Sprintf("Unhandled len type: %T\n", argType)) - } - case "cap": - switch argType := c.p.TypeOf(args[0]).Underlying().(type) { - case *types.Slice, *types.Chan: - return c.formatExpr("%e.$capacity", args[0]) - case *types.Pointer: - return c.formatExpr("(%e, %d)", args[0], argType.Elem().(*types.Array).Len()) - // capacity of array is constant - default: - panic(fmt.Sprintf("Unhandled cap type: %T\n", argType)) - } - case "panic": - return c.formatExpr("$panic(%s)", c.translateImplicitConversion(args[0], types.NewInterface(nil, nil))) - case "append": - if ellipsis || len(args) == 1 { - argStr := c.translateArgs(sig, args, ellipsis) - return c.formatExpr("$appendSlice(%s, %s)", argStr[0], argStr[1]) - } - sliceType := sig.Results().At(0).Type().Underlying().(*types.Slice) - return c.formatExpr("$append(%e, %s)", args[0], strings.Join(c.translateExprSlice(args[1:], sliceType.Elem()), ", ")) - case "delete": - keyType := c.p.TypeOf(args[0]).Underlying().(*types.Map).Key() - return c.formatExpr(`delete %e[%s.keyFor(%s)]`, args[0], 
c.typeName(keyType), c.translateImplicitConversion(args[1], keyType)) - case "copy": - if basic, isBasic := c.p.TypeOf(args[1]).Underlying().(*types.Basic); isBasic && isString(basic) { - return c.formatExpr("$copyString(%e, %e)", args[0], args[1]) - } - return c.formatExpr("$copySlice(%e, %e)", args[0], args[1]) - case "print", "println": - return c.formatExpr("console.log(%s)", strings.Join(c.translateExprSlice(args, nil), ", ")) - case "complex": - argStr := c.translateArgs(sig, args, ellipsis) - return c.formatExpr("new %s(%s, %s)", c.typeName(sig.Results().At(0).Type()), argStr[0], argStr[1]) - case "real": - return c.formatExpr("%e.$real", args[0]) - case "imag": - return c.formatExpr("%e.$imag", args[0]) - case "recover": - return c.formatExpr("$recover()") - case "close": - return c.formatExpr(`$close(%e)`, args[0]) - default: - panic(fmt.Sprintf("Unhandled builtin: %s\n", name)) - } -} - -func (c *funcContext) identifierConstant(expr ast.Expr) (string, bool) { - val := c.p.Types[expr].Value - if val == nil { - return "", false - } - s := constant.StringVal(val) - if len(s) == 0 { - return "", false - } - for i, c := range s { - if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') || c == '_' || c == '$') { - return "", false - } - } - return s, true -} - -func (c *funcContext) translateExprSlice(exprs []ast.Expr, desiredType types.Type) []string { - parts := make([]string, len(exprs)) - for i, expr := range exprs { - parts[i] = c.translateImplicitConversion(expr, desiredType).String() - } - return parts -} - -func (c *funcContext) translateConversion(expr ast.Expr, desiredType types.Type) *expression { - exprType := c.p.TypeOf(expr) - if types.Identical(exprType, desiredType) { - return c.translateExpr(expr) - } - - if c.p.Pkg.Path() == "reflect" { - if call, isCall := expr.(*ast.CallExpr); isCall && types.Identical(c.p.TypeOf(call.Fun), types.Typ[types.UnsafePointer]) { - if ptr, isPtr := desiredType.(*types.Pointer); isPtr { - if named, isNamed := ptr.Elem().(*types.Named); isNamed { - switch named.Obj().Name() { - case "arrayType", "chanType", "funcType", "interfaceType", "mapType", "ptrType", "sliceType", "structType": - return c.formatExpr("%e.kindType", call.Args[0]) // unsafe conversion - default: - return c.translateExpr(expr) - } - } - } - } - } - - switch t := desiredType.Underlying().(type) { - case *types.Basic: - switch { - case isInteger(t): - basicExprType := exprType.Underlying().(*types.Basic) - switch { - case is64Bit(t): - if !is64Bit(basicExprType) { - if basicExprType.Kind() == types.Uintptr { // this might be an Object returned from reflect.Value.Pointer() - return c.formatExpr("new %1s(0, %2e.constructor === Number ? 
%2e : 1)", c.typeName(desiredType), expr) - } - return c.formatExpr("new %s(0, %e)", c.typeName(desiredType), expr) - } - return c.formatExpr("new %1s(%2h, %2l)", c.typeName(desiredType), expr) - case is64Bit(basicExprType): - if !isUnsigned(t) && !isUnsigned(basicExprType) { - return c.fixNumber(c.formatParenExpr("%1l + ((%1h >> 31) * 4294967296)", expr), t) - } - return c.fixNumber(c.formatExpr("%s.$low", c.translateExpr(expr)), t) - case isFloat(basicExprType): - return c.formatParenExpr("%e >> 0", expr) - case types.Identical(exprType, types.Typ[types.UnsafePointer]): - return c.translateExpr(expr) - default: - return c.fixNumber(c.translateExpr(expr), t) - } - case isFloat(t): - if t.Kind() == types.Float32 && exprType.Underlying().(*types.Basic).Kind() == types.Float64 { - return c.formatExpr("$fround(%e)", expr) - } - return c.formatExpr("%f", expr) - case isComplex(t): - return c.formatExpr("new %1s(%2r, %2i)", c.typeName(desiredType), expr) - case isString(t): - value := c.translateExpr(expr) - switch et := exprType.Underlying().(type) { - case *types.Basic: - if is64Bit(et) { - value = c.formatExpr("%s.$low", value) - } - if isNumeric(et) { - return c.formatExpr("$encodeRune(%s)", value) - } - return value - case *types.Slice: - if types.Identical(et.Elem().Underlying(), types.Typ[types.Rune]) { - return c.formatExpr("$runesToString(%s)", value) - } - return c.formatExpr("$bytesToString(%s)", value) - default: - panic(fmt.Sprintf("Unhandled conversion: %v\n", et)) - } - case t.Kind() == types.UnsafePointer: - if unary, isUnary := expr.(*ast.UnaryExpr); isUnary && unary.Op == token.AND { - if indexExpr, isIndexExpr := unary.X.(*ast.IndexExpr); isIndexExpr { - return c.formatExpr("$sliceToArray(%s)", c.translateConversionToSlice(indexExpr.X, types.NewSlice(types.Typ[types.Uint8]))) - } - if ident, isIdent := unary.X.(*ast.Ident); isIdent && ident.Name == "_zero" { - return c.formatExpr("new Uint8Array(0)") - } - } - if ptr, isPtr := c.p.TypeOf(expr).(*types.Pointer); c.p.Pkg.Path() == "syscall" && isPtr { - if s, isStruct := ptr.Elem().Underlying().(*types.Struct); isStruct { - array := c.newVariable("_array") - target := c.newVariable("_struct") - c.Printf("%s = new Uint8Array(%d);", array, sizes32.Sizeof(s)) - c.Delayed(func() { - c.Printf("%s = %s, %s;", target, c.translateExpr(expr), c.loadStruct(array, target, s)) - }) - return c.formatExpr("%s", array) - } - } - if call, ok := expr.(*ast.CallExpr); ok { - if id, ok := call.Fun.(*ast.Ident); ok && id.Name == "new" { - return c.formatExpr("new Uint8Array(%d)", int(sizes32.Sizeof(c.p.TypeOf(call.Args[0])))) - } - } - } - - case *types.Slice: - switch et := exprType.Underlying().(type) { - case *types.Basic: - if isString(et) { - if types.Identical(t.Elem().Underlying(), types.Typ[types.Rune]) { - return c.formatExpr("new %s($stringToRunes(%e))", c.typeName(desiredType), expr) - } - return c.formatExpr("new %s($stringToBytes(%e))", c.typeName(desiredType), expr) - } - case *types.Array, *types.Pointer: - return c.formatExpr("new %s(%e)", c.typeName(desiredType), expr) - } - - case *types.Pointer: - switch u := t.Elem().Underlying().(type) { - case *types.Array: - return c.translateExpr(expr) - case *types.Struct: - if c.p.Pkg.Path() == "syscall" && types.Identical(exprType, types.Typ[types.UnsafePointer]) { - array := c.newVariable("_array") - target := c.newVariable("_struct") - return c.formatExpr("(%s = %e, %s = %e, %s, %s)", array, expr, target, c.zeroValue(t.Elem()), c.loadStruct(array, target, u), target) - } - return 
c.formatExpr("$pointerOfStructConversion(%e, %s)", expr, c.typeName(t)) - } - - if !types.Identical(exprType, types.Typ[types.UnsafePointer]) { - exprTypeElem := exprType.Underlying().(*types.Pointer).Elem() - ptrVar := c.newVariable("_ptr") - getterConv := c.translateConversion(c.setType(&ast.StarExpr{X: c.newIdent(ptrVar, exprType)}, exprTypeElem), t.Elem()) - setterConv := c.translateConversion(c.newIdent("$v", t.Elem()), exprTypeElem) - return c.formatExpr("(%1s = %2e, new %3s(function() { return %4s; }, function($v) { %1s.$set(%5s); }, %1s.$target))", ptrVar, expr, c.typeName(desiredType), getterConv, setterConv) - } - - case *types.Interface: - if types.Identical(exprType, types.Typ[types.UnsafePointer]) { - return c.translateExpr(expr) - } - } - - return c.translateImplicitConversionWithCloning(expr, desiredType) -} - -func (c *funcContext) translateImplicitConversionWithCloning(expr ast.Expr, desiredType types.Type) *expression { - switch desiredType.Underlying().(type) { - case *types.Struct, *types.Array: - switch expr.(type) { - case nil, *ast.CompositeLit: - // nothing - default: - return c.formatExpr("$clone(%e, %s)", expr, c.typeName(desiredType)) - } - } - - return c.translateImplicitConversion(expr, desiredType) -} - -func (c *funcContext) translateImplicitConversion(expr ast.Expr, desiredType types.Type) *expression { - if desiredType == nil { - return c.translateExpr(expr) - } - - exprType := c.p.TypeOf(expr) - if types.Identical(exprType, desiredType) { - return c.translateExpr(expr) - } - - basicExprType, isBasicExpr := exprType.Underlying().(*types.Basic) - if isBasicExpr && basicExprType.Kind() == types.UntypedNil { - return c.formatExpr("%e", c.zeroValue(desiredType)) - } - - switch desiredType.Underlying().(type) { - case *types.Slice: - return c.formatExpr("$subslice(new %1s(%2e.$array), %2e.$offset, %2e.$offset + %2e.$length)", c.typeName(desiredType), expr) - - case *types.Interface: - if typesutil.IsJsObject(exprType) { - // wrap JS object into js.Object struct when converting to interface - return c.formatExpr("new $jsObjectPtr(%e)", expr) - } - if isWrapped(exprType) { - return c.formatExpr("new %s(%e)", c.typeName(exprType), expr) - } - if _, isStruct := exprType.Underlying().(*types.Struct); isStruct { - return c.formatExpr("new %1e.constructor.elem(%1e)", expr) - } - } - - return c.translateExpr(expr) -} - -func (c *funcContext) translateConversionToSlice(expr ast.Expr, desiredType types.Type) *expression { - switch c.p.TypeOf(expr).Underlying().(type) { - case *types.Array, *types.Pointer: - return c.formatExpr("new %s(%e)", c.typeName(desiredType), expr) - } - return c.translateExpr(expr) -} - -func (c *funcContext) loadStruct(array, target string, s *types.Struct) string { - view := c.newVariable("_view") - code := fmt.Sprintf("%s = new DataView(%s.buffer, %s.byteOffset)", view, array, array) - var fields []*types.Var - var collectFields func(s *types.Struct, path string) - collectFields = func(s *types.Struct, path string) { - for i := 0; i < s.NumFields(); i++ { - field := s.Field(i) - if fs, isStruct := field.Type().Underlying().(*types.Struct); isStruct { - collectFields(fs, path+"."+fieldName(s, i)) - continue - } - fields = append(fields, types.NewVar(0, nil, path+"."+fieldName(s, i), field.Type())) - } - } - collectFields(s, target) - offsets := sizes32.Offsetsof(fields) - for i, field := range fields { - switch t := field.Type().Underlying().(type) { - case *types.Basic: - if isNumeric(t) { - if is64Bit(t) { - code += fmt.Sprintf(", %s = new 
%s(%s.getUint32(%d, true), %s.getUint32(%d, true))", field.Name(), c.typeName(field.Type()), view, offsets[i]+4, view, offsets[i]) - break - } - code += fmt.Sprintf(", %s = %s.get%s(%d, true)", field.Name(), view, toJavaScriptType(t), offsets[i]) - } - case *types.Array: - code += fmt.Sprintf(`, %s = new ($nativeArray(%s))(%s.buffer, $min(%s.byteOffset + %d, %s.buffer.byteLength))`, field.Name(), typeKind(t.Elem()), array, array, offsets[i], array) - } - } - return code -} - -func (c *funcContext) fixNumber(value *expression, basic *types.Basic) *expression { - switch basic.Kind() { - case types.Int8: - return c.formatParenExpr("%s << 24 >> 24", value) - case types.Uint8: - return c.formatParenExpr("%s << 24 >>> 24", value) - case types.Int16: - return c.formatParenExpr("%s << 16 >> 16", value) - case types.Uint16: - return c.formatParenExpr("%s << 16 >>> 16", value) - case types.Int32, types.Int, types.UntypedInt: - return c.formatParenExpr("%s >> 0", value) - case types.Uint32, types.Uint, types.Uintptr: - return c.formatParenExpr("%s >>> 0", value) - case types.Float32: - return c.formatExpr("$fround(%s)", value) - case types.Float64: - return value - default: - panic(fmt.Sprintf("fixNumber: unhandled basic.Kind(): %s", basic.String())) - } -} - -func (c *funcContext) internalize(s *expression, t types.Type) *expression { - if typesutil.IsJsObject(t) { - return s - } - switch u := t.Underlying().(type) { - case *types.Basic: - switch { - case isBoolean(u): - return c.formatExpr("!!(%s)", s) - case isInteger(u) && !is64Bit(u): - return c.fixNumber(c.formatExpr("$parseInt(%s)", s), u) - case isFloat(u): - return c.formatExpr("$parseFloat(%s)", s) - } - } - return c.formatExpr("$internalize(%s, %s)", s, c.typeName(t)) -} - -func (c *funcContext) formatExpr(format string, a ...interface{}) *expression { - return c.formatExprInternal(format, a, false) -} - -func (c *funcContext) formatParenExpr(format string, a ...interface{}) *expression { - return c.formatExprInternal(format, a, true) -} - -func (c *funcContext) formatExprInternal(format string, a []interface{}, parens bool) *expression { - processFormat := func(f func(uint8, uint8, int)) { - n := 0 - for i := 0; i < len(format); i++ { - b := format[i] - if b == '%' { - i++ - k := format[i] - if k >= '0' && k <= '9' { - n = int(k - '0' - 1) - i++ - k = format[i] - } - f(0, k, n) - n++ - continue - } - f(b, 0, 0) - } - } - - counts := make([]int, len(a)) - processFormat(func(b, k uint8, n int) { - switch k { - case 'e', 'f', 'h', 'l', 'r', 'i': - counts[n]++ - } - }) - - out := bytes.NewBuffer(nil) - vars := make([]string, len(a)) - hasAssignments := false - for i, e := range a { - if counts[i] <= 1 { - continue - } - if _, isIdent := e.(*ast.Ident); isIdent { - continue - } - if val := c.p.Types[e.(ast.Expr)].Value; val != nil { - continue - } - if !hasAssignments { - hasAssignments = true - out.WriteByte('(') - parens = false - } - v := c.newVariable("x") - out.WriteString(v + " = " + c.translateExpr(e.(ast.Expr)).String() + ", ") - vars[i] = v - } - - processFormat(func(b, k uint8, n int) { - writeExpr := func(suffix string) { - if vars[n] != "" { - out.WriteString(vars[n] + suffix) - return - } - out.WriteString(c.translateExpr(a[n].(ast.Expr)).StringWithParens() + suffix) - } - switch k { - case 0: - out.WriteByte(b) - case 's': - if e, ok := a[n].(*expression); ok { - out.WriteString(e.StringWithParens()) - return - } - out.WriteString(a[n].(string)) - case 'd': - out.WriteString(strconv.Itoa(a[n].(int))) - case 't': - 
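			// The remaining verbs form formatExpr's small formatting language:
			// %s prints a string or *expression, %d an int, %t a token; %e emits
			// a translated expression (constants are inlined), %f flattens a
			// 64-bit value to a plain JS number via $flatten64, %h and %l select
			// the $high and $low words of a 64-bit value, and %r and %i select
			// the real and imaginary parts of a complex value. A leading digit,
			// as in %1e, reuses an argument; the temp-var pass above guarantees
			// such reused arguments are evaluated only once.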
-			out.WriteString(a[n].(token.Token).String())
-		case 'e':
-			e := a[n].(ast.Expr)
-			if val := c.p.Types[e].Value; val != nil {
-				out.WriteString(c.translateExpr(e).String())
-				return
-			}
-			writeExpr("")
-		case 'f':
-			e := a[n].(ast.Expr)
-			if val := c.p.Types[e].Value; val != nil {
-				d, _ := constant.Int64Val(constant.ToInt(val))
-				out.WriteString(strconv.FormatInt(d, 10))
-				return
-			}
-			if is64Bit(c.p.TypeOf(e).Underlying().(*types.Basic)) {
-				out.WriteString("$flatten64(")
-				writeExpr("")
-				out.WriteString(")")
-				return
-			}
-			writeExpr("")
-		case 'h':
-			e := a[n].(ast.Expr)
-			if val := c.p.Types[e].Value; val != nil {
-				d, _ := constant.Uint64Val(constant.ToInt(val))
-				if c.p.TypeOf(e).Underlying().(*types.Basic).Kind() == types.Int64 {
-					out.WriteString(strconv.FormatInt(int64(d)>>32, 10))
-					return
-				}
-				out.WriteString(strconv.FormatUint(d>>32, 10))
-				return
-			}
-			writeExpr(".$high")
-		case 'l':
-			if val := c.p.Types[a[n].(ast.Expr)].Value; val != nil {
-				d, _ := constant.Uint64Val(constant.ToInt(val))
-				out.WriteString(strconv.FormatUint(d&(1<<32-1), 10))
-				return
-			}
-			writeExpr(".$low")
-		case 'r':
-			if val := c.p.Types[a[n].(ast.Expr)].Value; val != nil {
-				r, _ := constant.Float64Val(constant.Real(val))
-				out.WriteString(strconv.FormatFloat(r, 'g', -1, 64))
-				return
-			}
-			writeExpr(".$real")
-		case 'i':
-			if val := c.p.Types[a[n].(ast.Expr)].Value; val != nil {
-				i, _ := constant.Float64Val(constant.Imag(val))
-				out.WriteString(strconv.FormatFloat(i, 'g', -1, 64))
-				return
-			}
-			writeExpr(".$imag")
-		case '%':
-			out.WriteRune('%')
-		default:
-			panic(fmt.Sprintf("formatExpr: %%%c%d", k, n))
-		}
-	})
-
-	if hasAssignments {
-		out.WriteByte(')')
-	}
-	return &expression{str: out.String(), parens: parens}
-}
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/filter/assign.go b/vendor/github.com/gopherjs/gopherjs/compiler/filter/assign.go
deleted file mode 100644
index 2681d4c6a..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/filter/assign.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package filter
-
-import (
-	"go/ast"
-	"go/token"
-	"go/types"
-
-	"github.com/gopherjs/gopherjs/compiler/astutil"
-)
-
-func Assign(stmt ast.Stmt, info *types.Info, pkg *types.Package) ast.Stmt {
-	if s, ok := stmt.(*ast.AssignStmt); ok && s.Tok != token.ASSIGN && s.Tok != token.DEFINE {
-		var op token.Token
-		switch s.Tok {
-		case token.ADD_ASSIGN:
-			op = token.ADD
-		case token.SUB_ASSIGN:
-			op = token.SUB
-		case token.MUL_ASSIGN:
-			op = token.MUL
-		case token.QUO_ASSIGN:
-			op = token.QUO
-		case token.REM_ASSIGN:
-			op = token.REM
-		case token.AND_ASSIGN:
-			op = token.AND
-		case token.OR_ASSIGN:
-			op = token.OR
-		case token.XOR_ASSIGN:
-			op = token.XOR
-		case token.SHL_ASSIGN:
-			op = token.SHL
-		case token.SHR_ASSIGN:
-			op = token.SHR
-		case token.AND_NOT_ASSIGN:
-			op = token.AND_NOT
-		default:
-			panic(s.Tok)
-		}
-
-		var list []ast.Stmt
-
-		var viaTmpVars func(expr ast.Expr, name string) ast.Expr
-		viaTmpVars = func(expr ast.Expr, name string) ast.Expr {
-			switch e := astutil.RemoveParens(expr).(type) {
-			case *ast.IndexExpr:
-				return astutil.SetType(info, info.TypeOf(e), &ast.IndexExpr{
-					X:     viaTmpVars(e.X, "_slice"),
-					Index: viaTmpVars(e.Index, "_index"),
-				})
-
-			case *ast.SelectorExpr:
-				sel, ok := info.Selections[e]
-				if !ok {
-					// qualified identifier
-					return e
-				}
-				newSel := &ast.SelectorExpr{
-					X:   viaTmpVars(e.X, "_struct"),
-					Sel: e.Sel,
-				}
-				info.Selections[newSel] = sel
-				return astutil.SetType(info, info.TypeOf(e), newSel)
-
-			case *ast.StarExpr:
-				return astutil.SetType(info, info.TypeOf(e), &ast.StarExpr{
-					X: viaTmpVars(e.X, "_ptr"),
-				})
-
-			case *ast.Ident, *ast.BasicLit:
-				return e
-
-			default:
-				tmpVar := astutil.NewIdent(name, info.TypeOf(e), info, pkg)
-				list = append(list, &ast.AssignStmt{
-					Lhs: []ast.Expr{tmpVar},
-					Tok: token.DEFINE,
-					Rhs: []ast.Expr{e},
-				})
-				return tmpVar
-
-			}
-		}
-
-		lhs := viaTmpVars(s.Lhs[0], "_val")
-
-		list = append(list, &ast.AssignStmt{
-			Lhs: []ast.Expr{lhs},
-			Tok: token.ASSIGN,
-			Rhs: []ast.Expr{
-				astutil.SetType(info, info.TypeOf(s.Lhs[0]), &ast.BinaryExpr{
-					X:  lhs,
-					Op: op,
-					Y: astutil.SetType(info, info.TypeOf(s.Rhs[0]), &ast.ParenExpr{
-						X: s.Rhs[0],
-					}),
-				}),
-			},
-		})
-
-		return &ast.BlockStmt{
-			List: list,
-		}
-	}
-	return stmt
-}
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/filter/incdecstmt.go b/vendor/github.com/gopherjs/gopherjs/compiler/filter/incdecstmt.go
deleted file mode 100644
index c4899ba1d..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/filter/incdecstmt.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package filter
-
-import (
-	"go/ast"
-	"go/constant"
-	"go/token"
-	"go/types"
-)
-
-func IncDecStmt(stmt ast.Stmt, info *types.Info) ast.Stmt {
-	if s, ok := stmt.(*ast.IncDecStmt); ok {
-		t := info.TypeOf(s.X)
-		if iExpr, isIExpr := s.X.(*ast.IndexExpr); isIExpr {
-			switch u := info.TypeOf(iExpr.X).Underlying().(type) {
-			case *types.Array:
-				t = u.Elem()
-			case *types.Slice:
-				t = u.Elem()
-			case *types.Map:
-				t = u.Elem()
-			}
-		}
-
-		tok := token.ADD_ASSIGN
-		if s.Tok == token.DEC {
-			tok = token.SUB_ASSIGN
-		}
-
-		one := &ast.BasicLit{Kind: token.INT}
-		info.Types[one] = types.TypeAndValue{Type: t, Value: constant.MakeInt64(1)}
-
-		return &ast.AssignStmt{
-			Lhs: []ast.Expr{s.X},
-			Tok: tok,
-			Rhs: []ast.Expr{one},
-		}
-	}
-	return stmt
-}
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/doc.go b/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/doc.go
deleted file mode 100644
index f57e84f74..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Package gopherjspkg provides core GopherJS packages via a virtual filesystem.
-//
-// Core GopherJS packages are packages that are critical for GopherJS compiler
-// operation. They are needed to build the Go standard library with GopherJS.
-// Currently, they include:
-//
-//	github.com/gopherjs/gopherjs/js
-//	github.com/gopherjs/gopherjs/nosync
-//
-package gopherjspkg
-
-//go:generate vfsgendev -source="github.com/gopherjs/gopherjs/compiler/gopherjspkg".FS -tag=gopherjsdev
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/fs.go b/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/fs.go
deleted file mode 100644
index f6fb2622d..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/fs.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build gopherjsdev
-
-package gopherjspkg
-
-import (
-	"go/build"
-	"log"
-	"net/http"
-	"os"
-	pathpkg "path"
-
-	"github.com/shurcooL/httpfs/filter"
-)
-
-// FS is a virtual filesystem that contains core GopherJS packages.
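The two filter passes deleted above desugar compound assignments (x op= y) and inc/dec statements into plain assignments before translation, introducing temporaries so that operands with side effects are evaluated exactly once, as Go requires. A minimal, hypothetical sketch of the semantics the rewrite has to preserve (the index helper and the slice are illustrative, not compiler code):

package main

import "fmt"

var calls int

// index stands in for any side-effecting index expression.
func index() int {
	calls++
	return 0
}

func main() {
	a := []int{10}
	// filter.IncDecStmt first turns a[index()]++ into a[index()] += 1;
	// filter.Assign then rewrites a[index()] += 5 into, roughly:
	//   _slice := a
	//   _index := index()
	//   _slice[_index] = _slice[_index] + (5)
	a[index()] += 5
	fmt.Println(a[0], calls) // 15 1: index() ran exactly once
}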
-var FS = filter.Keep( - http.Dir(importPathToDir("github.com/gopherjs/gopherjs")), - func(path string, fi os.FileInfo) bool { - return path == "/" || - path == "/js" || (pathpkg.Dir(path) == "/js" && !fi.IsDir()) || - path == "/nosync" || (pathpkg.Dir(path) == "/nosync" && !fi.IsDir()) - }, -) - -func importPathToDir(importPath string) string { - p, err := build.Import(importPath, "", build.FindOnly) - if err != nil { - log.Fatalln(err) - } - return p.Dir -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/fs_vfsdata.go b/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/fs_vfsdata.go deleted file mode 100644 index 8931d724b..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/gopherjspkg/fs_vfsdata.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by vfsgen; DO NOT EDIT. - -// +build !gopherjsdev - -package gopherjspkg - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - pathpkg "path" - "time" -) - -// FS is a virtual filesystem that contains core GopherJS packages. -var FS = func() http.FileSystem { - fs := vfsgen۰FS{ - "/": &vfsgen۰DirInfo{ - name: "/", - modTime: time.Date(2019, 4, 6, 11, 9, 42, 660657937, time.UTC), - }, - "/js": &vfsgen۰DirInfo{ - name: "js", - modTime: time.Date(2019, 3, 10, 16, 38, 53, 764271817, time.UTC), - }, - "/js/js.go": &vfsgen۰CompressedFileInfo{ - name: "js.go", - modTime: time.Date(2019, 3, 10, 16, 38, 53, 764987009, time.UTC), - uncompressedSize: 8002, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x59\x5f\x6f\xdc\x36\x12\x7f\x5e\x7d\x8a\x39\xa1\x40\x56\xcd\x56\xbe\xb6\x86\x51\x38\xe7\x87\xa4\xb9\xfa\xdc\x4b\xdc\x00\x6e\xd0\x07\x23\x30\xb8\xd2\x68\x97\xb1\x44\xea\x48\x6a\x37\x7b\xb6\xbf\xfb\x61\xf8\x47\x2b\xad\xa4\xc4\xbe\x24\x2f\x75\xc5\xe1\x6f\x7e\x9c\x19\xce\x1f\xee\xd1\x11\xbc\x63\xd9\x2d\x5b\x21\x7c\xd4\x50\x2b\xb9\xe1\x39\x6a\x28\x1a\x91\x19\x2e\x85\x86\x42\x2a\xe0\xc2\xa0\x62\x99\xe1\x62\x05\x5b\x6e\xd6\x20\x98\xe1\x1b\x84\xdf\xd9\x86\x5d\x65\x8a\xd7\x06\x5e\xbe\xbb\xd0\x29\xfc\xca\xca\x52\x83\x91\x60\xd6\xa8\xb1\x83\xc2\x14\x82\x51\xc8\x0c\xe6\xa0\x6b\xcc\x38\x2b\xcb\x1d\x2c\x77\x70\x2e\xeb\x35\xaa\xdf\xaf\x80\x89\x1c\x8c\x62\x42\x97\x56\x28\xe7\x0a\x33\x53\xee\x3c\x18\x57\x90\x49\xa5\x50\xd7\x52\xe4\x44\xa3\xa3\x5a\xef\x84\x61\x9f\xd2\xe8\xe8\x28\x3a\x3a\x82\xf7\x1a\xe1\x2d\xbb\xc5\xbf\x14\xab\x6b\x54\xb4\x1f\x3f\xd5\x52\x23\x54\x68\xd6\x32\xb7\xf4\xf6\xbb\x53\xf8\x6b\x8d\x02\x6a\xa6\x35\xc1\x6e\x58\xd9\xa0\x6e\xb5\x2f\x48\x37\x14\xb2\x2c\xe5\x96\x96\xcd\xae\x46\xc8\xa4\xd8\xa0\xd2\xed\xb9\x6a\x54\x85\x54\x15\xe6\xa7\x9e\x02\xdc\xc3\xb9\x74\xb2\xfd\x7f\xf7\x5d\xda\x9d\xf5\x7b\xf8\xb5\x83\xb9\x64\xd9\x2d\x91\xb4\x56\x2f\x58\x86\x77\x0f\x70\xef\x71\x7f\x18\xfb\xf7\xd4\xef\x5d\x09\x8f\xbb\x94\xb2\x84\xc1\xbf\x7b\x78\x25\x65\x89\x4c\x0c\xbe\x8f\xcb\x77\x24\x3c\x2e\x9d\x61\x85\x4a\x5b\xf7\x16\xa5\x64\x46\xdb\xfd\x97\x4d\xb5\x44\x35\xd4\x67\x45\x4e\x8e\xbf\x88\xab\x8d\x22\x7f\x0c\xf6\x5f\x4d\x7c\x1f\x97\x1f\xe2\x5e\x7f\xe0\xc2\xfc\x32\xdc\x7f\x21\xcc\x2f\x2f\x95\x62\xbb\x83\xef\xe3\xf2\x13\xb8\x3f\x9e\x8c\xe1\xfe\x78\x32\x00\x9e\x92\x9f\xc0\xfd\xf9\xa7\x85\xfb\xa3\x87\xfb\xf3\x4f\x53\xb8\xd3\x74\x3b\xb8\xcd\xc8\xc1\xee\xe1\x3d\x1f\x33\xc4\x94\xfc\x14\xee\xe1\xc1\x1c\xee\xd0\x10\x53\xf2\x53\xb8\xce\x10\x4d\x7b\x44\x87\x3b\x34\xc4\x7d\x4f\xea\xf3\xb8\x36\x22\x7f\xfe\xe9\x80\xef\x6f\xee\xeb\x01\xf0\x94\xfc\x24\xee\x41\xa4\x7b\xdc\x93\xe3\x29\xdc\xc9\x9b\x11\x70\x59\x59\x82\x34\x6b\x54\xa0\x4b\x9e\xa1\x0e\xfb\x87\xb1\x0b\xfb\x78\x68\xb3\xcc\x67\x70
\x69\xbf\x1e\xee\xd7\x88\x4e\x53\x2f\xdd\x4d\x7d\x1f\xe2\xee\x2b\xc4\x81\x1d\xfc\xf7\x43\x7d\x24\x3f\x4f\xd3\xb4\xc3\x3a\x81\xef\x3f\xea\xf4\x8f\xe5\x47\xcc\x4c\x8b\x6b\x78\x85\xe9\x9f\xbc\xc2\x83\xfd\xaf\x99\x19\x63\x33\x21\x3f\xe4\xfb\xc3\xf8\x2a\x70\xa1\x0d\x13\x19\xca\x02\x2e\x65\xbe\xcf\xeb\x1d\x6a\x9f\xc5\xad\x58\xad\x17\x94\xa5\x9a\xcc\xe8\x71\xdc\x0e\x8c\x95\xbf\x76\x39\x6d\xdc\x81\xf7\xbe\x14\xbd\xcc\x73\x4e\x76\xa4\x72\xbb\xb0\xb5\x9c\x79\x2d\x54\xc6\x0c\xe3\x82\xd2\x22\xeb\xf2\x2c\x38\x96\xf9\x02\xa4\xa0\xe2\xbb\xb6\xe5\xce\xa0\x30\x20\x0b\x57\x0c\x69\x19\xb6\xbc\x2c\x61\x89\xb6\x6e\x62\xde\x2f\xa9\x36\xd7\x6f\xc8\xf7\x54\xd2\x58\x1a\xd5\x6d\x83\x11\x11\x27\xaf\x87\x6b\x60\x81\x04\x2a\xcf\x6d\xd8\x58\x48\x2b\xdd\x69\x2d\xb8\xd1\x6d\x29\xff\x06\x6d\xc5\xb0\x91\x80\x97\x20\x78\x09\xb5\xb4\x96\x25\xc9\x3d\x63\xfc\x4f\xc3\xca\xfe\x71\x9f\x69\x88\x45\x53\x96\x71\x1a\xe4\x32\x26\x40\x48\x43\xf6\x69\xc8\x3a\x8c\x4e\x5a\xb1\x1a\x6e\x71\x97\x46\xf6\x42\x78\x49\xe7\x8a\x3b\x7f\x48\xf8\xde\x7f\x7e\xb0\x76\x3a\x47\x03\x0a\x4d\xa3\x84\xb6\x96\x77\x42\xcf\x6c\x97\x56\xa3\x32\x3b\xd7\x8b\xd1\xd2\x8a\x6f\x50\x38\x78\xba\x21\x30\x97\x01\x2b\x21\x98\xf9\x2d\xee\x7c\x09\x4c\x5a\x25\x77\x1e\x1c\x64\xea\x6d\xec\x25\x13\xaf\xff\x0a\x0d\x50\x5b\xb4\xf2\xfa\x6d\x6f\xe4\x0d\xf7\xff\x92\xb9\xea\x91\x59\x78\xcc\xde\x6d\xbe\xdb\x13\xf2\xd2\x5e\x2c\xf0\x7a\x8d\x25\x1a\x04\x85\x95\xdc\xe0\x57\x99\xc6\x21\xf5\xac\xd3\xd1\xbe\x5f\x0d\x9a\xdf\xa0\x58\x99\xf5\xb8\x53\xe2\xd2\x2e\xc6\x2d\x85\x85\x6f\x14\x8d\xbb\x1f\x5c\x98\x11\x06\x0e\x71\x9e\xd0\xf2\x88\x47\xda\x65\xa7\xff\x42\xe4\xf8\xa9\xa7\x9e\x3f\x33\x6b\xc0\x12\x2b\x7f\x43\x99\x70\xa9\x7a\x44\x95\xdd\x3c\xe7\xa4\xe9\x73\x41\xe0\xc5\x3a\x41\xe0\xb4\x6a\x34\x4f\x56\x19\x36\x3b\xad\x8f\xf0\xb6\x97\x3e\x70\x38\x5d\x7d\xc8\xdc\xfd\xef\x9a\xdc\x65\x81\x43\x57\x0b\x56\xe1\x08\x17\x02\x99\xd3\x5a\x1b\x7b\x4c\xad\x34\x0c\x6a\xc9\xa4\x61\x5a\x00\xb7\x33\x4d\xd3\xbd\x5b\x36\xf2\x16\x07\x0c\x29\x53\x61\x59\xa4\xf0\xe7\x9a\x6b\x97\x31\x0b\xc6\x4b\xe0\x05\x70\x9b\x4c\x28\x47\xb0\xb6\x04\x8e\xba\x8c\x80\xe7\x4f\x24\xda\xd9\xd5\x21\x79\x89\x5b\xc8\x6c\xaa\xa4\x6c\x24\x70\xdb\xd6\x16\x97\xd9\xb9\x76\xa5\x3a\xe4\xdb\x51\xd2\x7d\xc6\x30\xcf\xa4\x70\x29\x4c\xaa\x64\x84\xff\x25\x6e\x9f\x4a\x3e\x6c\xe9\x30\xa7\x19\x64\xe4\xce\xf5\xaf\x97\x1d\x48\x58\x96\x49\x65\xc7\xc3\x7e\x41\x3a\x1c\xdb\x46\xa8\x92\x92\x79\xe2\x60\x86\xac\xfc\xaa\xbf\x12\x6e\x96\xf8\x12\x23\x3f\x72\x7c\x05\x27\xa7\x68\x9e\x04\xa8\x21\xaf\x56\x22\x04\xe2\x58\xc5\x18\xe4\xa1\x47\x73\x82\x79\xcd\x94\xc6\x0b\x61\xc6\xbc\x7b\x21\xcc\x64\xe2\x72\x6b\x2d\xab\x93\xe3\xc7\xf0\x3a\x39\xfe\x76\xcc\x4e\x8e\x1d\xb7\x93\xe3\x71\x76\x76\xdd\xf1\x7b\xcf\x1f\x45\xb0\xf9\x96\x0c\x9d\xce\x79\x12\x50\x87\x1c\x5b\x09\x47\xd2\x0e\x06\x5f\xe4\x18\x86\x84\x27\x92\xb4\xe0\x63\x34\xed\xc2\x3c\x69\x71\x87\x34\x83\x44\xeb\x6a\x77\xc9\x1f\xe3\xee\x90\x0e\x52\xb8\x42\x04\xc3\x96\x25\xd5\x06\x08\xdd\x62\x26\x2b\x5b\x62\xa8\x31\xcc\xd1\x30\x5e\x8e\xdd\x91\x56\xa3\x73\x77\xdb\x09\x8f\x3a\xbd\x95\xf4\x8e\x17\x9a\x15\xa3\x54\xa9\x63\x13\xd6\x37\xb5\x51\x0b\xd8\xae\x79\xb6\xb6\x6d\xdd\x12\x3b\xc7\xd8\x70\x06\x8d\xc5\x48\xdf\xb9\x66\x31\x85\x4b\x69\x2c\x0f\x91\x63\x6e\xa9\xd7\xcd\xb2\xe4\x19\x35\x82\x63\x61\x60\x77\xfb\x30\xa8\x8d\x1a\x8b\x83\x20\xe2\x38\xff\x53\x29\xa9\x00\x45\xc6\x6a\xdd\x94\x36\x9b\x77\xfc\x8b\xb4\xaa\x29\x79\x4b\x8d\xae\x3b\x6e\x94\xc0\x9c\x28\x49\x60\x70\x2e\xa1\x66\x82\x67\xb6\x2d\xae\xd8\x8e\xce\xa3\x30\x93\x1b\x54\x98\x2f\xa8\x80\xda\x94\x25\xe0\x7b\xa7\xc7\xac\x99\x81\xb5\x2c\x73\x67\x9d\x43\x4d\xa1\x58\xb8\x9e\xd6\x6d\xf
1\xd3\xc5\x5d\x34\xf3\xa7\x8c\xba\xc4\xbb\xb6\xae\x50\x6b\x72\xb4\x1f\x2c\x3a\x67\xca\xa7\x35\x39\x13\xa2\x52\x9e\x62\xe2\x80\x3b\x49\x32\x9a\x79\x13\xc6\x87\x20\xa7\x10\xc3\x73\xfa\xd3\x76\xba\xb1\xd7\x1f\x27\x6d\x1a\x8d\x42\x82\x67\xd9\x6d\x8f\xaa\xb6\x5f\xda\xe6\xf2\x2b\x19\x5b\xfc\x31\xc6\x2d\x35\xab\x6f\x48\xec\xbc\x94\x4b\x56\xda\x3e\x47\xf7\x27\x90\x95\x5b\xf1\xe1\x3b\x8f\xb7\x5c\xe4\x72\x1b\xdb\x08\x5c\x2a\xb9\xd5\xe1\x0d\x2e\x3e\x7f\xf3\xc7\xab\x97\x6f\xdc\x0a\x8d\xaa\xe9\x47\x9d\xa4\xd1\x86\xa9\x80\x1e\xdc\x46\x0a\xdf\xca\xbc\x29\xd1\x2b\xdc\xcf\x00\xfe\xfc\x71\x65\x97\x63\xd8\x30\xc5\xed\xf5\xd5\x68\x68\xfa\xf2\xb8\x29\xfc\x8b\x0b\x73\xea\x06\x09\x70\xc2\xf6\x31\x56\x19\xd7\xb4\x3d\xfb\xa8\x53\xa7\xc2\x1d\xdb\xad\x69\x3a\xf8\xfe\x7f\x2f\x59\x85\xf1\x82\x5a\x88\xe4\x99\x23\xea\x59\x75\x89\xbe\x17\x39\x16\x9c\x22\x7d\xcf\xb5\xe3\x11\x47\x3b\x6e\x82\x54\xec\x80\xf6\xbb\xba\x58\xaf\x71\xd9\xac\x56\xa8\x60\x45\x2d\x6f\x26\xab\x9a\x97\x87\x33\x2e\x35\xfc\xb9\x97\x7b\x11\x53\x7c\x18\xdb\x10\x7b\x77\x07\x88\x79\x02\x77\x9d\xcc\x28\x58\xe9\x1b\x9f\x5e\x0f\xef\x97\x86\x53\xaf\xbb\x7f\x0a\x6b\x85\x1a\x85\xd1\xc0\x1f\x93\x60\xfa\xaa\x5c\xef\x3d\xd2\x7a\xb5\x51\x27\x78\xe9\xe3\xeb\x2d\xbb\xc5\xdf\x08\x62\xab\x58\xad\xbb\x9d\x1e\x85\x8e\xb3\x2c\xcb\x32\xd4\xe1\x8d\x3f\xbc\x97\xcb\xe2\xc0\x36\xd4\x4f\xc6\x2e\xe0\x98\x5a\x35\x64\x1a\x1d\xd3\x14\xb6\x95\x2a\x0f\x79\x3c\xa8\x9b\x17\xc2\x3d\xec\xd8\x2e\xd4\x13\xb4\x5d\xb6\xdb\x08\xd7\x1f\xda\x8c\xf9\x85\xb3\xb8\x18\x76\xbd\x7a\xfc\x5d\xe5\x15\xc4\x8b\x43\xa3\x14\x22\x09\x97\xea\xdf\xb8\xd3\x3d\x7f\xdc\xd2\x07\x1f\xe2\x6e\xa4\x18\x3e\x47\xb8\x03\xd0\xd6\x6e\x3a\xbf\xfe\xb0\xbf\xd2\xbc\x00\x09\x67\x67\xf6\x29\xe1\xfe\xde\xfd\xbd\x8f\xb7\xbb\x68\xd6\x35\xff\xec\x21\x9a\x31\x38\x3d\x0b\xfc\xed\x6d\x70\xa8\x71\xe2\x4f\x43\xb4\xe2\x05\xc8\x24\x9a\x69\x12\xa5\xc3\xcd\x83\xc6\x05\xb0\x76\x58\x4c\xa2\x99\xfd\xd1\x86\x84\xfe\xfe\x02\x38\xfc\xa3\xb3\xf8\x02\xf8\xf3\xe7\x56\xbd\xbe\xe6\x1f\xe0\x0c\x58\x3b\xf1\xed\xb3\x0d\xd1\xf1\xec\x74\x27\x34\xc2\x4f\x2a\xfb\x31\x62\x18\xb1\xae\x54\xae\x99\xb6\x31\x54\x53\xda\x29\x6c\x21\x09\x37\x1f\xf3\xf6\xf5\x46\x16\x14\xd0\xef\xb5\x5d\x2a\x79\xc6\x0d\x5d\x39\x83\xca\x06\x8e\x76\x7f\x76\x7e\xb5\xf1\xbf\xe3\xf8\x0a\x63\x1f\xa2\x0e\x7f\xcd\xd9\x07\x96\x27\xfb\x99\xf0\xdf\x90\x81\x0e\x2f\x4b\x12\xcd\xe4\xa4\x23\x68\x38\x21\x01\x97\x9e\x6e\x6e\xc2\xcd\xbd\x71\x87\xbf\xb9\x89\x17\xb0\x49\xa2\x59\xe0\x7c\x7a\x06\x1b\x07\xd1\x19\x94\xe2\x24\x94\x1f\x2b\x14\x8f\xb8\xcb\x2f\x8d\x38\xad\xb2\x9e\xf7\xcb\xc1\x71\xd1\x8c\xa2\xad\x72\xb0\xf5\xed\xaa\x53\x38\xe0\x6f\x67\x10\xc7\x70\x07\x47\x47\x76\x78\x0b\x3e\x88\x66\xb3\x59\x26\x85\xe1\xa2\xc1\x68\x46\xfe\xf6\xa7\xf2\x28\x34\xe7\x76\x60\x16\xee\x7e\x86\x59\xae\x0d\xf8\x8e\x35\x67\xe3\x57\x10\x3f\x39\x13\xf1\xff\x62\x78\xd3\x25\x23\x59\x2d\x81\xb1\x92\x75\x47\x57\xb2\x08\x47\x31\xbb\x3a\x4e\x16\x60\x54\x83\xe1\x12\xb0\xba\x2e\x77\x04\xe0\x86\x70\x3a\xfa\x43\x2f\x5e\x65\xd4\x8e\xbb\xf6\xcd\xfb\x55\x53\x14\x53\x21\xdb\x15\x28\x94\xac\x80\xc1\x72\x67\xfc\xc3\xb5\x0f\xa5\x3e\xce\x7c\x09\xd7\x1f\x48\xa6\x77\x74\xf7\xd0\x3d\x0c\xa6\x25\xc5\x4a\x51\x50\x51\x3c\x3d\xf3\xa8\xf6\x60\xdf\xb9\xaf\x71\xe2\xe6\xa4\x68\xe6\xde\x8e\x0e\xa5\xfc\x8b\x52\x2b\x15\xae\x64\x47\xc4\xbe\xbc\x84\x88\x5a\x5a\x8e\x6d\xc2\xb0\x72\x94\x31\xac\xb2\xf0\xdf\xe7\x0e\x35\x64\xbf\xb7\xee\x1d\x56\xf3\xaa\x2e\xd1\x3e\x52\x52\x2f\x97\xc2\x85\x7d\xa1\x68\x0b\x8d\x7d\xc2\xd4\x6b\xa9\xcc\xda\xfe\x92\x27\xd5\xf0\xee\x6b\x98\x2f\xb1\x90\xaa\x3b\x61\x24\xbe\x37\x7c\x3b\xf1\x62\xed\xfa\xad\x1e\x87\xfd\xcf\x06\x4f\x64\xe1\x7f\xa3\x
98\x26\x71\xd5\xff\xb9\x23\x72\x1e\xe6\x82\xd3\x00\x73\x17\xcd\x8e\x8e\x80\x6d\x24\xcf\x21\x47\x96\x43\x26\x73\x04\x2c\x79\xc5\x05\xa3\xb0\x8d\x66\xd6\xc7\xb6\x87\xbb\x7b\x88\x66\x37\x70\x06\x18\x3d\x44\xff\x0b\x00\x00\xff\xff\x72\x0d\xcb\x80\x42\x1f\x00\x00"), - }, - "/nosync": &vfsgen۰DirInfo{ - name: "nosync", - modTime: time.Date(2019, 3, 5, 13, 38, 20, 257702305, time.UTC), - }, - "/nosync/map.go": &vfsgen۰CompressedFileInfo{ - name: "map.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 233338323, time.UTC), - uncompressedSize: 1958, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x55\x4d\x8f\xdb\x46\x0c\x3d\x5b\xbf\x82\x3d\x55\x2e\x14\xe7\x9e\x62\x0f\x05\x7a\x29\xd0\x34\x40\xdb\x5b\x90\x03\x2d\x71\xac\x81\xe7\x43\x1d\x52\xeb\x2a\x8b\xfd\xef\x05\x39\xb2\x57\xde\x24\x45\x0f\xbd\xd9\x23\x0e\xf9\xf8\xde\x23\x67\xc2\xfe\x8c\x27\x82\x94\x79\x49\x7d\xd3\xbc\x7d\x0b\xef\x71\x02\xcf\x80\xd0\xe7\xd4\xcf\xa5\x50\x12\x88\x38\xc1\xc5\xcb\x08\x18\x73\x11\xff\x99\x86\x37\x7d\x4e\x2c\x98\xe4\x8d\xf8\x48\x10\x32\x0e\xdc\x01\x4b\x2e\xc4\x1d\x60\x1a\x60\xa0\x40\x42\x7c\xd0\x9c\xbf\x88\xa6\x64\x74\x04\x2e\x17\x88\x73\x10\x3f\x05\x82\x53\x2e\x79\x16\x9f\x88\x41\x32\xf4\x18\x02\xa0\x02\xf8\x9e\x21\x92\x8c\x79\xe0\x0d\x8a\xb0\x68\x2e\x4d\xf7\xe7\x48\xf0\x99\x4a\xbe\x62\x7d\xc4\xe0\x07\x2b\x4a\x71\x92\x5b\xd8\x4f\xf6\x3d\xce\x2c\x90\xb2\xc0\x91\xa0\xcf\x93\xa7\x01\xd0\x09\x15\x70\xbe\xb0\xc0\xcc\x74\x68\x64\x99\xc8\x82\x59\xca\xdc\x0b\x3c\x35\xbb\xa8\x4d\x7f\xf4\x49\xa8\x38\xec\xe9\xe9\xf9\xd3\xe6\x77\xf3\x6c\x54\xfd\x9a\x71\x80\x42\x32\x97\xc4\x20\x23\x29\x90\x99\x2a\x0b\x03\xf8\x64\x67\xca\x9d\x36\x8d\x70\xa6\xa5\x83\x5c\x20\xf9\x00\xde\x41\xca\x9a\xa3\x5e\xf1\x0c\x53\x21\xa6\x24\x87\x6b\x83\xf9\x0c\x85\x78\x0e\x02\x3e\x0d\xbe\x47\x21\x86\xcb\x48\x32\x52\x59\x2f\x5d\x90\xc1\xe5\x39\x6d\x4b\x1d\x1a\x37\xa7\x1e\xda\x08\x3f\xbc\xc7\x69\x6f\x10\xdb\x33\x2d\xb0\x41\xbf\x87\x76\xad\xfa\x72\xd6\x69\xbd\x63\xce\x61\xaf\xcd\xdb\x67\x3b\x7a\x80\x78\x88\x1f\xcf\xb4\x7c\x6a\x76\xb5\x53\xb8\x7d\x5c\x59\xf8\x43\xdb\x05\x26\xd9\x72\x70\xeb\xf8\x35\x20\x8b\x6e\x8d\x8a\x2f\x40\x58\x6d\xef\xb4\x24\x3c\x3c\x18\x4f\x4f\xcd\x6e\x67\x7f\x21\xe2\x99\xda\x7f\xd1\x64\xdf\xec\x9e\x9b\xdd\x15\x2d\x3c\xd4\xf4\x1b\xa5\x3e\x94\x8a\x74\x2b\x18\xfd\xed\x59\x7c\x3a\x6d\x50\xeb\xb1\x11\xe6\xee\x24\xf9\xa0\xc4\x5f\x3c\x53\x07\x5e\x56\xa3\x9b\xe5\xb6\xe9\x4e\xfe\x91\x56\x82\x6e\x3a\xea\x68\xd0\x70\xd3\x92\x41\x8a\x76\xed\x36\x64\xa9\x90\x35\xac\x03\x87\x81\xed\x73\x75\xd1\xd7\xf4\x5c\x1b\xf9\x26\x89\x2d\xf6\x32\x63\xb8\x97\x77\x85\x71\x93\xd8\xbb\x17\x21\xe1\xdd\x8b\xcc\x3f\xea\x7f\x65\xfd\x5e\x6d\x05\x6d\x04\xff\xcf\xf2\xbc\x2a\x63\xdd\xaf\x9a\xfd\x6c\x0b\xe4\xba\x47\xfe\x8b\xb7\xea\x8d\x2f\xed\xfe\x55\x57\xd5\xc2\x86\xaa\x96\x68\xe3\x21\x76\x9a\x76\xbf\x02\xf8\x1d\xd3\x89\x6c\x2b\x31\x38\x60\xfa\x6b\xa6\x24\x1e\x43\x58\x0c\x02\x61\x3f\x9a\x53\xd4\x05\x15\xd9\x6a\x98\xbb\x79\xd4\xf5\xe7\xc0\xdd\x7c\x62\x2d\x76\x50\x2c\x39\x4b\x9e\x6a\x6b\x5e\xa8\xa0\xf8\x9c\xae\xdb\xab\x56\x1f\x32\xb1\x6d\xaf\x44\x3d\x31\x63\xf1\x61\x81\x3e\x97\x42\x3c\xe5\x34\xe8\xda\xc4\xa4\x27\x89\x3d\x8b\xd6\xe6\x84\x13\x8f\x59\x20\x57\x8b\xd9\x3a\xd5\x84\x7d\x4e\x1a\xc0\xef\x20\x65\xc3\x7d\xf1\x21\xe8\x56\x7c\xf4\xec\x85\x06\x88\x3a\x1d\x32\x62\x82\x9c\x7a\xea\xe0\x38\xcb\xbd\x4f\x8d\xf8\xb4\xe8\x65\x4d\xa8\x2b\xbd\xae\xba\x5c\x56\x99\x86\xbb\x7d\xdd\xad\x4d\x44\x5c\xa0\x90\x0b\xd4\x8b\xdd\x8f\x38\x4d\x3a\x74\x75\xdc\x50\xae\x09\x5d\xc9\xd1\x02\xa6\xec\x93\xc0\x30\x17\x8d\xd2\xfa\x2f\x52\xdc\xd3\xa3\x99\x8f\x04\x1f\xda\xdf\xf6\xf5\x81\xd2\xe
0\x34\xc7\x23\x15\xed\x9f\x02\x45\x6d\x79\xbb\x8b\x49\x47\xd4\x6f\x14\xb1\xca\x36\x75\xf5\x5d\xb0\x97\xcf\xde\xb6\x4d\x26\x73\xc1\x6b\xbf\x19\x86\xd6\x81\x9e\x7e\x73\x1a\x6f\x13\xa7\xdd\x9e\x3b\x78\xd4\x69\xab\xea\xab\x23\xd5\x8a\xde\xc1\x77\xae\xd5\x6f\x16\xb8\xdb\x1d\x0b\xe1\xb9\xd9\xa9\x37\xf5\xad\xf9\x27\x00\x00\xff\xff\xe8\x19\x65\x16\xa6\x07\x00\x00"), - }, - "/nosync/mutex.go": &vfsgen۰CompressedFileInfo{ - name: "mutex.go", - modTime: time.Date(2019, 3, 5, 13, 38, 20, 257752198, time.UTC), - uncompressedSize: 2073, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\xcb\x6e\xdb\x30\x10\x3c\x4b\x5f\xb1\xc9\xc9\x4e\x62\xa5\xbd\xb6\xf5\xa1\x68\x81\x22\x40\x7a\x09\x50\xe4\x4c\x53\x2b\x99\xb0\x44\x1a\x24\x55\xd5\x4d\xf2\xef\xc5\xf2\x21\xcb\x92\xec\xc4\x2d\xaa\x93\xb0\xe4\xce\xce\xec\x0c\xb8\x65\x7c\xc3\x4a\x04\xa9\xcc\x4e\xf2\x34\xbd\xbd\x85\xef\x8d\xc5\x5f\x20\x0c\x30\xc8\x9b\xba\xde\x41\xbb\x16\x7c\x4d\x05\xa9\xe4\x62\x55\x29\xbe\x11\xb2\xcc\x52\xbb\xdb\x62\xb8\x6c\xac\x6e\xb8\x85\xa7\x34\xa1\x53\xcc\x61\xa5\x54\x95\xbe\x38\xb8\x7b\xc5\x37\x40\x65\x03\x75\x06\x77\xd6\x23\xeb\x46\x2e\xac\xa8\x11\x50\x6b\xa5\x41\x14\x50\xbb\x83\x4a\x23\xcb\x77\xe0\x61\xb2\xb4\x68\x24\x87\x59\x0d\x57\x6e\xce\xdc\x81\xcd\xe6\x34\x88\x3a\xb2\x30\xed\x29\x4d\x92\x2d\x93\x82\xcf\x2e\xbd\x8e\x0f\x50\x77\x22\x0e\x10\x2f\xe7\x69\xf2\x92\x26\x5d\xe7\x12\xac\x6e\x30\x30\xfd\x21\xa9\x0a\x8d\x7c\x2b\x5b\xa9\xec\x51\xa6\x1e\xac\xe3\x7a\x71\x8a\xac\x9f\x08\xaa\x08\x7f\x98\x7b\xfe\x63\xb6\x05\xab\x4c\xa4\xfb\xf0\x78\x96\x53\xf1\xfa\xde\xab\x56\x0b\x8b\xf7\x1e\x9a\x3e\x67\x5a\x42\xeb\xa2\xe2\x17\xd5\x48\x8b\x1a\x84\xb4\x13\x4e\x42\xa1\x34\x10\x00\x0d\x38\xb1\x27\xdd\x8e\x4d\x70\xbd\x54\x10\xb2\x84\x1e\x4c\xd8\xa1\x6e\xe1\x2a\x90\x1d\x18\xae\xdb\x6c\xc8\xee\x62\x09\xef\xe0\xf9\x99\x8e\xfa\x72\xce\x4e\xc4\xa0\xff\x54\x2e\x74\x7b\x9e\xf8\x7d\x4a\x0e\xfa\xa6\xd4\x0e\x43\xf3\xba\xaa\x57\xa2\x33\x92\x75\x10\xa0\x91\xa1\xc1\x94\xff\x69\xe8\xc3\xd0\xd1\x7f\xb5\x6d\x90\x88\xeb\xeb\xa8\xae\xb3\x2d\x57\x48\x5a\x8c\x90\x65\x85\x41\x35\x67\x55\xf5\x11\x84\x05\x77\x48\x16\xb1\xa2\x40\x6e\x41\xd9\x35\x6a\x30\xa2\x6e\x2a\xcb\x24\xaa\xc6\x38\x65\xa8\xcd\xd9\x4e\xc7\x6d\x4e\xae\x61\x60\xf5\x44\xb4\x97\x14\xed\xbf\xb2\x7c\x80\xb4\x58\x84\x95\x3c\x32\x61\xbf\x69\xd5\x6c\xdf\xfa\x66\xec\x1b\xf6\xaf\x06\x1f\xbd\x0b\x9f\xf3\x1c\x58\x9e\x1b\xc8\xb1\xb2\xec\x26\x20\xd6\x6c\x07\x2b\x04\x89\x25\xb3\xe2\x27\xde\x80\x55\x60\xd7\x7d\xcc\xbb\xc2\x15\x22\x60\xe9\x9c\xe8\xae\x13\xaa\x53\x6e\xe2\x02\xdb\x12\xae\xba\xee\x39\x5d\x98\xb9\x89\x44\xc5\xed\xb1\x2d\xb3\x08\x76\xbd\xf4\x6c\xdc\x72\x7b\xf5\x4f\x87\x3b\xf5\x1b\x8d\x43\x7b\xdc\xc2\x7d\xbf\x53\x2f\xf3\xab\x92\x08\x39\x72\x8d\x35\x4a\x6b\x06\x62\x42\xc3\x11\xae\xd4\x3b\x8b\x1c\x89\xf8\xe2\xfd\xbc\x67\x4a\x10\x4a\x49\x9a\x44\x8d\xe1\xfa\x8d\x5a\x1d\x99\x40\xbf\x5d\x9a\x7a\x82\x2f\x96\x53\x8a\xc7\x13\x22\x7c\x54\xfc\x27\x00\x00\xff\xff\xec\x95\x29\x83\x19\x08\x00\x00"), - }, - "/nosync/once.go": &vfsgen۰CompressedFileInfo{ - name: "once.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 233609287, time.UTC), - uncompressedSize: 1072, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\x53\xcb\x92\xda\x40\x0c\x3c\xdb\x5f\xd1\xb5\x27\x9c\xa2\xe0\xbe\xa9\x1c\x52\xc5\x65\x4f\x39\xe4\x0b\xc4\x58\x03\xca\x0e\x1a\x32\x0f\x58\x67\x8b\x7f\x4f\x69\x6c\x08\xb9\xd9\x92\xba\xd5\x6a\x69\xce\xe4\xde\xe9\xc0\xd0\x98\x27\x75\x7d\xbf\xdd\xe2\x87\x3a\x86\x64\x90\x22\xee\x7f\xb1\x2b\x28\x47\x2a\xb8\x4a\x08\x38\x73\xf2\x31\x9d\xc0\x1f\xe4\x4a\x98\x10\x95\x41\xae\x48\xd4\x4d\x5f\xa6\x33\xcf\xe0\x5c\x52\x75\x05\x9f\x7d\x37\x46\xd1\x03\xf6\x31\x06\xfb\x56\xc6\xfc\x7d\x6b\x8d\x76\x11\x8e\x42\xc8\x28\x47\x86\xaf\xda\x78\xe0\x21\x1e\xa4\x23\xa2\x86\xc9\xbe\x77\xd1\xd4\xec\xd9\x98\xac\x9e\x47\xf8\x98\x0c\x64\x24\x5e\x52\x2e\x28\x72\xe2\x25\x2a\x19\xa2\xb9\x90\x09\x89\xbe\x09\xda\xe0\x4d\x11\xcb\x91\x13\xae\x31\x8d\x79\x8d\x83\x5c\x58\x0d\xde\x5d\x28\x21\x5a\xad\x15\x5a\x44\x7c\xfb\xdf\xec\xe2\xca\x0f\xd6\x79\xe9\x79\xaa\xa1\xc8\x39\x70\xeb\x95\xd7\xb3\xbc\xa6\xbc\x29\xb0\xaa\xd9\x23\xd1\x4b\x7c\x67\xf8\xb5\xb1\xf1\x85\xd5\x28\x3d\x8e\x94\x41\x18\xc5\x7b\x4e\xac\x05\x17\x0a\x95\x21\x0a\x26\x77\x6c\x20\x47\xcd\x48\xe0\x3b\x94\xaf\xcf\x53\x3c\xaf\x25\xf1\xef\x2a\x69\x31\xa1\x61\x1f\xd6\x95\x08\xfe\x60\x57\x0b\x6f\xfa\xed\x76\xb1\xb8\xf9\x51\x58\xc7\x05\x22\x2a\x45\x28\xc8\x1f\x9a\x31\xb6\xdb\x53\xcd\x05\x7b\x46\xaa\xfa\xb4\x5a\x33\x0e\x3f\xc5\xfa\x36\x05\x92\xa1\x12\x68\x14\xb7\x86\x14\x9c\x68\x32\x8c\xb2\xe3\x9c\x29\x4d\xd6\xbe\x66\x06\xfd\x13\x14\xa4\x70\xa2\x60\x19\x47\xe7\x52\x13\xdf\xd7\x46\xe9\x50\x4f\xac\x25\x5b\x8e\xfe\x1b\x61\xcf\x8b\x85\x23\xf6\x13\x76\xf1\xb5\xed\xc9\x45\xf5\x72\xd8\x3c\x56\x53\xd5\xad\x06\x7c\x62\x89\xdb\x54\x2b\x2f\x81\x95\x4e\x3c\xe0\x36\x2c\x06\xbc\x99\xf5\x8e\x6a\xe6\x6c\x66\xcc\xf4\xf3\x46\xdb\x10\xf3\x55\x93\x8a\xdb\x3c\x23\x5a\x24\xaf\xdb\x89\x46\xcd\x32\x72\xca\x56\x5e\x22\x8e\x74\x61\x24\x2e\x35\x29\x8f\x5f\xe1\x6b\x1b\x6b\x3e\xe4\xd8\xae\x75\x4e\x1a\xd7\x55\xca\x31\xd6\xf9\x38\xec\x7c\x7d\x6b\x62\xda\xb1\x8a\xf8\x62\x2b\x1d\x60\xd3\x60\x9e\x67\xb0\x37\x63\x07\xb8\x69\x8f\xe5\xb3\xef\xba\x85\xac\xbb\x3d\x12\x46\x64\x99\xa6\x71\xf5\x32\xbf\xdc\xd7\xfb\x6b\xe2\xb1\x75\x15\x85\x7f\x19\x1a\xec\x8e\xf9\x86\x92\x2a\xf7\xdd\xc8\x9e\x13\xee\x06\xf6\xdd\x53\x81\xa7\x90\x79\x89\x28\x3f\x10\xb7\xd5\xd0\x77\x7e\x35\xf4\xb7\xfe\x6f\x00\x00\x00\xff\xff\xf9\x72\xbe\xa9\x30\x04\x00\x00"), - }, - "/nosync/pool.go": &vfsgen۰CompressedFileInfo{ - name: "pool.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 233714234, time.UTC), - uncompressedSize: 2130, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x55\x3f\x93\xdb\xc6\x0f\xad\x4f\x9f\x02\xbf\xea\x77\xca\xe8\x74\x49\xeb\x99\x2b\x32\x29\x1c\x37\x89\x8b\x74\x1e\x17\x10\x09\x8a\x88\x97\x0b\x06\xc0\x4a\xa2\x3d\xf7\xdd\x33\x58\xfe\x39\x39\xee\x44\xee\xf2\xe1\xe1\xbd\x07\x68\xc4\xe6\x0b\x9e\x09\xb2\xd8\x94\x9b\xdd\xee\xf9\x19\x7e\x85\x8f\x22\x09\xd8\x00\xc1\xc8\x41\x3a\x70\x1a\x46\x51\xd4\x09\xe4\xf4\x37\x35\x6e\xe0\x3d\x3a\x0c\x38\xc1\x89\x80\x73\xcb\x17\x6e\x0b\xa6\x34\x81\xe1\x85\x5a\xc0\xdc\x06\x94\x92\x2b\xd3\x85\xda\xe3\xee\xf9\xb9\x62\xe7\x09\xd8\x69\x00\x73\x51\x6a\x81\x33\x78\x4f\x73\xc1\x05\x4d\x69\x90\x0a\x51\x5c\x06\x74\x6e\x2a\x2c\x3a\x60\x9e\xc0\x79\x20\xb8\xb2\xf7\x52\x3c\xf0\xb2\x38\x77\xdc\xa0\xb3\xe4\x23\x7c\xe8\xde\xd0\x7a\x49\xad\xd5\x47\xc9\x69\x02\xa5\x8e\x94\x72\x43\x70\xed\x29\x8a\xb2\x41\x8f\xe3\x48\xd9\x0e\x71\x2b\xc0\x2a\xb1\x81\xcf\xbd\x07\x8f\x96\x30\x25\x69\xd0\xef\xd8\x6f\xca\x18\x76\x04\x9d\x28\x14\x23\x38\x4d\x30\x94\xe4\x3c\x26\x82\xb3\xa8\x14\xe7\x4c\x06\xc6\xf1\x16\x33\x49\xb1\x34\xad\x18\x81\xf0\x7f\x83\xb1\xe8\x28\x46\x81\xe5\x02\x0d\x36\x3d\xc1\x56\x0f\x4e\xc5\xa1\xe4\x62\xa1\x90\xd3\x60\xb5\x54\x42\x27\x05\xa5\x62\x74\x98\xc5\x4d\x4c\x17\xce\x67\x18\x95\xcc\x8a\x46\xab\xb5\xe3\x33\xea\x29\x4c\x6d\x24\x25\x6a\x5c\xf4\x08\x7f\x85\x5f\x6c\x07\xe0\xb0\xed\x0b\x59\xfc\x20\xb4\x09\x5c\x02\xec\x54\x38\xb5\x40\x5d\xc7\x0d\x53\xf6\xd0\x44\x09\xdb\xa7\xb9\x51\x25\x82\xc4\xe6\x76\x84\xdf\xe5\x4a\x17\xd2\x0a\xc4\x16\x06\x80\x15\x76\x3c\xa5\x59\x10\x4c\x29\xf0\xee\x3e\xd9\xac\x07\x1c\x47\x95\x51\x19\x9d\xaa\x70\xd2\x01\x6e\x92\xba\xc0\x80\x39\x68\x23\x9c\x55\xca\xf8\x7d\xf0\xaa\x0e\x81\x63\x9c\x28\x7b\x24\xad\xc7\x88\x10\x0e\x92\xcf\x11\x38\x18\xc5\x29\x3b\xd7\xbc\x54\x99\xda\xb0\xa6\x91\xdc\x14\x55\xca\x1e\x41\xa5\x91\x72\x4b\xb9\x86\xa7\x49\xd1\xaa\xcd\x34\x96\x41\x38\xce\x7c\x46\x95\x0b\xb7\x14\x23\x70\xc5\xd0\x28\xca\xa8\xf3\xd7\xcd\x25\x96\x0c\x72\x21\xed\x09\x6b\xd4\xb1\x51\x31\x8b\x16\xa6\x15\xf8\xae\x73\xba\xe1\x10\xf1\x90\x0e\xce\x22\xed\x8f\xdd\x2f\x83\xd0\x0d\xbe\x32\x39\xc0\xb5\xe7\xa6\x87\x01\x39\x3b\x72\x36\xc0\x00\x6b\xa7\x8c\xc3\x3c\x14\x4f\xc6\x5f\xa9\x9d\x47\xe9\x3f\x53\x5a\x7c\x2c\x0e\xa7\xd2\x75\xa4\x16\xee\xd3\x72\xcd\x1a\x4c\x64\x50\x72\x4b\x1a\x70\x49\xb0\x85\xc7\x3a\x13\x95\xfa\x5d\x7e\x51\x09\xb0\x71\xbe\x50\x9a\x60\x54\xce\xce\xf9\xbc\xaf\x4a\x5b\xaf\x9c\xbf\x58\x9d\xa5\x40\xf9\xa7\x30\x59\x43\xd9\xd7\x96\xff\x9c\xdb\x11\xef\x49\xa1\xc7\xdc\x1e\x00\xdf\x32\xb1\xf5\x14\xf6\x19\x8c\xa8\x3e\xab\x61\xbd\xa8\x3f\x25\x8e\xf9\x9f\x37\x0d\xb0\x2d\x73\x1e\xc7\x6b\xd0\x42\xbe\x1a\xb6\xaa\xdf\x01\x8c\x63\xb2\x6b\xc5\xc5\x12\x68\x85\xe6\x74\x6e\xc6\x5d\x29\x25\xe0\xca\xb7\x6e\xaf\x20\x8c\xca\x72\x84\x0f\x35\xca\x43\xe8\xb3\x4d\x40\x78\xde\xe3\x85\xc0\x4a\xd3\x6f\x6b\x8f\xc3\xc5\xa1\x1e\xf7\xc4\x0a\x72\xcd\xdf\xa5\xbd\xf6\xef\xd3\xb8\x2c\x21\x73\x2d\x8d\xc3\xb7\xdd\xc3\xac\xfe\xa7\xcf\x9c\x9d\xb4\xc3\x86\xbe\xbd\xee\x1e\xfe\xa0\x2b\x00\x74\x25\x37\x8f\x7b\xb8\x3f\x79\xad\x8b\xf8\x3d\x39\x18\xa5\x5a\x18\x33\xa0\x9e\xd8\xb7\x59\x80\x4e\x65\xd8\xd6\xdd\x61\x59\x9b\x75\xac\xd7\x93\x75\xdd\x1c\xaa\x67\x4a\x5e\x34\xd7\x0b\x2e\xf5\xc3\x08\x11\xe9\x71\x2d\x15\xfb\xb7\xe9\x25\xb6\x92\x0b\xf0\x39\x07\xe3\xb8\x37\x46\x2b\x01\xe1\x4a\xb1\x45\x3c\x4c\xa3\x61\xf4\xba\xd4\xe0\xb7\x0a\x63\x61\x5e\x49\xed\xac\xb9\x59\x19\xa8\x6e\x6c\xa5\x34\x0f\xcb\x89\xfc\x4a\x94\xe1\x82\xa9\x50\x98\x6e\x31\xa0\x2e\xf0\xb1\xf8\xfa\x7f\x11\xd5\x96\xf3\x99\xee\x3c\xc2\xef\x69\x0b\xd6\x87\xae\x72\xbd\xd6\x52\x35\x5e\x57\x36\x5a\x6e\x43\xe6\x99\xe8\x78\x0c\x69\xeb\x7a\xca\x4f\x99\xd3\xa
1\x7e\xb4\x28\xb0\x16\x52\xb2\x92\x6a\xf0\x42\x88\xba\x47\xe3\xb3\xe3\x2e\x0c\x81\xc7\x11\x7e\x0a\xf1\xf6\xf1\xe9\xf7\xf6\x84\x9f\xdc\x41\xa2\xfc\x38\x1e\xab\xb1\x7b\x78\x79\x81\x9f\xe3\x7d\x1c\xcc\xd5\xff\xf7\x52\xe9\xc4\xbb\x87\x85\x5e\x3d\x78\xdc\xef\x1e\x1e\x5e\x77\xdb\xcb\xcc\x69\x17\xcf\x37\x78\xf7\x02\x0b\xde\xa7\x7b\xec\xa7\x5f\x3e\xef\x1e\x96\x07\x78\xbb\xf2\xee\x87\x3b\x0b\xe0\x6d\x89\x4f\xd5\xb5\x6d\x0d\x6e\xab\xe1\x61\xe4\x0f\xed\x7d\x2c\xfe\x78\xbb\x6f\x6f\xbf\xf4\x77\x8b\xa6\xd6\x16\x66\xec\x4a\xf4\x8d\x4a\xfd\xff\x6c\x57\x12\x07\xb8\xed\x77\xaf\xbb\x7f\x03\x00\x00\xff\xff\x07\xba\x3e\x57\x52\x08\x00\x00"),
-		},
-	}
-	fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
-		fs["/js"].(os.FileInfo),
-		fs["/nosync"].(os.FileInfo),
-	}
-	fs["/js"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
-		fs["/js/js.go"].(os.FileInfo),
-	}
-	fs["/nosync"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
-		fs["/nosync/map.go"].(os.FileInfo),
-		fs["/nosync/mutex.go"].(os.FileInfo),
-		fs["/nosync/once.go"].(os.FileInfo),
-		fs["/nosync/pool.go"].(os.FileInfo),
-	}
-
-	return fs
-}()
-
-type vfsgen۰FS map[string]interface{}
-
-func (fs vfsgen۰FS) Open(path string) (http.File, error) {
-	path = pathpkg.Clean("/" + path)
-	f, ok := fs[path]
-	if !ok {
-		return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
-	}
-
-	switch f := f.(type) {
-	case *vfsgen۰CompressedFileInfo:
-		gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent))
-		if err != nil {
-			// This should never happen because we generate the gzip bytes such that they are always valid.
-			panic("unexpected error reading own gzip compressed bytes: " + err.Error())
-		}
-		return &vfsgen۰CompressedFile{
-			vfsgen۰CompressedFileInfo: f,
-			gr:                        gr,
-		}, nil
-	case *vfsgen۰DirInfo:
-		return &vfsgen۰Dir{
-			vfsgen۰DirInfo: f,
-		}, nil
-	default:
-		// This should never happen because we generate only the above types.
-		panic(fmt.Sprintf("unexpected type %T", f))
-	}
-}
-
-// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file.
-type vfsgen۰CompressedFileInfo struct {
-	name              string
-	modTime           time.Time
-	compressedContent []byte
-	uncompressedSize  int64
-}
-
-func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) {
-	return nil, fmt.Errorf("cannot Readdir from file %s", f.name)
-}
-func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil }
-
-func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte {
-	return f.compressedContent
-}
-
-func (f *vfsgen۰CompressedFileInfo) Name() string       { return f.name }
-func (f *vfsgen۰CompressedFileInfo) Size() int64        { return f.uncompressedSize }
-func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode  { return 0444 }
-func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime }
-func (f *vfsgen۰CompressedFileInfo) IsDir() bool        { return false }
-func (f *vfsgen۰CompressedFileInfo) Sys() interface{}   { return nil }
-
-// vfsgen۰CompressedFile is an opened compressedFile instance.
-type vfsgen۰CompressedFile struct {
-	*vfsgen۰CompressedFileInfo
-	gr      *gzip.Reader
-	grPos   int64 // Actual gr uncompressed position.
-	seekPos int64 // Seek uncompressed position.
-}
-
-func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
-	if f.grPos > f.seekPos {
-		// Rewind to beginning.
-		err = f.gr.Reset(bytes.NewReader(f.compressedContent))
-		if err != nil {
-			return 0, err
-		}
-		f.grPos = 0
-	}
-	if f.grPos < f.seekPos {
-		// Fast-forward.
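		// A gzip stream is not randomly accessible, so Seek (below) only
		// records seekPos; Read reconciles it lazily, either by the
		// Reset-to-start rewind above or by decompressing and discarding
		// f.seekPos-f.grPos bytes here until the positions agree.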
-		_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
-		if err != nil {
-			return 0, err
-		}
-		f.grPos = f.seekPos
-	}
-	n, err = f.gr.Read(p)
-	f.grPos += int64(n)
-	f.seekPos = f.grPos
-	return n, err
-}
-func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
-	switch whence {
-	case io.SeekStart:
-		f.seekPos = 0 + offset
-	case io.SeekCurrent:
-		f.seekPos += offset
-	case io.SeekEnd:
-		f.seekPos = f.uncompressedSize + offset
-	default:
-		panic(fmt.Errorf("invalid whence value: %v", whence))
-	}
-	return f.seekPos, nil
-}
-func (f *vfsgen۰CompressedFile) Close() error {
-	return f.gr.Close()
-}
-
-// vfsgen۰DirInfo is a static definition of a directory.
-type vfsgen۰DirInfo struct {
-	name    string
-	modTime time.Time
-	entries []os.FileInfo
-}
-
-func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
-	return 0, fmt.Errorf("cannot Read from directory %s", d.name)
-}
-func (d *vfsgen۰DirInfo) Close() error               { return nil }
-func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }
-
-func (d *vfsgen۰DirInfo) Name() string       { return d.name }
-func (d *vfsgen۰DirInfo) Size() int64        { return 0 }
-func (d *vfsgen۰DirInfo) Mode() os.FileMode  { return 0755 | os.ModeDir }
-func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
-func (d *vfsgen۰DirInfo) IsDir() bool        { return true }
-func (d *vfsgen۰DirInfo) Sys() interface{}   { return nil }
-
-// vfsgen۰Dir is an opened dir instance.
-type vfsgen۰Dir struct {
-	*vfsgen۰DirInfo
-	pos int // Position within entries for Seek and Readdir.
-}
-
-func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
-	if offset == 0 && whence == io.SeekStart {
-		d.pos = 0
-		return 0, nil
-	}
-	return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
-}
-
-func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
-	if d.pos >= len(d.entries) && count > 0 {
-		return nil, io.EOF
-	}
-	if count <= 0 || count > len(d.entries)-d.pos {
-		count = len(d.entries) - d.pos
-	}
-	e := d.entries[d.pos : d.pos+count]
-	d.pos += count
-	return e, nil
-}
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/natives/doc.go b/vendor/github.com/gopherjs/gopherjs/compiler/natives/doc.go
deleted file mode 100644
index c176d5b33..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/natives/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package natives provides native packages via a virtual filesystem.
-//
-// See documentation of parseAndAugment in github.com/gopherjs/gopherjs/build
-// for explanation of behavior used to augment the native packages using the files
-// in src subfolder.
-package natives
-
-//go:generate vfsgendev -source="github.com/gopherjs/gopherjs/compiler/natives".FS -tag=gopherjsdev
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/natives/fs.go b/vendor/github.com/gopherjs/gopherjs/compiler/natives/fs.go
deleted file mode 100644
index 13bbd3b53..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/natives/fs.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build gopherjsdev
-
-package natives
-
-import (
-	"go/build"
-	"log"
-	"net/http"
-	"os"
-	"strings"
-
-	"github.com/shurcooL/httpfs/filter"
-)
-
-// FS is a virtual filesystem that contains native packages.
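Both vendored packages expose their file trees through net/http's FileSystem interface, so a consumer cannot tell whether it is reading from disk (the gopherjsdev build tag) or from the generated in-memory data. A minimal consumption sketch, assuming only the standard library (the readAll helper and the "." root are illustrative, not part of these packages):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// readAll opens a path inside any http.FileSystem and returns its contents.
func readAll(fs http.FileSystem, path string) ([]byte, error) {
	f, err := fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return ioutil.ReadAll(f)
}

func main() {
	// http.Dir is the on-disk counterpart of the virtual FS variables here.
	b, err := readAll(http.Dir("."), "/go.mod")
	fmt.Println(len(b), err)
}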
-var FS = filter.Keep( - http.Dir(importPathToDir("github.com/gopherjs/gopherjs/compiler/natives")), - func(path string, fi os.FileInfo) bool { - return path == "/" || path == "/src" || strings.HasPrefix(path, "/src/") - }, -) - -func importPathToDir(importPath string) string { - p, err := build.Import(importPath, "", build.FindOnly) - if err != nil { - log.Fatalln(err) - } - return p.Dir -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/natives/fs_vfsdata.go b/vendor/github.com/gopherjs/gopherjs/compiler/natives/fs_vfsdata.go deleted file mode 100644 index 749606e01..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/natives/fs_vfsdata.go +++ /dev/null @@ -1,1056 +0,0 @@ -// Code generated by vfsgen; DO NOT EDIT. - -// +build !gopherjsdev - -package natives - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - pathpkg "path" - "time" -) - -// FS is a virtual filesystem that contains native packages. -var FS = func() http.FileSystem { - fs := vfsgen۰FS{ - "/": &vfsgen۰DirInfo{ - name: "/", - modTime: time.Date(2019, 4, 10, 21, 27, 44, 478007813, time.UTC), - }, - "/src": &vfsgen۰DirInfo{ - name: "src", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225440940, time.UTC), - }, - "/src/bytes": &vfsgen۰DirInfo{ - name: "bytes", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 212650386, time.UTC), - }, - "/src/bytes/bytes.go": &vfsgen۰CompressedFileInfo{ - name: "bytes.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 212511031, time.UTC), - uncompressedSize: 508, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x90\xcd\x4e\xc3\x30\x10\x84\xcf\xde\xa7\x18\x6e\x8d\x68\x55\x72\x45\x4d\x0f\x20\x0e\x3c\x43\xd5\xc3\xda\xdd\x54\x86\xe0\x14\x27\x91\xa8\x50\xde\x1d\xd9\x71\x1a\x19\x55\xca\x21\xde\x9f\x99\x6f\x67\xbb\xc5\xa3\x1e\x6c\x73\xc2\x47\x47\x74\x61\xf3\xc9\x67\x81\xbe\xf6\xd2\x11\xd5\x83\x33\x78\x77\x27\xf9\x79\xb9\xf6\xb2\xea\x70\x38\x86\xce\x1a\x26\x4e\x14\xb0\xae\xc7\x2f\xa9\xba\xf5\xb0\x6b\x68\x3c\x57\xf0\xec\xce\x82\x2e\x94\x95\xad\xa1\x51\x55\x30\xf1\xa5\xbc\xf4\x83\x77\xb0\xa4\xd4\x48\xe1\x4b\x85\x4d\x49\x63\x32\x7b\xfb\x1e\xb8\x59\x71\xd0\x9a\xbc\x0a\xe8\xb6\x6d\xc2\xbe\xad\xd1\x88\x5b\x71\x81\x87\x2a\xfe\xe9\x22\xca\x26\x91\x9a\x9b\x4e\xa2\x6a\xa2\x31\x0b\x0d\xcf\x34\x26\xec\xea\x83\x3d\x66\x40\x69\x35\x87\xea\xfd\x20\x37\xac\xd7\xf6\xeb\xc2\x5e\x72\xb0\xfc\x78\xc3\x77\xfc\x2c\xf6\x19\xeb\x2c\x5e\x4e\x6e\xca\xc4\xc8\x02\x50\xe2\x63\xec\x60\x74\x36\xbb\x99\x87\xa7\xfe\xfe\x7f\xbf\xbc\x91\x2f\x09\xed\xee\x04\x14\x74\x96\xf3\x9e\x68\xa4\xbf\x00\x00\x00\xff\xff\x23\x2d\xfc\x5d\xfc\x01\x00\x00"), - }, - "/src/bytes/bytes_test.go": &vfsgen۰CompressedFileInfo{ - name: "bytes_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 212684307, time.UTC), - uncompressedSize: 215, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\xcc\xc1\x4a\xc4\x30\x10\x87\xf1\x73\xe7\x29\x86\x5c\x6c\x55\xba\x8f\xb1\xe0\xb5\xde\x44\x24\x4d\xff\xb6\xe3\xa6\x93\x90\x99\x22\xab\xf8\xee\xb2\xe0\xc5\xeb\xc7\x8f\xef\x74\xe2\x87\xf9\x90\xbc\xf0\x87\x11\xd5\x98\x2e\x71\x05\xcf\x57\x87\xbd\x39\xcc\x89\x64\xaf\xa5\x39\xf7\xd4\x85\x5b\x10\x5d\x03\x0d\x44\xef\x87\x26\x5e\xa2\xae\x68\xe5\xb0\x29\x4b\x42\xef\x7c\xff\x47\xc6\xe7\x81\x5f\x5e\x6f\x1b\xfe\xa6\xce\xc7\xe9\x22\xb5\x0f\xff\x39\x37\x64\x81\x71\x51\xb6\xab\xa5\x98\xf3\x78\x86\xd7\xb8\xc2\xe4\x0b\x8f\xfc\xb9\x49\xda\xf8\x5c\xea\x86\xf6\x34\xf1\x52\x60\x7a\xe7\x2c\x7b\xcd\xd8\xa1\x1e\x06\xa2\xae\x46\x95\xd4\x87\x43\x1b\x62\xda\xe2\x9c\x11\x06\xfa\xa1\xdf\x00\x00\x00\xff\xff\x25\x40\x6e\x83\xd7\x00\x00\x00"), - }, - "/src/crypto": &vfsgen۰DirInfo{ - name: "crypto", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213341455, time.UTC), - }, - "/src/crypto/internal": &vfsgen۰DirInfo{ - name: "internal", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 212840574, time.UTC), - }, - "/src/crypto/internal/subtle": &vfsgen۰DirInfo{ - name: "subtle", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 212919879, time.UTC), - }, - "/src/crypto/internal/subtle/aliasing.go": &vfsgen۰CompressedFileInfo{ - name: "aliasing.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 212967731, time.UTC), - uncompressedSize: 654, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\x4d\x6f\xd4\x3c\x14\x85\xd7\xe3\x5f\x71\x14\xbd\xea\x9b\x68\xda\x84\x6e\x11\x45\x62\x55\xc1\xa6\x0b\x90\x58\x20\x16\x8e\x73\x27\x76\x70\xae\xa3\xeb\x1b\x88\x85\xf8\xef\x68\xa6\xa5\x7c\x0e\xec\x72\xa5\xe7\x39\xe7\xc4\x5d\x87\x7d\xbf\x86\x38\x60\xca\xc6\x2c\xd6\x7d\xb0\x23\x21\xaf\xbd\x46\x32\x26\xcc\x4b\x12\x45\x35\x06\xf5\x6b\xdf\xba\x34\x77\x63\x5a\x3c\xc9\x94\xbf\x7f\x4c\xb9\x32\xa6\xeb\xf0\x82\xcb\xdd\x47\x92\x68\x17\x08\x1d\xbd\x8c\x4f\x9e\xd4\x93\x60\x83\xe5\x01\x05\xd9\x5b\x21\xcc\x34\x27\x29\xb0\x0a\xcb\x05\x35\x27\x05\x93\xa3\x9c\xad\x84\x58\x8e\x51\x2e\x89\x50\x5e\x12\x0f\x81\xc7\x06\x81\x07\xda\x5a\xbc\xf1\x8f\x6e\x4f\x25\xf1\x00\xf5\x84\x1c\x83\x23\x44\xe2\x51\x3d\x42\x46\x18\x39\x09\x0d\xad\x39\xac\xec\x7e\x18\x55\x6f\x97\x28\x78\xf7\xbe\x2f\x4a\x0d\xfa\x94\x22\x3e\x9b\x5d\xd7\xe1\xf6\xf4\x23\xaf\x5e\x3f\xc5\x5b\x82\xb3\xfc\xbf\x42\x28\x16\x24\xc6\x92\x02\x2b\x09\xac\x04\xf5\x33\x69\x70\x97\xc8\x09\x6b\xa6\x47\xeb\xa1\xff\xc4\xb1\x8d\xb9\x35\x3b\x21\x5d\x85\x8f\x93\xea\xad\xc1\x73\x3c\xc1\xc5\xc5\xe9\x2a\xdf\x2e\xb3\xdb\x4d\xb9\x7d\xf9\xe0\xdc\xf5\x13\x39\xad\xb7\xa6\xbd\x25\xad\xab\xff\xac\x88\x2d\x55\x83\x9b\x1b\xfc\x4e\x95\x5f\xa9\x7f\xa5\xa5\xc3\x21\x93\x56\xcd\x11\xa8\x1b\x3c\xfb\x6b\xe8\xcf\xf0\xfe\x7e\xf4\xd5\xf5\xb9\x92\x33\xde\x9f\x4b\xce\x2c\xda\xdf\xbf\xd3\xd5\xb5\xf9\x62\xbe\x06\x00\x00\xff\xff\xb2\x4c\x59\x2e\x8e\x02\x00\x00"), - }, - "/src/crypto/rand": &vfsgen۰DirInfo{ - name: "rand", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 527715943, time.UTC), - }, - "/src/crypto/rand/rand.go": &vfsgen۰CompressedFileInfo{ - name: "rand.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 528084886, time.UTC), - uncompressedSize: 1415, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x54\x41\x4f\xeb\x38\x10\x3e\xc7\xbf\x62\xc8\xae\x56\xf1\x12\x12\x24\x04\x87\xae\x8a\xc4\x22\x84\x38\x2c\xbb\x8b\x76\xdf\x3b\x20\x0e\x76\x32\x69\x5c\x52\xbb\x6f\xec\x34\x54\xa5\xff\xfd\xc9\x71\x52\x0a\xf4\xe9\x5d\xda\x38\xdf\x37\xdf\x37\x33\x9e\x49\x9e\xc3\xb1\x6c\x55\x53\xc2\xdc\x32\xb6\x14\xc5\xb3\x98\x21\x90\xd0\x25\x63\x6a\xb1\x34\xe4\x20\x61\x51\x8c\x44\x86\x6c\xcc\x58\x14\xcf\x94\xab\x5b\x99\x15\x66\x91\xcf\xcc\xb2\x46\x9a\xdb\xb7\x87\xb9\x8d\x19\x67\xac\x6a\x75\x01\x4a\x2b\x97\x70\xd8\xb0\xe8\x01\x45\x89\x04\x53\xf8\x8d\xf4\x2c\x1c\x36\x5b\xb6\x65\xcc\xad\x97\x08\xbb\x77\x60\x1d\xb5\x85\xdb\x6c\x07\x81\x84\xe0\xf7\x1d\xc8\xc1\xff\x27\x12\x1e\x9f\xe4\xda\x21\x87\x44\x83\xd2\x2e\x05\x24\x82\x3e\xbd\xde\x4a\x10\x89\x35\x4c\xa6\x30\xb7\xd9\x9d\x76\x48\x5a\x34\x7f\xcb\x39\x16\x2e\x91\x3c\xbb\x45\x97\xc4\xbf\xf6\x9c\x98\xb3\xc8\x54\x95\x45\xf7\x13\x76\x20\xc5\xdc\x13\x12\xce\x58\x94\xe7\x20\xc9\x74\x16\x89\x45\x05\xad\x97\xce\x0c\x0a\xb7\x8d\x91\xa2\x09\x61\x01\xf0\x26\xaa\x82\x81\x35\xed\x59\xff\xeb\x12\x2b\xa5\xb1\xf4\xe9\x8e\x02\x9f\xe2\x17\xf6\x7a\xa7\xb0\xdd\x17\x39\x3a\x20\xb2\x43\x43\xec\x0c\xdd\x83\xd0\xa5\x59\x7c\x11\x4d\x8b\x36\xe6\x07\x83\x22\x0d\x53\x68\x50\x27\x92\xfb\x93\xaa\x40\xc3\x25\x5c\x9c\x9f\x9f\x5d\x04\xdc\x17\x7a\xb5\x32\xaa\x84\x7f\x5b\xe3\xc4\xcd\x4b\x81\x58\x62\x79\xe3\x7b\x0d\xae\x26\xd3\x69\x90\x6b\xf8\xe0\x36\x46\x76\x35\x6a\x2f\x3f\x73\x35\x28\x0b\x0b\x43\x08\xae\x16\x3a\x38\xa4\x20\x2c\xd8\x25\x16\xaa\x52\x58\x82\xd2\x63\x58\xed\xdc\x72\x92\xe7\x5d\xd7\x65\xdd\x59\x66\x68\x96\xff\xf7\x90\x7f\x45\x19\xba\x71\xf5\xcf\x5d\xfe\x4b\x78\x3c\x59\xa0\xab\x4d\x79\x72\xc8\xde\x57\xd6\xdb\xf8\xd3\xd6\xff\x0c\xed\xb9\x16\x4d\xf3\xb9\x3f\x29\xf4\x13\x31\xa0\xb6\x95\x61\x40\x52\x08\x57\x3f\xfe\x1f\x6b\xde\x77\x8a\xd0\xb5\xa4\x41\xa7\xa0\x55\xc3\x7a\x83\x6d\x18\x8b\x7b\x53\x62\x36\xb7\xfd\x75\x11\x7e\x6b\x15\xe1\x81\xd1\x18\x90\x98\xff\xb1\x23\xfd\xe0\x52\xa9\xcf\xf2\xcf\xb5\x43\xeb\x75\x06\x76\x76\xa7\x57\xe6\x19\xdf\x66\x6c\x90\x7d\x23\xf7\xd2\x7b\xb1\x07\xaf\xff\x5d\xcd\xe8\xe2\x74\x3f\x64\xf4\x08\xf3\xc1\xc7\x16\xec\xd7\x1f\xa0\x0f\x4d\x18\xb0\xd3\x34\xac\xa4\xcd\xee\xb1\x1b\x13\xcd\xbd\x3e\x68\xe3\x40\xac\x84\x6a\x84\x6c\x10\x94\x06\x57\x2b\x0b\xa8\x57\x8a\x8c\x5e\xa0\x76\x31\x67\xe3\x07\x40\x0a\x57\xd4\x58\x26\x15\xf8\x63\x32\x6e\xbe\x34\xa6\x49\x81\x50\x94\x7f\x89\x17\xff\x11\xe0\x9f\x71\x5f\xe3\x90\x4c\x8f\xc9\xb6\x82\x8f\x78\x54\x19\x0a\x65\xb4\x15\x87\xcb\x9d\xe2\x66\xd8\x87\xa3\xca\x23\x8f\x93\xe1\xfd\x13\x1f\xf6\x62\xd4\x15\x8d\xc5\xdd\x84\x79\x83\x29\x78\xfe\x40\x9f\x3c\x85\xb6\xbc\xeb\x97\x37\x9a\x4e\xe1\x14\x5e\x5f\xa1\x57\xef\xd7\x7b\xcb\xbe\x07\x00\x00\xff\xff\x4b\xf2\x65\x42\x87\x05\x00\x00"), - }, - "/src/crypto/x509": &vfsgen۰DirInfo{ - name: "x509", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213551650, time.UTC), - }, - "/src/crypto/x509/x509.go": &vfsgen۰CompressedFileInfo{ - name: "x509.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213460911, time.UTC), - uncompressedSize: 177, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x34\x8d\xb1\x6e\xc2\x40\x10\x05\xeb\xec\x57\x3c\x5d\x65\x27\x51\x9c\x26\x45\xd2\xa6\x88\x94\x02\x21\xfc\x05\x67\x7b\x81\x83\xf3\xed\x69\x6f\x0d\x58\x88\x7f\x47\x58\xa2\x1d\x8d\x66\x9a\x06\x6f\xdd\x14\xe2\x80\x43\x21\xca\xbe\x3f\xfa\x1d\xe3\xf2\xf5\xf9\x4d\x14\xc6\x2c\x6a\x70\xac\x2a\x5a\x1c\xd1\x76\x4a\x3d\xa2\xf8\xa1\x9d\x8b\xf1\xb8\x11\xb1\x52\xd5\xa8\x5e\x7f\x59\x6d\x2d\x12\xdf\xb1\xb8\x35\xae\xf4\xa2\x6c\x93\x26\xa4\xf0\xa4\xe5\x63\xc5\xe7\xca\xf5\x3a\x67\x93\xe6\xb1\xf8\x41\x59\x42\x50\x11\x43\x16\x89\x08\x05\x49\x0c\xfe\xe4\x43\xf4\x5d\x64\x84\x84\x3f\xc9\x7b\xd6\xff\xd6\xd5\x74\xa3\x7b\x00\x00\x00\xff\xff\xa1\x8b\x91\x39\xb1\x00\x00\x00"), - }, - "/src/crypto/x509/x509_test.go": &vfsgen۰CompressedFileInfo{ - name: "x509_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213620762, time.UTC), - uncompressedSize: 364, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x90\xb1\x0e\x82\x40\x0c\x40\x67\xfb\x15\xcd\x4d\xa0\x09\xb8\x38\x38\x1b\x07\x37\x23\x84\x1d\xb1\x90\x13\xb8\x92\x6b\x31\x12\xe3\xbf\x1b\xd1\x49\x17\xc2\xdc\xf7\x5e\x9b\xc6\x31\xae\xce\xbd\x6d\x2e\x78\x15\x80\x2e\x2f\xea\xbc\x22\xbc\x6f\xd6\x5b\x00\xdb\x76\xec\x15\x8d\x92\xa8\x75\x95\x01\x28\x7b\x57\x60\x4a\xa2\xc9\x20\x4a\xed\x8e\xbc\x1e\x99\x9b\x40\x71\xf9\x85\xa2\x34\xc4\x07\x2c\x34\x4a\x6a\xdb\x05\xc6\x31\xca\x88\xa2\x67\x56\x31\x21\x3c\xff\x2a\xa7\xf7\x64\x6e\x62\xef\x6e\x59\xee\x67\xeb\x9f\x0b\x32\xf2\xb6\x1c\x26\x34\x7e\xec\xc3\xf8\xa0\x29\xcb\x47\xf1\x15\x00\x00\xff\xff\xa4\x46\xbd\x49\x6c\x01\x00\x00"), - }, - "/src/database": &vfsgen۰DirInfo{ - name: "database", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213773794, time.UTC), - }, - "/src/database/sql": &vfsgen۰DirInfo{ - name: "sql", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213829998, time.UTC), - }, - "/src/database/sql/driver": &vfsgen۰DirInfo{ - name: "driver", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213911889, time.UTC), - }, - "/src/database/sql/driver/driver_test.go": &vfsgen۰CompressedFileInfo{ - name: "driver_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 213971471, time.UTC), - uncompressedSize: 1185, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x53\x4d\x8f\xd3\x30\x10\x3d\x93\x5f\x31\x9a\x03\x38\x60\x35\xc9\x0a\xad\x44\x24\x2e\xb0\xe2\xba\x1c\x7a\xdb\xf6\xe0\x24\x0e\x32\x18\x3b\xf8\x23\xa5\xaa\xfa\xdf\x91\xe3\x06\xa4\xd6\x6d\xc3\x25\x9e\xcc\x9b\x79\xf3\xe4\x79\x2e\x0a\x78\xd7\x78\x21\x3b\xf8\x6e\xb3\x6c\x60\xed\x0f\xf6\x8d\x43\x67\xc4\xc8\x4d\x96\x8d\xcc\xc0\xc8\xa4\xe7\x9f\xb5\x1a\xb9\x71\xdc\xac\xb9\x75\x16\x3e\xc2\xcb\xf6\x32\x7f\xc8\x5e\x1d\x3e\x69\x2d\x29\xa0\x33\x9e\x23\x85\x70\x50\x40\x3c\xd2\x7f\xd0\xfa\x2a\xf4\xb2\x6d\xf6\x8e\x13\x74\x98\x27\xf1\x98\x4a\x71\x56\x69\xc2\x2a\x99\x15\xca\x3d\xbe\x27\x55\x7a\x86\x17\xca\x55\x8f\xd7\x50\xec\x99\xb4\x41\xfd\x74\x9e\x81\xa7\x5c\x0a\xc2\xf2\x4a\x4f\x99\x4e\x47\x89\x65\x9e\x46\x4f\x1a\x2f\xe1\xb6\x86\xb9\xbf\x06\xec\xb5\x46\x0a\xdc\x98\x1a\xd0\xfe\x92\x45\x5c\x6a\x0d\xad\xf6\xb2\x53\x6f\x1c\xb4\x71\x79\xb0\x09\xa5\x1b\x0c\x53\x35\xb8\xfd\xc0\xa1\xd1\x5a\x26\x28\x1f\x16\xd1\x3d\x24\x89\x9e\x78\xcf\xbc\x74\x5f\x99\x61\x3f\xb9\xe3\xe6\xaf\x73\x28\x28\xbd\x3b\x7d\xf0\x6e\x2d\x79\x3b\xdd\x4d\x4e\x94\x90\x39\x05\x25\xe4\x92\xae\xd7\x4c\xd9\x5d\x08\xe6\x73\x41\xcb\xb9\xaa\xa2\xb8\x55\x2e\xc8\x87\x7c\xde\x5b\x88\x42\x0f\x14\x05\xac\x9f\x9f\x9e\x6b\xf8\x22\x7e\xaf\x6e\x8f\xeb\x49\xb9\x0a\xa6\xeb\xa5\x66\xd3\xee\xa7\xbf\xfb\x32\x1b\x12\x6c\x7a\xe6\xd6\xdb\x52\x1b\x7b\xa8\x8e\xf3\x6b\x9b\xc2\xff\x15\x6b\x09\xb2\xf0\x46\x91\xe1\x12\x8d\x22\x0e\x8c\xbb\xf2\xca\xfa\x61\xd0\xc6\xf1\x2e\x5a\x24\xfa\x68\x25\x2c\x05\x06\x56\x8a\x96\x83\xee\xc3\x4d\x06\xde\x63\xf6\x27\x00\x00\xff\xff\x8d\xf2\x41\x9a\xa1\x04\x00\x00"), - }, - "/src/debug": &vfsgen۰DirInfo{ - name: "debug", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 214115075, time.UTC), - }, - "/src/debug/elf": &vfsgen۰DirInfo{ - name: "elf", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 214185958, time.UTC), - }, - "/src/debug/elf/elf_test.go": &vfsgen۰FileInfo{ - name: "elf_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 214239008, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x65\x6c\x66\x0a\x0a\x69\x6d\x70\x6f\x72\x74\x20\x22\x74\x65\x73\x74\x69\x6e\x67\x22\x0a\x0a\x66\x75\x6e\x63\x20\x54\x65\x73\x74\x4e\x6f\x53\x65\x63\x74\x69\x6f\x6e\x4f\x76\x65\x72\x6c\x61\x70\x73\x28\x74\x20\x2a\x74\x65\x73\x74\x69\x6e\x67\x2e\x54\x29\x20\x7b\x0a\x09\x74\x2e\x53\x6b\x69\x70\x28\x22\x6e\x6f\x74\x20\x36\x6c\x22\x29\x0a\x7d\x0a"), - }, - "/src/encoding": &vfsgen۰DirInfo{ - name: "encoding", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215208874, time.UTC), - }, - "/src/encoding/gob": &vfsgen۰DirInfo{ - name: "gob", - modTime: time.Date(2019, 4, 6, 11, 3, 15, 126398833, time.UTC), - }, - "/src/encoding/gob/gob_test.go": &vfsgen۰CompressedFileInfo{ - name: "gob_test.go", - modTime: time.Date(2019, 4, 6, 11, 3, 15, 126473498, time.UTC), - uncompressedSize: 2598, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\x51\x6f\xdb\x3e\x0e\x7f\xb6\x3e\x05\x67\xdc\x0a\xa7\xe7\x39\x95\x93\xae\x9d\x81\x3e\xac\x5b\x77\xd8\x43\x3b\x60\x33\x70\xdb\x8a\x62\x70\x6c\x26\xd1\xea\x48\x3e\x49\x6e\x1a\x04\xf9\xee\x07\x4a\x76\x9c\xae\xff\x0d\x2b\xd0\x56\xa4\x7e\xfc\x91\xa2\x48\xca\xe3\x31\xfc\x7b\xd6\x8a\xba\x82\x9f\x86\xb1\xa6\x28\xef\x8b\x05\xc2\x42\xcd\x18\x13\xab\x46\x69\x0b\x11\x0b\xc2\xd9\xc6\xa2\x09\x59\x10\x6a\x9c\xd7\x58\x5a\x5a\x5a\x34\x56\xc8\x45\xc8\x46\x8c\x8d\xc7\x90\x7f\x7a\xff\x29\x83\x1c\x8d\xbd\x92\x55\xae\xae\x64\x05\xea\x01\xb5\x16\x15\x42\x59\x48\x98\x21\x68\x5c\xa9\x07\xac\x40\xc9\x12\xc1\x2e\x11\x66\xed\x02\xd6\xc2\x2e\xe1\xba\xd0\x1a\xe6\x02\xeb\x0a\x84\x81\xb9\x78\xc4\x2a\x61\xf3\x56\x96\x4f\x08\x23\x0b\xc7\x9d\xd7\x24\x1f\xc1\x96\x05\x76\xd3\x20\xe4\x29\x18\xab\xdb\xd2\x92\x26\xc8\x49\x10\x72\xc1\x82\x5d\xbf\x3f\x39\xdc\xff\x0a\xf3\x5a\x15\xf6\xf5\x94\x05\xc1\x77\x38\x16\xd2\x1e\x20\xf9\x21\xf2\x6d\x0c\x97\x31\xbc\x03\x70\x98\xe0\x1a\xba\x9f\x55\xd1\xdc\x7a\x1f\x77\xc7\x03\xd7\x75\x7a\xb0\x2d\xa4\xbd\xcb\x27\xa4\xf5\xc0\x27\x46\x7d\x7c\xc1\xb5\x90\xb6\xb1\x7a\x30\x39\xee\x3c\x95\x6a\xd5\xf4\x54\xb4\xae\xf1\x91\xa7\xe7\x77\xc3\x92\x40\x94\xb2\x1e\x74\x9b\x76\xac\x77\xb7\xe9\x61\x50\x57\xab\xc6\x6e\xae\x8b\xe6\xd0\xbd\x90\x16\xc6\x63\xb0\x0a\xca\x25\x96\xf7\x60\x97\x85\x85\x35\xdd\x4e\x89\xe2\x01\xa1\x00\xa9\xe4\x2b\x29\x6a\x32\x4a\x58\x10\xdc\xf4\x07\x3f\xbe\x9d\xdc\x0d\xdc\x5f\xac\x36\x9d\x3a\x1d\xce\xf4\x51\xda\xd7\x53\xe3\xb4\xe4\xc9\x21\x3f\x7f\xec\x08\xba\x03\x78\xf3\x9e\x75\x6f\xfa\xad\xd7\xdc\xde\x51\xbd\xb9\xbb\xec\x3d\xe7\xa9\xbb\xa5\x46\x40\x76\x01\x93\x84\x4f\xf9\xe9\x1b\x16\x20\x49\x69\x72\xc6\xcf\x29\x25\x76\xad\xbc\x7c\xc2\x82\x15\x16\x92\xf2\x9e\x5d\xc0\x34\x65\xc1\x5c\xc8\x05\x6a\x43\xe2\x29\x0b\x0c\xa7\x45\xe8\x1d\xf3\x90\x05\x26\x3d\x50\xa4\x21\x0b\x1e\x0a\xed\x82\xe5\x30\xe4\x1c\x2e\x7a\x21\xe2\xc9\x49\x0c\x3c\x39\x19\x0d\xc8\xf4\xaf\x90\x85\xd6\x1c\x0e\xd2\x45\xf2\xed\xc9\x1d\x5c\x80\xe1\x9d\xc4\x9d\x94\xee\xf1\xe9\x2f\xf8\xb4\xc3\xa7\x9d\xc4\x7b\x6b\xc2\xbb\xdb\x79\xdb\x39\x19\xea\x60\xaf\xf6\xb6\x47\x8d\x38\xd4\x39\x86\x23\x7c\xca\x90\xfe\x33\x43\xe7\x9d\xd0\x83\xca\x13\xd8\xb5\x62\x81\x75\xa9\x3d\xca\xb9\x6b\xa0\xac\xbb\x3e\x7e\x16\xb3\x20\xb8\xdc\x8b\xe7\x24\xbe\xeb\xc5\x57\xa7\x24\x5e\x67\xbf\x6f\xaf\x6d\xd8\x88\x30\xa3\xb8\x63\x08\x91\x56\xb8\x73\x36\x69\xf6\x6b\xcf\x6d\xa7\x19\xe4\x93\xed\xd7\x0c\x08\xfc\x3d\x83\xa3\xae\x14\x76\x31\xf0\x93\x7e\x0f\xfd\x56\x57\x16\x3b\x4f\xe6\x9d\x66\xcf\x5b\xb5\x73\x1f\x52\xdd\x85\x5d\x04\x21\x95\x5d\xe8\x0d\x7d\x1b\x67\x4f\xda\x78\xdb\xb9\x1d\xbc\xc4\xd0\x2d\x0e\x63\xea\xbb\x3d\xfb\x53\xb7\x6f\x5d\x29\x66\xbe\xce\x62\xff\xcf\x4b\xdc\x31\xec\xa7\xef\x07\xf1\x08\x76\x29\x0c\x34\x5a\xcd\x6a\x5c\x65\x7e\x33\xc8\x37\x0d\x5e\x69\xad\x74\x06\x95\xb1\xc9\xbf\x0c\x5a\x9a\xb3\x52\x59\x28\x80\xc6\xac\x15\x4a\x76\x58\x4a\x67\x61\x81\xe6\x61\xb5\xc2\x15\x4d\x6c\x88\xc6\x0b\x61\x97\xed\x2c\x29\xd5\x6a\xbc\x50\xcd\x12\xf5\x4f\x33\x2c\xba\x47\x21\x59\xa8\x6c\x7a\x7e\x96\x4d\x46\x8e\x8a\x06\x54\xf6\xe7\x09\xb5\xa5\x8a\xcf\x86\xaa\x8d\x5d\xc1\x0f\x8a\xd4\x1d\xaf\x1f\x62\x94\xe0\x7b\x8c\x9e\x8e\xb2\x11\x21\x6e\xfa\xda\x81\xa3\x61\x44\x6d\x79\x72\x1a\x43\x4a\x7f\x26\xc9\xa9\x63\xa2\x91\x95\x75\xb8\x3e\x9e\xad\xe1\x31\x18\xef\xc9\x0f\xaf\xcc\xed\xfb\xe9\xb5\x3d\x3b\x8b\xe1\xfc\x4d\x0c\x3c\x9d\x4c\xe9\x37\xe5\x93\xa9\xc3\x7e\xfe\x38\x54\x37\xbc\x82\x74\x22\x9c\x87\x7d\x24\xe1\x8d\x5a\x53\x92\xe9\x9d\xb3\x62\x85\x21\x6d\x7f\xcb\x9e\xce\xb8\x28\x5c\x62\x5d\xab\x18\x4c\x21\x6a\xa5\x43\x77\x9a\x7c\x38\x4
d\x9e\x6e\x43\x77\xa1\xc2\x40\x9e\xba\x72\xdb\xb1\x60\x46\x3d\x26\x71\x1d\xb9\x67\x39\xb9\x6c\xe7\x73\xd4\x23\x16\xa0\xd6\xb4\x73\x83\xeb\x2b\x59\xaa\x0a\x75\x34\x1b\x25\x7e\x19\x59\x3e\x62\x81\x98\x03\x61\x5e\x5c\x00\x8d\x77\x6a\x51\x9b\xb8\xba\x88\x42\x74\xb0\x2c\x8c\x09\x31\x72\x6e\x68\x1c\xfc\xb0\x1c\x72\xee\xa9\x1d\xf3\x7b\xdc\x33\xfb\x65\x74\xf4\xe3\xb7\xdc\x1f\x0a\x5b\xd4\x51\x58\xe1\x33\x6e\x31\x87\x17\x7d\xd9\xbc\x47\x6c\xae\xfe\xd7\x16\x75\x64\x79\x0c\x8e\xee\x30\xb6\x79\x1f\x1c\xe0\x63\x83\xa5\xc5\x0a\x5e\x3e\xc0\x42\x59\x78\xf9\x10\xc6\x70\x4c\x46\x3e\x84\x1d\xa3\x0a\xbe\x44\x28\x66\x46\xd5\xad\xc5\x7a\x03\xa6\xd5\xfe\x5b\xa3\x7b\xde\x2a\xaa\x46\x5f\xfc\xee\x91\x4b\x5c\x2c\x96\x27\xfb\xa7\xf2\xe2\x59\x76\xe6\x51\xd8\x3d\x87\x60\x50\xda\x70\x7f\x84\x1f\x7f\x6d\xd7\x7b\xf7\xb6\x3b\x36\x7c\xdc\x50\x6f\x7e\x2e\x4a\x7c\xfe\x71\x33\x1e\x83\x3b\xb8\x90\x8b\xf1\x42\xcd\xa0\x6c\xb5\x46\x69\xeb\x0d\xb4\x06\xe9\x00\x66\x23\xcb\x04\x72\xaa\x0f\xb2\xf4\x6a\xa7\xfc\x6f\x21\xec\x7f\xb4\x6a\x1b\x28\x64\xe5\x98\xca\x42\x52\xbb\x9b\xb6\x2c\x11\x2b\x58\x2f\x51\x76\x0c\x94\x8c\xd6\xd0\x07\x57\x60\x93\x2f\xf7\xa2\x89\xc2\xd6\xd0\xdb\xe9\xb7\xc3\x11\xdb\xb1\xff\x07\x00\x00\xff\xff\x9b\x7c\x41\xd0\x26\x0a\x00\x00"), - }, - "/src/encoding/json": &vfsgen۰DirInfo{ - name: "json", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215280298, time.UTC), - }, - "/src/encoding/json/stream_test.go": &vfsgen۰FileInfo{ - name: "stream_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215335211, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x6a\x73\x6f\x6e\x0a\x0a\x69\x6d\x70\x6f\x72\x74\x20\x22\x74\x65\x73\x74\x69\x6e\x67\x22\x0a\x0a\x66\x75\x6e\x63\x20\x54\x65\x73\x74\x48\x54\x54\x50\x44\x65\x63\x6f\x64\x69\x6e\x67\x28\x74\x20\x2a\x74\x65\x73\x74\x69\x6e\x67\x2e\x54\x29\x20\x7b\x0a\x09\x74\x2e\x53\x6b\x69\x70\x28\x22\x6e\x65\x74\x77\x6f\x72\x6b\x20\x61\x63\x63\x65\x73\x73\x20\x69\x73\x20\x6e\x6f\x74\x20\x73\x75\x70\x70\x6f\x72\x74\x65\x64\x20\x62\x79\x20\x47\x6f\x70\x68\x65\x72\x4a\x53\x22\x29\x0a\x7d\x0a"), - }, - "/src/fmt": &vfsgen۰DirInfo{ - name: "fmt", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215487136, time.UTC), - }, - "/src/fmt/fmt_test.go": &vfsgen۰FileInfo{ - name: "fmt_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215520167, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x66\x6d\x74\x5f\x74\x65\x73\x74\x0a\x0a\x63\x6f\x6e\x73\x74\x20\x69\x6e\x74\x43\x6f\x75\x6e\x74\x20\x3d\x20\x31\x30\x30\x0a"), - }, - "/src/go": &vfsgen۰DirInfo{ - name: "go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215644444, time.UTC), - }, - "/src/go/token": &vfsgen۰DirInfo{ - name: "token", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215700085, time.UTC), - }, - "/src/go/token/token_test.go": &vfsgen۰FileInfo{ - name: "token_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 215741006, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x74\x6f\x6b\x65\x6e\x0a\x0a\x69\x6d\x70\x6f\x72\x74\x20\x28\x0a\x09\x22\x74\x65\x73\x74\x69\x6e\x67\x22\x0a\x29\x0a\x0a\x66\x75\x6e\x63\x20\x54\x65\x73\x74\x46\x69\x6c\x65\x53\x65\x74\x52\x61\x63\x65\x28\x74\x20\x2a\x74\x65\x73\x74\x69\x6e\x67\x2e\x54\x29\x20\x7b\x0a\x09\x74\x2e\x53\x6b\x69\x70\x28\x29\x0a\x7d\x0a"), - }, - "/src/internal": &vfsgen۰DirInfo{ - name: "internal", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529838293, time.UTC), - }, - 
"/src/internal/bytealg": &vfsgen۰DirInfo{ - name: "bytealg", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 528247352, time.UTC), - }, - "/src/internal/bytealg/bytealg.go": &vfsgen۰CompressedFileInfo{ - name: "bytealg.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 528670257, time.UTC), - uncompressedSize: 181, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\xcb\xb1\x0a\xc2\x30\x10\xc6\xf1\x39\xf7\x14\x9f\x5b\x8b\x85\xee\x42\x47\x9f\xa2\x74\xb8\x8b\x97\x12\x3d\x52\x4d\x9b\x41\xa4\xef\x2e\x29\xb8\xb8\x1d\xff\xfb\x7e\x7d\x8f\xb3\x94\x68\x37\xdc\x57\xa2\x27\xfb\x07\xcf\x0a\x79\x6f\xca\x36\x13\x85\x92\x3c\xae\xaf\xc2\xd6\x70\x07\xc1\x38\xd5\x57\x0b\x59\x16\xc3\x87\x5c\x0c\x30\x4d\x0d\xb7\x38\x0d\xc7\x25\x6d\xcd\x2e\xeb\x56\x72\x42\x60\x5b\x95\xdc\x4e\x2e\x2c\x19\xb1\x83\xc7\x65\x40\xe6\x34\x2b\xf8\x18\xc6\x00\x5f\xad\x8c\x71\x3a\xc2\x1f\xad\x76\xa7\x5f\xdc\x72\x51\xda\xe9\x1b\x00\x00\xff\xff\x11\x57\xe4\x4d\xb5\x00\x00\x00"), - }, - "/src/internal/cpu": &vfsgen۰DirInfo{ - name: "cpu", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 528796048, time.UTC), - }, - "/src/internal/cpu/cpu.go": &vfsgen۰FileInfo{ - name: "cpu.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529123771, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x63\x70\x75\x0a\x0a\x63\x6f\x6e\x73\x74\x20\x28\x0a\x09\x43\x61\x63\x68\x65\x4c\x69\x6e\x65\x53\x69\x7a\x65\x20\x20\x20\x20\x3d\x20\x30\x0a\x09\x43\x61\x63\x68\x65\x4c\x69\x6e\x65\x50\x61\x64\x53\x69\x7a\x65\x20\x3d\x20\x30\x0a\x29\x0a"), - }, - "/src/internal/fmtsort": &vfsgen۰DirInfo{ - name: "fmtsort", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529240051, time.UTC), - }, - "/src/internal/fmtsort/fmtsort_test.go": &vfsgen۰CompressedFileInfo{ - name: "fmtsort_test.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529575759, time.UTC), - uncompressedSize: 1103, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x53\x41\x8f\xd3\x3c\x10\x3d\xc7\xbf\x62\xbe\x48\xdd\x2f\x81\x90\xb6\x80\x38\x74\x29\x97\x15\x20\x40\x2a\x48\xbb\xf7\x95\xd7\x99\x34\x6e\x52\x3b\xb2\xa7\x29\x15\xdb\xff\x8e\xc6\x75\xb7\x5d\x15\xc1\xa5\xb5\x3d\x6f\x66\xde\x7b\x33\x19\x8f\xe1\xe5\xc3\x46\x77\x15\xac\xbc\x10\xbd\x54\xad\x5c\x22\xd4\x6b\xf2\xd6\xd1\x3d\xa1\x27\x21\xf4\xba\xb7\x8e\x20\x13\x49\xba\x96\xd4\xa4\x22\x49\x1d\xd6\x1d\x2a\xe2\x23\x63\xb4\x59\xa6\x42\x24\xa9\x36\x84\xce\xc8\x6e\x1c\x0b\xa4\x22\x17\x62\x3c\x06\x83\x58\xf9\xdb\x56\xf7\xe0\x90\x6b\x79\xd8\x36\x48\x0d\x3a\xa0\x06\xa1\xd5\xa6\x82\xca\xa2\x37\xff\x13\x6c\xad\x6b\xa1\xb6\x0e\x38\x5f\x9b\x25\x58\x03\x9f\x6d\xdf\xa0\xfb\x7a\x5b\x8a\x7a\x63\xd4\xa9\x5a\xd6\x42\x24\x52\x7e\xd3\xa6\xca\xe1\xc1\xda\x0e\x7e\x89\xc4\x6f\x35\xa9\x06\x5a\x3e\x2b\xe9\xf1\x09\xf6\x83\x5c\xf1\x74\xb9\x69\xa4\x99\x89\x24\x71\x48\x1b\x67\x80\xdc\x06\x45\xb2\x17\xc7\x7b\x2d\x3b\x8f\x62\x1f\x04\x2c\x2c\xe1\x0c\xfc\xce\x28\xd8\x6a\x6a\x02\x6d\xeb\xf4\x52\x1b\xd9\xc1\x1d\x7a\xba\xb1\xeb\x5e\x3a\x8c\x0c\xcf\x5e\x32\x82\x17\xd1\xa2\xf2\x2e\x67\x42\x2c\xee\xbe\x00\x7e\x84\xd9\x1c\x9c\x34\x4b\x04\x75\x40\x73\xa2\x67\x50\x40\xe9\x02\x86\xc9\x09\x13\x32\x38\x16\x82\xab\x02\x86\xe9\x9f\x82\x89\xae\xcf\x2c\x1a\x26\xc1\x9b\x2c\xcf\x63\x34\x51\xd6\x90\x36\xac\x35\x49\x58\xee\x45\xc6\xf4\x1f\x19\xe1\x4f\x71\xeb\x38\xe6\xf2\xa8\x75\x98\x30\xa9\x3c\x00\x06\xe9\x00\x7f\xf6\xa8\x08\xb4\xa1\xf0\x14\xc7\x72\xa8\x1a\xe6\xa2\x61\x3e\x87\xd5\xec\xd0\x26\xa2\xe7\x30\x39\xdc\xd9\x77\xb9\xf0\x20\x1d\x02\x39\xad\xda\x5d\x79\x08\xe8\x1a\x68\xd7\x33\x81\x61\x52\xde\xed\x7a\xcc\xf2\x6b\xc8\x68\xd7\x47\xe2\x5c\xf4\x38\xe4\x4f\x9d\x95\xf4\xe6\x35\x3c\x3e\xc2\x5f\x00\xef\xde\xe6\x70\x75\x05\xbc\xde\xe5\x17\xbf\x90\x0b\xf6\x2d\x44\xce\x6c\x38\x11\x7c\x35\x3d\xbc\xec\xcf\x95\xbc\xbf\x14\x12\x71\x11\xf0\xe1\x12\x30\x7d\x3e\x04\x05\xff\xcd\x8f\xa6\xc5\xa6\x54\x7e\x74\xce\xba\x3a\x4b\x47\x7e\x76\x5c\x93\x6c\x34\x14\xa3\x21\x9f\x8f\xaa\xeb\x23\x7c\x54\xa5\xc5\xc9\x0e\x3e\xf2\x28\x0a\x50\x45\x44\xe4\xa7\x56\xfc\xb3\xe7\x55\xdf\x8b\xd3\xbe\x7e\x77\x15\xba\xcb\x6d\xa5\x32\x2c\x45\xda\x1a\xbb\x35\xa0\xbd\xdf\xe0\x0c\x8c\xee\xa0\xc5\xdd\xb3\x8f\x36\xcd\xc5\x5e\xfc\x0e\x00\x00\xff\xff\xd0\xa4\x01\x39\x4f\x04\x00\x00"), - }, - "/src/internal/poll": &vfsgen۰DirInfo{ - name: "poll", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529702349, time.UTC), - }, - "/src/internal/poll/fd_poll.go": &vfsgen۰CompressedFileInfo{ - name: "fd_poll.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529772376, time.UTC), - uncompressedSize: 1931, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\x41\x6f\x2a\x37\x10\x3e\xaf\x7f\xc5\x28\x97\xec\x12\xd8\x4d\xdb\x5b\x14\x0e\x15\x69\xd2\x48\x55\xa9\x92\x48\x39\x20\x1a\x19\x7b\x80\x49\xbc\xb6\x6b\x7b\x83\x10\xca\x7f\xaf\xbc\xbb\x04\x48\xe0\x85\xf7\xa4\x77\x02\x79\x66\xbe\xf9\xbe\x6f\x67\xa6\x28\xe0\x6c\x52\x91\x92\xf0\xec\x19\xb3\x5c\xbc\xf0\x19\x82\x35\x4a\x31\x46\xa5\x35\x2e\xc0\x49\xa0\x12\x4f\x18\x2b\x8a\xfa\xfd\x0a\xbd\x00\xf2\xc0\x41\x9b\x9e\xb1\x40\xa5\x55\x58\xa2\x0e\x3c\x90\xd1\x60\xa6\xc0\x35\xdc\x16\xc3\x3a\x19\x1d\x4c\x8d\x83\x9b\xe1\xef\x77\x83\x3f\xfb\xcf\x3e\x67\x45\x11\x81\x6e\x83\xff\x58\x48\x1e\x26\xdc\xa3\x04\xa3\xe1\x6f\x3e\xf8\x0b\x48\xc3\x4c\x80\x30\xa5\xa5\x88\x93\x7a\x44\xb8\x19\xde\x0d\x87\x0f\x85\x77\xa2\x20\x1d\xd0\x69\xae\x8a\xd8\xa7\x98\xca\xa7\xf8\xfb\xa4\xb9\x50\xf9\xcc\x64\xdd\xd8\x65\x52\x05\xa0\x00\xd2\xa0\x07\x7c\x45\x0d\x0a\xbd\xcf\x59\x58\x5a\xdc\x48\xf1\xc1\x55\x22\xc0\x8a\x25\x42\x19\x4f\x7a\x06\x13\x63\x14\x7b\x63\x6c\x5a\x69\x01\xa9\x95\xd0\x59\x27\x67\x40\x9a\x42\x3a\x95\xd0\xb9\xbe\xca\x00\x9d\x33\x0e\x56\xe0\x30\x54\x4e\x83\x26\x05\x07\xca\x22\x34\xa6\x19\xac\x0e\xc4\xf1\x95\x44\x88\x71\xb0\x32\x5f\xf3\xe8\x43\x70\x15\x1e\x82\xb4\x0e\x2d\x77\x98\x96\x46\x22\x90\x0e\x5d\x20\x7f\x4d\x0a\x6b\xfa\xef\xdc\x58\x42\xd3\x6d\xcc\x15\x4b\x92\x96\x2e\x3a\x37\x68\x5e\xd3\xa6\x32\x63\xc9\x1b\x4b\x36\x62\x0e\x79\xd0\x76\xbe\x43\x2e\xd3\x7d\x3d\xd7\x7e\x58\x99\xaf\x49\x9e\xba\xd3\x35\xbf\xec\x0b\x41\x8f\x8e\x02\x1e\x8d\xbb\xf8\x1a\x77\xc1\x29\xfc\x2c\x97\xfe\x70\xee\x81\x4a\x34\x55\x38\x64\x56\xec\x7e\x8c\x53\x35\xcb\x63\x6c\x8a\x89\x47\x79\xd4\x20\xee\x35\xe8\x03\xdc\x80\x6b\x81\x0a\xe5\xbb\x4b\xdb\x83\xba\xfd\x85\x8c\x52\x7c\xa2\xe2\x20\xc7\xa6\x9b\x6e\xbb\x73\x5a\xef\xc6\x3d\x86\x2b\xe4\x52\x91\xc6\x34\x40\x3c\x21\x79\x74\xea\xdb\x4b\xb3\xae\x8c\x86\xfd\x78\x75\xed\xce\xf7\x94\x17\x05\xfc\xd3\x8a\x74\x64\x83\x71\x6d\xdc\x43\x98\x23\xc8\xcd\xf3\x04\xe3\x74\x54\xf1\x4a\x4d\x96\x75\xb0\x39\x72\xf5\xb5\x31\x0e\xfe\xad\x48\x07\x1b\x5c\x7a\x9e\x01\x4d\x63\x82\x43\x20\xaf\x4f\x03\x18\x8d\x39\x3c\xcc\xc9\xc7\x43\x67\xb4\x5a\x36\x30\xf1\x3a\x06\xf4\x81\xf4\x2c\x6f\x64\xec\x32\x49\x33\x68\x31\xe3\x50\xb6\xb4\xb7\xda\xb0\x86\xfe\xc0\xd8\x65\x3c\xbd\x7e\xa9\x45\xee\x2a\x1d\x25\x3f\xdd\x63\xc9\xc5\x7f\x15\x39\x6c\xa1\x3f\x07\x52\x0f\x9d\x08\xf6\xdb\xaf\x59\xbb\x05\x1d\x0f\xfd\x3e\x9c\xd7\x2b\x20\xe6\x70\xd1\x87\x92\xbf\x60\x2a\xe6\x5c\x37\x93\xc6\x92\xc4\x63\xf9\xc8\x29\xa0\xf3\x23\x3f\x86\x3e\x70\x6b\x51\xcb\x74\xe7\xb9\x0b\x62\x1e\x73\x2f\x7b\x62\x5e\x6f\x4c\xc7\xf7\x7a\x5f\xb0\x75\xa8\x90\xfb\x3d\x6c\xdb\xc0\x07\xb6\x1d\x7f\x76\xc6\x58\xb2\x88\x24\x77\x7a\xd7\x42\x14\xea\x74\x91\x6d\xc4\x34\xde\x45\x2a\xac\x15\xb6\x18\x9d\x8f\x63\x79\xfc\xf7\xcb\xc5\x98\x7d\xd2\xb5\xd8\x0b\x24\x51\x61\xc0\x2d\xb5\x5d\xf0\xd9\x3b\xee\x65\xaf\xde\x86\xa8\xf4\x95\xbb\x2d\x5e\xd0\x3a\x59\x72\x3b\x6a\x55\x8c\x47\xe3\x2d\x5f\xff\x0f\x00\x00\xff\xff\x9e\x79\xbb\x91\x8b\x07\x00\x00"), - }, - "/src/internal/syscall": &vfsgen۰DirInfo{ - name: "syscall", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529879090, time.UTC), - }, - "/src/internal/syscall/unix": &vfsgen۰DirInfo{ - name: "unix", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 529929191, time.UTC), - }, - "/src/internal/syscall/unix/unix.go": &vfsgen۰CompressedFileInfo{ - name: "unix.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 530273886, time.UTC), - uncompressedSize: 149, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x3c\xc8\x3f\xaa\xc2\x40\x10\x07\xe0\xfa\xcd\x29\x7e\x65\xc2\x0b\xc4\x0b\x78\x00\x1b\x2b\x2f\x30\xd9\x3f\x61\xcd\x3a\x13\x66\x67\x41\x10\xef\x2e\x42\xb0\xf8\x9a\x6f\x9e\xf1\xbf\xf4\x52\x23\xee\x8d\x68\xe7\xb0\xf1\x9a\xd0\xa5\x3c\x89\x82\x4a\x73\x18\x4b\xd4\xc7\xcd\x78\xc7\x19\xa7\x23\x73\x73\x76\xf6\xdf\x52\xee\x12\x70\x69\x57\x95\xa5\x6a\xd8\x86\x1c\x51\xc4\x47\x0c\x72\x4c\x91\x15\x8b\x6a\x9d\x90\xcc\xbe\xd4\x46\xbc\xe8\xcf\x92\x77\x13\x64\xae\x2d\x4d\x90\x52\xe9\x4d\x9f\x00\x00\x00\xff\xff\x38\x6c\xdd\x48\x95\x00\x00\x00"), - }, - "/src/internal/testenv": &vfsgen۰DirInfo{ - name: "testenv", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 216787484, time.UTC), - }, - "/src/internal/testenv/testenv.go": &vfsgen۰CompressedFileInfo{ - name: "testenv.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 216853257, time.UTC), - uncompressedSize: 424, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x8f\xc1\x6a\xc3\x30\x0c\x86\xcf\xd1\x53\x08\x9f\x12\x36\x92\xfb\x6e\xa3\x8c\xf5\xd6\xb2\x3e\x81\xeb\x2a\x8d\xbb\x58\x2e\x92\xb2\xb4\x8c\xbe\xfb\xf0\xd6\x52\xd8\x06\x3e\xfd\x9f\xfd\xf1\xb9\xeb\xf0\x61\x3b\xc5\x71\x87\x07\x05\x38\xfa\xf0\xee\xf7\x84\x46\x6a\xc4\x1f\x00\x31\x1d\xb3\x18\xd6\x50\x39\x99\xd8\x62\x22\x07\x95\x53\x93\xc8\x7b\x75\xd0\x00\x74\x1d\x2e\xbd\xbe\x9c\x28\xa0\x50\xb9\xac\x38\x0f\x64\x03\x09\xda\x40\x18\x26\x11\x62\x43\x3d\xab\x51\xc2\xe0\x19\xd5\xbc\x18\x32\xcd\x78\x94\x1c\x48\x95\xb4\x58\x26\x8d\xbc\xc7\xac\xed\xa6\xf0\xf5\x0f\xc2\x2c\x58\xa7\x2c\x84\x21\xa7\x94\x79\x3c\x37\x48\x27\x0a\xed\x22\xa7\xe4\x79\xd7\x42\x3f\x71\xb8\x15\xd4\x0d\x6e\x73\x1e\xf1\x13\x2a\x9d\xa3\x85\x01\xaf\xd1\xed\xeb\x6a\xb5\x29\x73\xf0\x4a\xe8\xd8\x87\xd1\x3d\x41\x55\x09\xd9\x24\x8c\xbd\x1f\x95\x6e\x70\xe7\x65\x8e\xfc\x8d\x63\x8f\xd7\xaf\xb6\x4b\xaf\x6b\xa1\x3e\x9e\xea\xbb\xf2\xf9\x6d\xb1\x7c\x44\xe7\x25\xb9\xa6\xc8\x7f\xfb\xaa\x0b\x94\xf3\x27\xa5\xbc\xbb\xc7\x1c\xf4\x9f\x94\x0b\xdc\x06\x93\x89\xe0\x02\x5f\x01\x00\x00\xff\xff\xdc\xf8\xeb\x9e\xa8\x01\x00\x00"), - }, - "/src/io": &vfsgen۰DirInfo{ - name: "io", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 217482019, time.UTC), - }, - "/src/io/io_test.go": &vfsgen۰CompressedFileInfo{ - name: "io_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 217540394, time.UTC), - uncompressedSize: 574, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\xd0\x41\x4b\xfb\x40\x10\x05\xf0\x73\xf7\x53\x0c\xbd\xfc\x9b\xbf\x92\x7e\x06\x29\x46\x10\xbc\x98\x82\xc7\xb2\x26\xcf\x64\xec\x66\x76\x99\x9d\x45\x51\xfc\xee\xd2\xa6\xa7\x52\x0f\xde\x3c\x2d\x3c\x78\xcb\xef\xcd\x7a\x4d\x57\xcf\x85\x43\x4f\xaf\xd9\xb9\xe4\xbb\xbd\x1f\x40\x1c\x77\x86\x6c\xce\xf1\x94\xa2\x1a\xad\xdc\x62\x79\x08\x58\x86\xa5\xab\x9c\x7b\x29\xd2\xd1\x16\xd9\x1e\x4a\x30\x7e\x52\x36\xe8\xee\xf8\xb4\xa6\x2c\x43\xcb\x32\x04\xdc\x84\x10\xbb\x95\xd1\xff\x53\xb5\xde\x56\xf4\xe9\x16\x56\xb7\x7b\x4e\xab\xca\x7d\x9d\x7f\xf4\x08\xdf\x43\x9b\xe0\xcd\x20\x3f\x16\x8f\x12\x52\x04\x46\xa6\x28\xa4\x45\x8c\x27\xd4\x1b\x1f\x02\x34\x93\x97\xfe\x3c\x6b\xd4\x4f\xc8\xd7\xf4\x36\x72\x37\xd2\x5d\x4c\x23\xf4\xbe\xa5\x3e\x22\xcb\x3f\xa3\x5c\xd2\x61\xe6\xf2\x02\x69\xde\x36\xef\xd9\x8c\x9e\xe5\x4f\xe9\x4e\x07\x53\x20\xdf\xbe\x8f\xbe\x64\x43\x3f\x67\xf9\xd7\xc0\x16\xd6\xb0\xf8\xc0\x1f\xd0\x8b\x16\x92\x68\xc4\x53\x0a\x98\x20\x33\xe7\x3b\x00\x00\xff\xff\x75\x6f\xe1\xab\x3e\x02\x00\x00"), - }, - "/src/math": &vfsgen۰DirInfo{ - name: "math", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 531261141, time.UTC), - }, - "/src/math/big": &vfsgen۰DirInfo{ - name: 
"big", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 217805546, time.UTC), - }, - "/src/math/big/big.go": &vfsgen۰CompressedFileInfo{ - name: "big.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 217740972, time.UTC), - uncompressedSize: 174, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x44\x8d\xbd\xaa\xc3\x30\x0c\x46\x77\x3f\x85\xf6\x0b\x11\x5c\x68\x87\xcc\xdd\x03\x25\xd0\xd9\x89\x15\xdb\xf9\x93\x91\xe4\x94\xbe\x7d\x49\x3b\xf4\x9b\xbe\xe1\x70\x0e\x22\xfc\x0d\x35\xaf\x01\x66\x75\xae\xf8\x71\xf1\x91\x60\xc8\xd1\x39\x44\xe8\xbb\x5b\xd7\x42\x9f\xb2\x42\x56\xf0\xf0\x64\x59\xbc\x70\xdd\x03\x4c\x2c\x90\xcc\x8a\xb6\x88\x31\x5b\xaa\x43\x33\xf2\x86\x91\x4b\x22\x99\xf5\x77\xb2\x6a\x25\xc5\xeb\xe5\xbf\x39\x95\xdf\xdd\x69\xe3\x83\xc0\x4f\x46\x02\x96\xbc\xc1\x07\x3b\x2b\x42\xca\xeb\x41\xa1\x71\xf6\x2a\x04\x0f\x96\x00\x35\xef\x56\x4c\xdc\x3b\x00\x00\xff\xff\x55\xc0\x14\x01\xae\x00\x00\x00"), - }, - "/src/math/big/big_test.go": &vfsgen۰CompressedFileInfo{ - name: "big_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 217834611, time.UTC), - uncompressedSize: 148, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd7\x57\xd0\x4e\x2a\xcd\xcc\x49\x51\xc8\x2a\xe6\xe2\x2a\x48\x4c\xce\x4e\x4c\x4f\x55\x48\xca\x4c\xe7\xe2\xca\xcc\x2d\xc8\x2f\x2a\x51\x50\x2a\x49\x2d\x2e\xc9\xcc\x4b\x57\xe2\xe2\x4a\x2b\xcd\x4b\x56\x08\x49\x2d\x2e\x71\xaa\x2c\x49\x2d\xd6\x28\x51\xd0\x82\xca\xe9\x85\x68\x2a\x54\x73\x71\x96\xe8\x05\x67\x67\x16\x68\x28\x25\x15\xe5\x67\xa7\xe6\x29\x69\x72\xd5\x22\xe9\xf1\xcd\x4f\x09\x2e\x2c\x2a\xc1\xad\xab\x38\x27\xbf\x1c\xac\x07\x10\x00\x00\xff\xff\x9b\x59\x2d\xf0\x94\x00\x00\x00"), - }, - "/src/math/bits": &vfsgen۰DirInfo{ - name: "bits", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 530550273, time.UTC), - }, - "/src/math/bits/bits.go": &vfsgen۰CompressedFileInfo{ - name: "bits.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 530755801, time.UTC), - uncompressedSize: 314, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x8e\xc1\x4a\xc5\x30\x10\x45\xd7\x9d\xaf\xb8\x74\x95\x20\xbc\xec\x05\x97\xfe\x80\x3f\x20\xed\xeb\xbc\x32\xda\x26\x65\x92\x54\x6a\xf1\xdf\xc5\x24\x82\xb8\x7a\x9b\x2c\xce\xcd\x39\x8c\x73\x78\x18\xb3\x2c\x13\xde\x22\xd1\x36\x5c\xdf\x87\x99\x31\x4a\x8a\x44\xe9\xd8\x18\xaf\xac\x8a\x98\x54\xfc\x4c\x74\xcb\xfe\x0a\x53\xa1\xc5\xb3\x6a\x50\x63\xdb\x8a\x93\x3a\xe5\x94\xd5\x37\x60\xd8\xd2\x17\x91\x73\x78\xc9\x3e\xc9\xca\xe5\x3f\x64\xdd\x16\x5e\xd9\xa7\x08\xad\xfc\x52\x86\xcb\xbf\xfa\x5f\xc9\x58\x9c\x3f\xad\x7d\x50\x18\xea\xc2\xce\x7a\x5b\xc2\x47\x0d\x72\x79\x9f\x8a\x66\xfa\xd6\xac\xf4\x11\xe2\x13\xcf\xac\xf8\x55\x7a\x4b\xdd\x24\xbb\x4c\xed\x1a\xdc\xa7\x57\x05\xe3\x81\x4f\xd6\xd0\x5b\xb2\xf4\x1d\x00\x00\xff\xff\x76\x78\x13\x86\x3a\x01\x00\x00"), - }, - "/src/math/math.go": &vfsgen۰CompressedFileInfo{ - name: "math.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 531123377, time.UTC), - uncompressedSize: 4581, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6e\xdb\x38\x13\xbd\xb6\x9e\x62\x3e\xe3\x43\x57\xda\x2a\xb2\xe5\x04\x41\x51\xc4\x05\xba\xc1\xa6\x5b\xa0\xed\x2e\x36\xed\xde\x04\xbe\xa0\x64\xd2\xa6\x2b\x91\x2a\x49\xc5\x72\x9b\xbe\xfb\x82\xd4\x1f\x25\x5b\xb1\xbd\x57\xb6\xc8\x33\x67\xce\x8c\x66\xc8\xd1\x64\x02\x2f\xa3\x9c\x26\x4b\xd8\x48\xc7\xc9\x50\xfc\x15\xad\x30\xa4\x48\xad\x1d\x87\xa6\x19\x17\x0a\x5c\x67\x34\x5e\x51\xb5\xce\xa3\x20\xe6\xe9\x64\xc5\xb3\x35\x16\x1b\xd9\xfe\xd9\xc8\xb1\xe3\x39\xce\x23\x12\xc6\x10\xe6\xb0\x91\xc1\xbb\x84\x47\x28\x09\xde\x61\xe5\x8e\x3f\x22\xb5\x1e\x7b\x06\xf0\x1d\x0b\x0e\x24\xe1\x48\x5d\x5f\xc1\x1c\xa6\x66\x31\xe3\xf2\x3d\x23\x30\x87\x10\x26\x06\x61\x56\x19\x5e\x95\xab\x17\xdd\x65\xc4\xb4\x61\xbd\xe4\x90\x9c\xc5\xf0\x36\xe6\xd2\x2d\x6a\x62\xaf\xf1\xf0\xc3\x19\x09\xac\x72\xc1\x8c\xb2\xe0\x16\x25\x89\x3b\x46\x31\x97\x63\x1f\x0a\x2f\xb8\xd3\x30\xd7\x73\x7e\x5a\x34\xeb\xb3\x78\xd6\x03\x44\x92\xb2\xd3\x79\x24\x65\xc3\x34\x67\xe8\xd1\xe8\x01\x22\x85\xce\xd0\xa3\xd0\x90\x1e\x85\xce\xd1\xa3\xd1\xc3\x44\x33\x77\xe7\xc3\x39\x5c\xb3\xb1\x0f\xbb\x83\x74\xb7\x91\x50\x27\xcb\x8a\x23\xa1\x0e\xab\xba\xc5\x34\x39\x9d\x06\xd3\x64\x80\x86\x67\x3b\x49\x57\xcc\x2d\x7c\xd8\x1d\x64\xa3\x04\xdc\x02\x6e\x60\x0a\x4f\x4f\x10\x4e\x0a\x98\xcf\xab\x72\xf7\xe0\x7f\x73\x70\x77\xed\xde\xce\xde\xfb\xe1\x8c\x6a\x25\x17\x85\x33\xfa\xd9\xe8\x2a\x2c\xe7\xa7\x37\xc2\x60\x1f\xdc\x9e\xd3\x06\xc3\x5d\xf0\xbb\x20\xcf\xb3\x60\x0d\xe8\xe0\xe3\xa3\x06\x71\xc7\xa2\xc8\x4e\xd6\x89\x8b\x6c\x40\x66\x91\xcd\x4e\x66\xc9\xf8\x76\xec\xc3\x6c\x88\x28\x0d\x8f\x04\x50\x42\x5a\x9b\xbb\x84\x73\x71\xb2\x77\xa2\xd1\x87\xa3\xb8\x13\xb8\xc8\x5c\xd2\x12\xb9\x44\xa0\xb8\x7e\xf4\xb5\x67\xa0\x4c\x79\x16\x31\x29\x4d\x5a\x8e\x3f\x76\x19\x57\x6e\xe6\xc3\xb7\xe7\xf4\xac\x1b\x54\x6b\xf9\x9e\x11\x57\xd7\x7c\xe9\xc2\xb2\x91\x5b\xaa\xe2\xb5\xfe\x17\x23\x89\xc1\x60\xde\xcc\x61\xfa\xba\x2d\xe5\xf2\xf8\x77\x46\x4b\x4c\x50\x9e\x28\x6b\xa7\xac\x7b\x5d\xe8\x8d\x1f\x0d\x6d\xa3\xf4\xa1\x75\x1a\x71\x9e\x54\xcd\x45\x74\xd3\x54\xb7\x8a\xd5\x33\x8d\x73\xd3\x3a\x35\xae\xba\x67\xfa\xb8\x9b\x1a\x57\x27\x0b\x25\x12\x5b\x3a\x3e\xa1\x4f\x9d\x6c\x53\x69\x14\x74\xf2\xab\x9b\x99\x34\x36\x1f\x96\x26\xdd\x87\xdf\x4a\xf7\x74\xb8\x08\xa7\xb3\x2b\xb8\x31\xdb\x2f\x5e\x98\x9f\x1b\x30\x6b\x3f\x60\x32\x81\x2f\x12\x83\xbe\x54\x83\x8c\x6f\x81\x70\x01\x32\x45\x49\x62\x60\x8f\x28\xc9\xb1\x84\xed\x1a\x0b\x0c\x54\xfd\x22\xe1\x91\xa2\x28\xc1\x01\xdc\x71\x01\x19\x16\x84\x8b\x14\xb1\x18\x07\xce\xc8\xa4\x40\xcb\x99\xeb\x0b\x55\x27\xa0\xad\x0c\x14\x3b\x23\x1d\xbd\xbd\x02\xbf\x1e\xec\x04\x5c\x64\x6d\x39\x5a\x19\x4b\x9a\x78\x4b\x4c\x9b\x08\xbe\x1a\xa8\x78\x4a\xa0\xd0\x49\x2b\xca\x38\xb7\x5c\x7c\x45\x82\xe7\x6c\x69\xa2\xe4\x99\xa2\x29\xfd\x8e\x05\x44\xf9\x0a\x28\x83\x7f\x5e\xf9\x20\x70\xca\x1f\x31\x20\x05\x92\xa7\x18\x32\x4e\x99\xb2\x2a\x08\x31\x5b\x92\x25\x3f\xe1\xab\xc3\x8d\xf4\x81\xaf\xc2\xe9\xf3\x1d\x99\x94\x90\xae\xcd\x91\x93\x28\x29\x21\x1d\x9b\x23\xc7\x4e\x62\x10\xad\xc5\x47\x54\x0c\xdf\x29\x4d\x84\x25\xc6\xb2\xa2\xcf\xdc\x44\xb5\x55\x85\xb1\xac\xf8\xf2\xa8\x55\x3b\xe6\x95\x29\xfd\x7f\xca\x97\x3a\xa7\x9a\x68\x2f\xad\x1f\xf9\x92\x74\x8f\xa7\xba\x07\x9a\xa5\xfd\xe6\x7d\x7a\x1a\xea\x51\xe2\x37\xef\x96\x12\x08\x27\xc3\x30\x73\x7e\x8c\x4c\xfd\xbe\x9e\x9b\xb8\x88\x0f\xa1\x67\x75\xe9\x05\x94\x45\x6a\xaa\xbe\xd6\xab\xfb\xfb\x50\xd0\xda\x6b\x8d\xf9\x8b\x6f\x9f\xbd\xe4\xcd\xc5\x1e\xea\x28\x5c\xf3\xf7\x22\xd4\xdd\xec\xee\xba\x11\xda\x57\x7c\xe7\x8e\x0f\x07\x4a\xb7\xec\xbc\xc3\x69\xfe\x1b\xa7\x88\xb2\x25\x16\x47\xdf\x9e\xe8\x20\x5b\x86\x7b\xba\x62\x11\xed\xcc\x53\xf5\xd
1\x5a\x4f\x1b\x07\x47\x17\x8b\xe0\xf4\x59\x73\x70\xf4\xbd\x3f\x67\xf2\x1d\x1e\x7c\xef\x29\xeb\x7d\x1a\xb8\x92\x32\x1f\x62\x2e\x3b\x75\x57\x71\x1a\xe9\x9e\x5f\x4e\x51\x16\xcb\xb7\x33\xe6\x4b\xf9\x6d\x68\xbe\xfc\x7c\xc6\x10\x3e\x38\x83\x7f\x3e\x67\x04\x1f\x9e\xc0\x3f\x8b\x9c\x0d\x0d\x5b\x75\xe5\xb6\x25\x6a\xbd\xe6\xf2\xd1\x9c\xd1\xfd\x0a\xb0\x6b\xb7\x33\x9e\x36\x13\x71\xe5\xc4\xa5\x4c\xb9\x85\xe7\x69\x65\x5a\x91\xfe\xae\x8b\x72\x02\x52\x89\x3c\x56\x9a\x26\xa7\x4c\x5d\xce\x90\x10\x68\x07\xf0\x30\x5b\x94\xcf\xce\xc8\x10\xd4\x1b\x0f\xb3\x45\xf5\x5c\x6d\x5c\x5f\x55\x1b\xe1\xa2\x7a\x6e\xe2\xa5\x8c\x2a\xd7\xbc\x6a\x14\xe9\x73\xa0\xf7\x89\xfa\x56\xdb\xfd\x96\x13\x82\xc5\xd8\x0b\x3e\xe1\xad\xfb\xca\x73\x46\x1b\x19\xbc\x67\x0a\x0b\x86\x92\x3f\xa3\x0d\x8e\x95\x1b\xe5\xc4\x0b\xee\xb5\x85\xa5\x70\xec\xf7\xe9\xbe\x98\x4d\x43\x5a\xd1\xa1\xc8\x3b\x42\x68\x87\xb6\xcf\x78\x57\xee\xfe\x07\xca\x2a\x29\x03\x94\xd7\x57\x7b\x94\xd6\x68\xaa\x5d\x46\x54\xc9\xfa\xe0\xbe\x9c\x79\x50\x06\xae\x33\x19\xe5\x24\xb0\x55\x3f\x4c\x17\xa0\x07\x9e\xfa\xb5\xeb\x7d\x2b\x4d\x0f\xd3\x45\x9f\x9b\x08\x9e\x1a\xfe\xa8\xa2\xf5\x6a\x3f\x35\x7f\xd7\x1e\xe6\x10\x75\xe8\x7b\xee\xbb\xfc\xd7\x57\xb6\x76\x5d\xe4\x9a\xad\xac\xf1\xc6\xb8\x4a\x4f\x5f\x7b\x89\x74\xfb\x12\xc2\x85\x77\x73\x73\x39\x83\x97\x43\x80\xe9\xc2\xeb\x8b\xe8\x05\xd9\x6b\xb6\x83\x41\x96\x0b\x6e\xe4\xed\xef\x87\xf6\x3e\xbc\x79\x03\x97\x33\x6f\x3f\x25\x6d\x54\xce\x4f\xe7\xdf\x00\x00\x00\xff\xff\x85\x20\xa4\x35\xe5\x11\x00\x00"), - }, - "/src/math/math_test.go": &vfsgen۰CompressedFileInfo{ - name: "math_test.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 531663209, time.UTC), - uncompressedSize: 704, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x92\x3f\x6f\xdb\x30\x10\xc5\xe7\xf0\x53\x3c\x78\x89\xdd\xca\x16\x02\xb8\x19\xba\x78\x69\x50\x64\x28\x5c\x20\xde\x8b\x93\x7c\x92\xae\xa1\x48\x95\x77\x8a\x2c\x04\xf9\xee\x85\x6c\xb7\xca\x12\x4e\xfc\x73\xf7\x7b\xef\x1e\x98\xe7\xf8\x5c\xf4\xe2\x8f\xf8\xad\xce\x75\x54\x3e\x53\xcd\x68\xc9\x9a\x5f\xc6\x6a\xce\x49\xdb\xc5\x64\x58\xba\x9b\xc5\x74\x21\xa1\x5e\xb8\x95\x73\x79\x8e\x27\x2f\x75\xe3\x47\x34\x52\x37\x9c\x60\xd1\x73\xa2\x50\xb2\xc2\x1a\x0a\xe8\x3b\xb5\xc4\xd4\x66\x88\xd6\x70\x1a\x44\x19\x07\x56\xfb\x4e\x6d\x4b\xa8\x48\xbc\x6e\x26\xcc\x61\xff\x6d\xff\x15\x8f\x53\x17\x27\x06\xa1\x60\x33\x4e\x18\x68\x84\x45\x54\x72\x9a\xdb\x76\x78\xb4\x5b\xc5\xc0\x92\x8e\x93\x8a\x21\x06\x3f\x22\x06\xc6\xd9\x6d\x9e\xe3\xb2\x12\xff\xe9\x25\xb1\x42\x42\x99\x98\x54\x42\xfd\xce\xe0\x06\x3f\x39\x35\xd4\x5d\x35\x6f\x75\x56\xad\xe4\xb4\xc3\x0f\x1a\x0b\xc6\xc0\x33\x4f\x9b\xd8\xfb\x23\xe2\x0b\xa7\x24\xc7\xf7\x83\x68\xc7\xa5\x54\x52\x92\xf7\x23\x28\x1c\x11\xa2\x4d\x58\x5c\xb3\x5c\x0f\x53\xfd\xac\x9d\xcd\xd0\x82\x4b\xea\x95\x61\x8d\x28\x06\xf1\x1e\x97\x73\x4b\x61\xbc\x84\x76\x9e\x4a\xa7\x18\x0a\x86\x67\x55\x50\x59\xf6\x89\x8c\x37\xd8\x27\xb4\x67\x9f\x53\xfb\x0c\x15\x45\x25\x81\x77\xae\xea\x43\x89\xd2\x47\xe5\x25\x65\x28\x50\xf9\x48\x76\xbf\x5d\xa1\x88\xd1\x9f\x4b\x5f\x91\xd8\xfa\x14\x66\x77\xe7\xca\x0c\x5b\x5e\xdf\x6d\x57\x78\xbb\x30\x5e\x38\x8d\x1f\x72\x3e\x64\xdc\xf3\xfa\xee\xcb\xc4\xb8\x40\xa6\x41\x1e\x4e\xdd\xd2\xf0\xe9\xfa\x8d\x36\x87\x0c\x0f\xa7\x0e\xd3\xf3\xf2\x3f\xf4\xba\xc9\x10\xa8\x65\xa8\x25\x09\xf5\x0a\xaf\xee\xc6\x36\x4f\xcf\xd2\x2d\x17\x12\xfe\x45\xb0\x58\xb9\x37\xf7\x37\x00\x00\xff\xff\x4e\x32\x53\x1a\xc0\x02\x00\x00"), - }, - "/src/math/rand": &vfsgen۰DirInfo{ - name: "rand", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218308925, time.UTC), - }, - "/src/math/rand/rand_test.go": &vfsgen۰CompressedFileInfo{ - name: 
"rand_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218341129, time.UTC), - uncompressedSize: 160, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xcb\x51\x0a\xc2\x30\x0c\x00\xd0\x6f\x73\x8a\xd0\xaf\x4d\x61\x03\x3d\x82\xe0\x05\xdc\x05\x6a\x57\x4b\x5c\x4d\x4a\x93\x22\x22\xde\x5d\x10\x3f\xfc\xd9\xf7\xe3\x8d\x23\xee\x2e\x8d\xf2\x8c\x37\x05\x28\x3e\x2c\x3e\x45\xac\x9e\x67\x00\xba\x17\xa9\x86\xce\xa2\x1a\x71\x72\x00\xd7\xc6\x01\xa7\xa8\x76\xca\xe2\xed\xb0\xef\x0c\xb7\x3f\x1d\xa6\x1e\x5f\xb0\xb1\xe1\xbc\x50\xe9\x9c\x66\x79\xb8\x1e\xde\x7f\xe7\x28\x1c\x5a\xad\x91\x6d\xbd\x35\x25\x4e\xc8\xa2\x4f\x0e\xdf\xfe\x09\x00\x00\xff\xff\x3d\xb4\x3b\xb8\xa0\x00\x00\x00"), - }, - "/src/net": &vfsgen۰DirInfo{ - name: "net", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218893255, time.UTC), - }, - "/src/net/http": &vfsgen۰DirInfo{ - name: "http", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218761034, time.UTC), - }, - "/src/net/http/cookiejar": &vfsgen۰DirInfo{ - name: "cookiejar", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218529933, time.UTC), - }, - "/src/net/http/cookiejar/example_test.go": &vfsgen۰CompressedFileInfo{ - name: "example_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218568509, time.UTC), - uncompressedSize: 269, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\xcc\x41\x4e\xc3\x30\x10\x85\xe1\x75\xe7\x14\x4f\x5d\xb5\x02\x35\x82\x65\x77\xa8\x02\x24\x16\x05\xd1\x03\xd0\xa9\x3d\x21\x6e\x1c\xdb\x78\x26\x0d\x08\x71\x77\x14\xb1\x65\xfb\xf4\xbd\xbf\x69\x70\x75\x1a\x43\xf4\x38\x2b\x51\x61\xd7\xf3\xbb\xc0\xe5\xdc\x07\x39\x73\x7d\x33\x51\x23\x0a\x43\xc9\xd5\xb0\x6c\x07\x5b\x12\xb5\x63\x72\xb8\xff\xe4\xa1\x44\xd9\xcb\xb4\x5a\xe3\x9b\x16\x4d\x83\x24\x36\xe5\xda\x83\x9d\x13\x55\xa4\x6c\xd0\xb1\xcc\x4f\xf1\x38\x7d\xe1\x31\x97\x4e\xea\xd3\xe1\x1a\x9c\x3c\xac\x0b\x8a\x39\x0f\x2f\x45\x92\x57\xe4\x84\xce\xac\xcc\xdb\x66\x2f\xd3\x41\xea\x45\x2a\xd1\xa2\x1d\x6c\xf3\x52\x43\xb2\x98\x56\xc7\xbb\xd6\xa4\xe2\x46\x0d\x55\x3e\x46\x51\xdb\x12\xf0\x10\xf9\x92\xeb\x16\xbb\x2e\xbb\x1c\xd9\x04\xbb\x2e\x14\xfa\xb3\xb7\xc9\xff\x67\x9f\xd9\x06\xe1\x88\x57\x0e\x1a\xd2\x71\x4d\x3f\xf4\x1b\x00\x00\xff\xff\x4a\xaa\xb1\x5a\x0d\x01\x00\x00"), - }, - "/src/net/http/fetch.go": &vfsgen۰CompressedFileInfo{ - name: "fetch.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218698588, time.UTC), - uncompressedSize: 3551, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5f\x6f\xdb\x36\x10\x7f\x16\x3f\xc5\x4d\xc3\x3a\x29\xb5\xa5\x16\x28\xfa\xe0\xc5\x0f\xa9\x9b\x76\xc1\xda\xa5\x48\xb2\xa7\x20\x18\x68\xe9\x24\x31\x91\x48\x85\xa4\x92\x18\x81\xbf\xfb\x70\xa4\x24\xcb\x49\xda\x62\x01\xea\x4a\xe2\xf1\xee\x77\x77\xbf\xfb\x93\xa6\xf0\x7a\xdd\x89\x3a\x87\x6b\xc3\x58\xcb\xb3\x1b\x5e\x22\x54\xd6\xb6\x8c\x89\xa6\x55\xda\x42\xc4\x82\x10\xb5\x56\xda\x84\x2c\x08\x8b\xc6\xd2\x7f\x42\xf9\xdf\x54\xa8\xce\x8a\x9a\x5e\x8c\xd5\x99\x92\x77\x21\x63\x41\x58\x0a\x5b\x75\xeb\x24\x53\x4d\x5a\xaa\xb6\x42\x7d\x6d\x76\x0f\xd7\x26\x64\x31\x63\x69\x0a\xc6\x6a\xe4\xcd\x19\xf2\x1c\x35\x88\xa6\xad\xb1\x41\x69\x0d\x70\x09\x42\x25\xf4\x7d\x55\x2b\x83\x1a\xee\x35\x6f\x5b\xd4\x50\x28\x0d\xf4\x99\xaf\x6b\x3c\x77\x97\x41\x15\x0e\xae\x59\xa4\x69\x81\x36\xab\x12\xd3\x62\x96\xdc\x57\xdc\xde\x97\x89\xd2\x65\x9a\x30\xbb\x69\x71\xdf\x96\xb1\xba\xcb\x2c\x3c\xb2\xa0\x45\x99\x0b\x59\xc2\xe5\xd5\x7a\x63\x91\x05\x5e\x0c\xe0\xe0\xda\x24\xa7\xeb\x6b\xcc\x2c\xdb\x32\x56\x74\x32\x83\x48\xc3\xc1\x54\x4b\xec\xa0\x44\x6d\x7f\x37\x86\x48\x82\x90\x76\x06\xa8\x35\xb8\x88\xc5\x64\x41\x14\x50\xa3\x8c\x74\xd2\x9b\x8a\x61\xb9\x84\x37\x74\x12\xdc\x71\x4d\xe1\x0d\x82\xf5\xaa\x02\x80\x25\x34\xfc\x06\xa3\xac\xe2\x72\xd0\x49\x87\xa8\xf5\xaa\xda\x3b\xf4\xca\x59\x10\xd0\x3f\x9d\x78\x50\xc9\x8a\xd7\x75\x14\x6a\xe4\x79\x18\xf7\x2f\xb6\x42\x19\xce\x48\x09\x79\x10\x69\x34\x5d\x6d\x27\xbe\x39\x80\x41\x40\x18\xfd\x59\xf2\x19\x6d\x14\xe6\x4a\x62\x18\x27\x1f\x94\xaa\xa3\x41\xa4\x87\x71\x38\xa7\xd4\x1c\x9f\x7e\xf2\x1f\x35\xda\x4e\x4b\xf7\xbc\x75\xbf\x6b\x2f\x33\xd5\x76\xc7\xeb\x8e\xd4\x9d\x48\x8b\xba\xe0\x19\x46\x71\x12\x4d\xfc\xdb\x4e\x01\x72\xa3\xe4\x0b\x00\xd3\x14\x8e\x8c\xe9\x1a\x34\x20\xec\xef\x06\x38\x7c\x3c\xfd\x7a\xfc\x90\x61\x6b\x85\x92\x09\xdb\x03\xe8\xd9\x9a\xfc\x8d\xf7\xbd\x42\x8f\xa3\x41\x63\x78\x49\x48\xce\xad\x16\xb2\x8c\xe2\x9d\x79\x7a\x32\x58\xa3\x27\x45\x90\x71\x83\xb0\x86\xc5\x12\x0e\xe7\xeb\x55\xb5\x20\xb9\x31\x81\xb0\x84\xf5\x20\x43\xa9\x76\x52\xce\xb8\x97\x73\x21\x81\x37\x8e\x07\xcc\xc5\x65\xcb\x02\x09\x4b\xc8\x54\xbb\x89\xda\x19\xec\xa8\xc0\xf6\xb4\x8e\xcf\x97\x72\x71\xc5\x06\x45\x72\x06\x52\xd4\x3f\x60\xa1\xab\x91\x28\xf6\x6e\x13\xfc\x34\x85\x8b\x4a\x18\x10\xa5\x54\x1a\xa9\x9c\x36\xfd\xa1\x57\x89\x39\x14\x5a\x35\x90\x71\x99\x61\x0d\x0d\xda\x4a\xe5\x09\x9c\x2b\x28\xb8\x9e\xc1\x09\xe4\x22\x07\xa9\x2c\xa0\xcc\x54\x47\x59\x73\x2a\x32\x25\x33\x8d\x54\x24\x54\xba\xc2\x76\x9c\x62\x0f\xf7\x15\x6a\x04\x8d\xd4\x2c\xc8\x0f\x5b\x61\x6f\x4d\x18\x68\x90\x4b\x21\xcb\xa2\xab\x13\xf8\xaa\x8c\x85\xce\xa0\x1e\x90\xf5\x62\x0e\x8b\x46\xd3\x26\x1f\x54\xbe\x49\x7a\x77\x12\x67\xe6\xa4\x20\x7d\x1a\x5d\xca\x25\x62\x0e\x56\xf5\xb6\xfa\xdb\x74\x3a\x03\x61\xc9\x1b\x58\xe3\xae\x8d\x60\x0e\x5c\xe6\x60\xd1\xd0\xe3\x7d\x85\x12\x6c\xc5\xad\xd7\x92\x29\xa2\x52\xd7\x26\xec\x69\xfd\xf8\xa0\x84\xf1\x2e\xfe\x3e\xf8\x69\x0a\xae\xbf\x5c\x68\x2e\x8d\xb3\x2f\x08\xd3\x99\xea\x64\x7e\xa1\x85\x6b\x4f\x4e\x3f\x05\x7e\x82\xa1\x33\x14\x94\x4f\x74\x15\x8e\xbe\x9d\x24\x70\x62\xc1\x74\x2d\x69\x30\x7d\x53\x12\xb2\x24\xf5\x14\x02\x25\x89\x78\x2a\x17\x68\xfa\xbe\xf5\xc4\xa8\xef\x5c\x8f\x23\x1b\x2c\x1c\xec\x4b\xc4\x3b\x48\x91\xc6\x5b\x38\x38\xc3\xdb\x0e\x8d\x8d\x21\x3a\x38\xeb\x2d\xcc\x26\xed\xa9\x72\x2c\x32\xc4\xe2\x6b\x93\x7c\xae\xd5\x9a\xd7\xbe\x5e\xfe\xf4\x27\x61\xec\x2a\x29\x66\x01\x75\xdf\x1b\xdc\xcc\xc0\x55\xb4\xbb\xa2\xb9\x2c\x29\xf9\xb7\x89\x97\x76\xd5\x43\x72\xff\xf6\x52\x3b\xa1\xfe\x92\xab\xe7\xde\x68\x1f\x72\xea\xed\x32\x0f\x67\x13\xe5\xf1\x58\x38\xaa\xb5\xa4\xa3\xe1\xed\xa5\x71\x65\x7b\x25\x86\x3e\xf
2\xb8\x25\x65\xa1\xe7\x6f\xb8\x00\xf7\x47\x58\xbe\xba\x2f\x54\xd7\x61\x6f\xa9\x3f\xed\xdf\xdc\x49\xa6\x31\x47\x69\x05\xaf\xe9\x34\x34\xbc\xc1\xb9\xd2\xa2\x14\xae\x63\x6e\x99\x6f\x8a\xb7\x8e\x94\xf0\xcb\x92\x78\xe0\xc0\x53\x75\x9d\x7e\x3c\x5d\xc0\x27\x21\x73\x50\x9d\x05\x2f\x48\x41\xa6\xd4\x6d\x06\x26\xfa\xe4\x62\x4e\x43\x41\xb9\xb2\x70\x99\x1a\x65\x35\x27\x6a\x13\x69\x68\x6e\x00\xcf\xef\x88\x7a\x8e\xd0\x89\xb7\xe3\xff\xce\x11\xe1\x43\x57\x14\xa8\xcf\x55\xa7\x33\x04\x6e\x7f\x32\xf2\x7e\x25\x18\xf3\x46\x3c\x08\xd7\x1a\xe9\x6d\x36\xb4\x2a\x3f\xb0\xdd\x70\x3d\xaa\xeb\x68\xf0\x90\x02\x2e\x0a\x27\x34\xf1\x35\x18\x8e\x87\xaa\x84\x34\xdd\xf1\x0b\x9a\xce\x58\xe0\xf5\x3d\xdf\x18\xc8\x48\xc0\x79\xe9\xcd\x09\x99\xd5\x9d\x6b\x6c\x4a\x0e\x1d\x79\xd2\x1e\xa5\xa8\x27\x0d\xf2\x99\x1d\x16\x50\xe2\x2f\x43\xd2\x15\x5e\x51\xc7\x55\xf9\xc6\x65\x85\xaa\xe4\x9b\x56\x8d\x30\xb8\xcf\x59\xcf\x25\x17\x90\x70\xe6\x32\xf7\xcf\xd9\x97\xb1\xd5\xcf\x40\xb5\x36\x66\x6c\x9c\xb9\xa4\xe7\xc9\x58\x1d\xeb\x83\xcc\xfb\x69\xf2\xe2\xd8\x8d\xf7\x50\x3c\x1d\xb5\x3f\x9c\xb4\x9e\x80\x04\xdc\xd7\xcb\xe3\xd6\xc7\x64\x37\x2d\xab\xb1\xea\x7a\x87\x94\x3e\xe6\xce\x25\xa7\xd8\x55\x87\xab\x94\x17\xa6\x64\x76\x43\x9a\x57\x5c\x2a\x29\x32\x5e\x7b\x13\x7f\xe1\x26\xba\xc1\xcd\xfe\xd0\xeb\x81\x5c\x66\x37\x14\x5c\x5f\x80\xd1\xee\x5b\x5f\x85\x4f\x06\x25\x85\x2f\x08\x32\x25\x2d\x4a\xfb\x05\x65\x69\x2b\xc7\x28\x69\xdf\xbf\x8b\xe6\x6f\x9d\x90\x28\x20\xab\x47\xb2\xf5\x3b\x61\xf2\x8d\x6b\x83\x27\xd2\xf6\x26\xbc\xa7\x2b\xaf\x68\xee\x35\x85\xf1\x0c\xde\xbe\x99\xc1\xfb\x77\xf1\x1f\xee\xfa\x72\x42\xc3\x27\x46\x97\x90\xd5\x0e\x91\x03\x34\x99\xdb\x7e\x28\xf7\xa9\x3d\x9c\xc3\xab\x21\xa3\x5e\xcb\xb9\xe5\xb6\x33\x7d\xa3\x80\xbd\x25\xc5\xb8\xa3\xc9\x6e\x00\xaf\x21\x84\x10\x5e\x83\xbf\x74\x81\x0f\x36\x7a\xf1\x02\xb9\x15\xc7\xb3\x89\x81\x95\xca\x71\xf1\x5d\x03\x4e\xde\x8b\xfb\x04\x8d\x78\x7c\x70\xfc\xd1\x6a\xea\xf0\x02\xf6\xfc\xf7\x12\x54\x2e\xe3\x55\x80\x57\xd3\xa5\xe0\xd1\xbf\x2c\xf6\x10\xb8\x5a\x1a\x68\x55\xa2\xf5\xa2\x61\xec\xf7\xaf\xa0\x9f\x13\x8b\x31\x38\xb7\xee\xfb\x76\x31\xc6\xf5\x70\x4e\x55\xe5\x90\x3d\xd8\x28\x4e\x3e\x2a\x89\x51\xbc\x60\xfd\xf2\xb7\x9d\xb0\xff\xe5\x35\xee\x59\xa6\xc6\x95\xad\x68\x6c\x72\x4c\xe5\x55\x44\xa1\x44\x9b\x52\x7f\x5b\xf8\x7e\x19\xc5\x50\x70\x51\x63\xbe\x80\xdf\x8c\xab\x6c\xb7\xd2\x8d\xd4\xfc\x5f\xf8\x62\x36\x01\xf1\x93\x4b\x63\xa3\x3f\x5a\xd3\xe4\x1d\xda\xb6\x28\xa0\x55\xc6\x88\x75\x8d\xcf\x86\x3b\x7b\xd6\xdf\x86\x45\x74\xe2\xd5\xa0\xc8\x6f\x1a\x98\xd3\xae\x31\xf2\xd6\x6f\x93\x9e\xc1\x8b\x9d\x3a\xfa\xe0\xf7\xc0\xef\xed\x9d\xcf\xfa\xea\x96\x6d\xd9\x7f\x01\x00\x00\xff\xff\xcd\xea\xf8\xb6\xdf\x0d\x00\x00"), - }, - "/src/net/http/http.go": &vfsgen۰CompressedFileInfo{ - name: "http.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218832233, time.UTC), - uncompressedSize: 2998, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x56\x61\x6f\xdb\x36\x10\xfd\x2c\xfe\x8a\xab\x06\x04\x52\xaa\xc8\x0d\x50\x74\x43\x1a\x63\xc8\xd2\xae\x09\xd0\x74\x85\x93\x02\x05\xba\xa2\xa0\xa5\x93\xc4\x84\x26\x15\x92\x8a\xe3\x15\xfe\xef\xc3\x91\xb2\x22\x3b\xe9\x86\x2d\x5f\x42\x93\xc7\xbb\x7b\x8f\xef\xee\x34\x99\xc0\xf3\x79\x27\x64\x09\xd7\x96\xb1\x96\x17\x37\xbc\x46\x68\x9c\x6b\x19\x13\x8b\x56\x1b\x07\x09\x8b\xe2\x79\x57\x09\x1d\xd3\x62\xe5\xd0\xd2\x02\x8d\xd1\xc6\xaf\x84\x9e\x08\xdd\x39\x21\xe9\x87\x42\x37\x71\x78\xef\x5a\xa3\x9d\xbf\x60\x9d\x29\xb4\xba\x8b\x19\x8b\xe2\x5a\xb8\xa6\x9b\xe7\x85\x5e\x4c\x6a\xdd\x36\x68\xae\xed\xc3\xe2\xda\xc6\x2c\x65\xec\x8e\x1b\x78\x83\x15\xef\xa4\xbb\x32\x5c\x59\x9f\xc2\x14\xaa\x4e\x15\x49\x0a\x33\xdd\xa9\xf2\xca\x88\xb6\x45\x03\xdf\x59\x64\x97\xc2\x15\x0d\xad\x0a\x6e\x11\xae\x6d\xfe\x4e\xea\x39\x97\xf9\x3b\x74\x49\x5c\xa1\x2b\x9a\x38\x85\x67\x53\x3a\xf9\xa4\x4a\xac\x84\xc2\x12\xf6\xf6\x76\x2d\x67\xc8\x4b\x3e\x97\x78\xe9\x0c\xf2\xc5\xe3\x2b\x47\x30\x99\xc0\xb6\x11\x08\x0b\x9d\xc5\x12\xb8\x05\x0e\x45\x83\xc5\x0d\x54\xda\x80\xed\x5a\x9f\xb3\xae\xc0\x7a\x43\xa1\x6a\x30\x68\x5b\xad\x2c\xc2\x5c\x97\x02\x6d\x06\x16\x03\xcb\xf6\x68\x32\xf1\x69\xe6\xb6\xc5\x22\x5f\x36\xdc\x2d\xeb\x5c\x9b\x7a\xf2\x53\xb8\x6d\x73\x16\x45\x06\x5d\x67\x14\xec\x79\xcb\x81\x96\xef\xeb\xa7\x61\x7f\xbe\x78\x7f\xe6\x5c\x3b\xc3\xdb\x0e\xad\x7b\x02\xcc\xc8\xe3\xe7\xb3\xd9\x96\xbf\x32\x50\x3f\x32\x51\x7a\xcb\x60\xcd\xd6\x49\xca\xd8\x64\x32\x3e\x18\xb8\x58\x36\xa8\x40\xa1\x70\x0d\x1a\xf8\x9d\xb2\x85\x93\x8f\xe7\xa0\xb4\x81\xed\xac\xfc\x36\x37\x08\xfc\x8e\x0b\x49\xac\xe6\x70\xee\x80\xcb\x25\x5f\x59\xa8\xb8\x90\x36\x67\x6e\xd5\xe2\x56\x18\xeb\x4c\x57\x50\x1a\x8c\xf4\x00\xc9\xe8\x6c\xa4\x8d\xc4\xe0\x2d\xec\xf7\x81\x52\x48\xf6\x67\x3d\xfb\x19\x78\xd5\xa6\xa4\x97\x0d\x3a\x21\xfb\x5d\x9b\x7f\xc0\x65\xe2\x05\x4c\x0f\x73\x34\xc0\xd0\x55\x8f\xe4\x69\x14\x96\xc0\x0f\x28\xe2\x94\xad\x59\x48\x7c\x4c\x6d\x9f\x39\x05\x16\xaa\x92\xa2\x6e\x1c\x2c\x78\xfb\x65\x93\xe5\xd7\xfd\x6b\x9b\xff\x31\xbf\xc6\xc2\xb1\x01\x9d\x83\xfd\xb1\x8f\xff\x8a\xf0\xbe\x31\x70\x34\xfd\x37\x71\x78\xd4\x29\x63\x91\xa8\xc0\xe5\x43\x72\xd3\x29\x51\x43\x6e\xa2\xf1\xee\x8f\x92\x0e\xca\x18\x99\x7e\x31\x78\xfb\x15\xa6\x70\xdf\x18\x2f\x2a\x34\x50\xa2\x44\x87\xc9\x83\x4d\x06\x06\x6f\x29\x34\x55\xc7\x69\x43\xc9\x2e\xf8\x0d\x26\x45\xc3\x15\x0c\x90\x52\x16\xa1\x31\xbb\xc7\x01\x26\xf3\x28\xf3\x4b\x02\xa6\x95\xd4\xbc\x8c\xb3\x4d\xab\xa0\xd4\x1b\xe4\x25\x9a\x0c\xbe\xd1\xe5\xa1\x2d\x11\xe4\x99\x3f\x49\x7c\x5f\x1b\xff\xa6\xf6\x36\xfa\xfd\xe5\x2b\xed\x24\x14\xe4\x94\x4b\x99\xc4\x35\xba\x13\x29\x37\xb9\x9d\x79\x2b\x1b\xa7\xf9\xa5\x33\x42\xd5\x49\x0a\xcf\x21\xfe\x53\xc5\x69\x9a\xa6\x39\xf9\xb8\x38\xbf\x78\x1b\xac\x92\x94\x45\xd1\x5c\x97\xab\x27\x1e\xe5\x93\x50\xee\x97\x13\x63\xf8\xaa\x7f\x10\x0a\xe8\x4f\x36\x8d\x23\x4e\xd3\xfc\x5c\x39\x34\x15\x2f\x30\x49\xf3\x3e\x33\x62\x20\x2a\xb4\x72\xa8\xdc\x7b\x54\xb5\xf3\x34\x09\xe5\x5e\xbd\x4c\x0e\x0e\x29\x62\xdf\x21\x0d\xde\xe6\x17\xe8\x1a\x5d\x7a\x62\x7c\xdb\x88\xcf\xde\x9e\xbc\x89\xa9\xd4\xe9\xf1\x43\x1d\xd0\xf5\xbe\x65\xe7\x1f\xb9\xb1\x78\xae\x5c\x12\x68\x0c\x09\x9d\x86\x60\x07\x21\x5a\x9c\x66\x70\xf8\x22\x83\x57\x2f\xd3\xd7\xfe\xfa\x48\x37\xbb\x89\x4d\x41\xd2\xee\x9a\x45\xe3\x2e\xf3\xc8\x28\x24\x2f\x51\x25\x44\x56\x4a\x18\xd6\xcc\xb7\x23\x2f\x92\xe3\x03\xd8\xdb\xd0\xef\xa3\x5c\x3a\xee\x3a\x7b\x04\xfd\xdf\xc0\x9c\xf5\xfb\x3b\x4f\x03\x31\x3c\xdf\x35\xb9\xc2\x7b\x37\x32\xcb\x1e\x9c\x9e\xea\x12\x8f\x9e\x76\x4a\xb4\x04\xd3\xf0\xba\x43\xfc\xfe\xb1\x03\x65\xc1\xe2\x74\x8c\xf0\x08\xb6\x00\x7b\x83\xdf\x74\xb9\x1a\x1
c\x00\x84\x69\x9a\x7f\xd0\xed\xa9\xd4\xf6\x09\x55\x06\x62\xfc\xd5\xbe\x14\x37\xb7\x0d\xde\x66\x9e\xb0\x68\xbd\x53\x1c\xbe\x60\x36\xd5\x81\xf0\x50\xba\xa1\x52\x42\x89\x1d\x1f\xfc\xa0\x17\xee\xb4\x3d\xea\xcf\x58\xc6\xe9\xe3\x30\x7c\xae\x8d\xfb\xdf\x61\x4c\xef\xbf\xe0\xaa\xc0\xdd\x08\xa1\x00\x75\x8b\x2a\xce\x46\x7a\x0e\xeb\x4f\xb3\xf7\xc3\x0b\xa6\xa3\x8c\x36\xf5\x73\xb5\x6a\x31\xce\x20\xe6\x54\x64\xf3\xae\xaa\xd0\xc4\x29\x0d\xf5\x86\x5b\x70\x1a\xe6\x08\xbc\x72\x68\x20\x04\x80\x4e\x39\x21\x87\x09\x3d\xef\xea\xbf\x84\x94\x3c\x5f\xe8\xf0\x9f\x06\xb4\x6d\xf4\xf2\xdb\xbc\xab\xf3\xa2\x16\xbf\x8a\x72\x7a\x78\x78\xf8\xe2\xe7\x57\x87\x34\x0e\x0c\x5a\x2d\xef\xb0\x64\x11\x7d\x11\xdc\xe0\x2a\x83\x3b\x2e\x3b\xb4\x54\x5e\x86\xab\x1a\x7d\xd2\x41\x2b\x9e\x18\xb2\xfb\xd6\x5b\x3d\x18\xf5\x97\xbc\xce\x1f\x28\xb0\xe8\xfa\x87\x08\x0e\xe2\x6c\x14\x22\xed\x9f\xdf\x37\x74\x0a\x42\xe2\x1a\x97\xe5\xd8\x8f\x0a\x0c\x03\x4a\x8b\xfe\x90\x94\x35\xf4\x81\x5e\x87\x24\xba\x13\x29\x93\x8d\x33\x8a\x20\x2a\x6f\xf4\x6c\x54\xed\x9b\xe3\xdc\x8b\x36\xf1\xe4\x0e\x03\x0b\x16\x9d\x1d\xa6\x7b\x41\x06\xe0\x1a\xff\x35\xb4\xca\x40\xa8\x42\x76\x25\x7d\x26\x69\xb5\x11\x46\xf0\xb8\x35\xa2\x03\xb0\x47\x71\x1e\x43\xca\xbc\x5f\x02\xc6\x58\x64\x51\x62\x18\xbc\xbe\xe7\x91\x1e\x08\xdb\xf1\x41\xe8\x27\xa3\x0f\x1d\xda\xc8\x28\x5a\x6f\xda\xb3\x70\x7c\xe0\x45\x3b\xfe\x22\x1a\x12\x5a\xff\xc3\xb0\x3e\xf5\x1a\xee\x1f\x6a\x67\x60\x7f\xf7\xaf\x73\xdf\x98\x0c\xf4\x8d\x9f\x4d\xdb\x83\xf3\x35\x6d\x6f\x3f\x56\x28\xac\x34\xc4\xfc\x3b\x00\x00\xff\xff\x05\x0b\xbb\x60\xb6\x0b\x00\x00"), - }, - "/src/net/net.go": &vfsgen۰CompressedFileInfo{ - name: "net.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 218947293, time.UTC), - uncompressedSize: 1122, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x92\x41\x6f\x1a\x3d\x10\x86\xcf\xf8\x57\xcc\xe7\x93\xfd\x75\xbb\xa8\x52\xd4\x43\x25\x0e\x0d\xad\x22\xaa\x36\x44\x42\x6a\x2b\x45\x39\x78\xbd\xb3\x1b\x83\xb1\xb7\x1e\x6f\xc3\xaa\xe2\xbf\x57\x5e\x76\x49\x02\x5c\x7b\xc2\x0c\x33\xcf\xfb\x68\x86\xe9\x14\xde\x14\xad\xb1\x25\xac\x89\xb1\x46\xe9\x8d\xaa\x11\x1c\x46\xc6\xcc\xb6\xf1\x21\x82\x60\x13\x8e\x21\xf8\x40\x9c\x4d\x38\x75\xa4\x95\xb5\x9c\xb1\x09\xaf\x4d\x7c\x6c\x8b\x5c\xfb\xed\xb4\xf6\xcd\x23\x86\x35\x3d\x3f\xd6\xc4\x99\x64\xac\x6a\x9d\x86\xaf\x86\x22\x3a\xe1\x30\x66\x60\x55\x59\x06\xa0\x18\x8c\xab\x25\x88\xc3\x4f\x18\x32\xe8\x33\x24\xfc\x61\x93\x46\x39\xa3\xc5\x21\x33\xbf\xc5\x27\xc1\x1d\xc6\x27\x1f\x36\xa0\xb4\x46\x22\x30\x04\xce\x47\xa0\xb6\x49\x86\x58\x42\xd1\xc1\x4d\x1f\xfc\x65\xc5\xa5\x64\xfb\x21\x57\x94\xf0\xff\x27\xa3\x2c\x06\x09\xe9\x53\x0c\x9c\x0c\x92\x44\x22\x1d\x3d\xe6\xde\xb9\x7f\xe2\x40\x1d\x2d\x9c\x89\x22\x51\xc7\x5a\x13\x7c\x81\x8b\xbb\xdf\x57\xab\xa8\xf4\x46\x48\x28\xbc\xb7\x29\x35\x60\x6c\x83\x83\x4a\x59\xc2\xb3\xee\xf7\x63\xb7\x18\x42\x29\x15\x33\x78\xf1\xed\x6a\xab\x9a\x1e\x26\x4f\x69\xd9\x25\xe8\x0f\xe3\x4a\xff\x44\x8b\xbb\x33\xf2\x77\x43\x51\x2d\xee\x2e\xb3\x8e\x90\xad\xda\x8d\xf7\xbb\x56\x7a\x63\x7d\x2d\x24\x18\x17\x5f\x0c\x0c\xff\x97\x7c\xb5\xfc\xf6\xf1\xe7\x7c\x79\x7b\x9b\x86\xa7\x53\x98\xfb\xa6\x03\x5f\x0d\x07\xa0\x7c\xe1\x4a\xdc\x5d\x77\x11\xf3\x03\xba\xe8\x22\xf6\x35\x31\x1e\x29\x83\x43\xf5\x34\x61\x9d\x86\x23\x06\xa7\xec\xb2\x58\xa3\x8e\x82\x64\x3e\x57\xd6\x0a\x6e\x12\x60\x59\xf1\x2c\x35\xdd\x58\x5f\x28\x9b\xdf\x60\x14\x7c\xd5\x13\xf9\xd8\x57\x05\xbf\x9d\x3f\xaa\x30\xf7\x25\xf2\x0c\xb4\x94\x09\x29\xe4\x89\x6b\x4a\xa7\xfc\xf3\xaf\x56\xd9\x17\x96\xd4\x17\xc4\x2e\x83\x0e\xee\x1f\x0e\x86\xe3\x3d\x4d\x05\x16\x9d\xd8\x49\xf8\x6f\xd6\xbf\xba\x7e\x99\xaf\xb7\x39\xd9\xb3\x49\xe5\x0
3\x98\x0c\x0a\xf8\x30\x83\xa0\x5c\x8d\xb0\xeb\x1b\x4d\x05\x45\x9a\xed\xee\xcd\x43\x5f\x38\x19\x4d\xb3\xfb\xe3\x2a\x62\x68\xf1\xa2\xf3\xa5\xed\xd2\xb1\x28\x68\x10\x3f\x5b\xf1\xb9\x16\x3d\x6b\xcd\x66\xa0\x5f\x39\x99\x53\x9f\xb7\xef\xd8\x9e\xfd\x0d\x00\x00\xff\xff\x93\x28\xa9\x7f\x62\x04\x00\x00"), - }, - "/src/os": &vfsgen۰DirInfo{ - name: "os", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 219255573, time.UTC), - }, - "/src/os/os.go": &vfsgen۰CompressedFileInfo{ - name: "os.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 219150959, time.UTC), - uncompressedSize: 581, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x91\x4f\x6b\xdc\x30\x10\xc5\xcf\x9e\x4f\x31\xd5\x49\x62\x5b\x3b\xb9\x76\x6b\x4a\x28\x61\x5b\x28\x2d\xb4\x94\x1e\x42\x28\xfe\x33\xd6\x8e\x23\x4b\x46\x92\x9b\x85\x65\xbf\x7b\x91\xd6\x4e\x21\xe0\x83\xd1\xfc\xde\x9b\x99\x37\x55\x85\xbb\x76\x61\xd3\xe3\x18\x00\xe6\xa6\x7b\x6a\x34\xa1\x0b\x00\x3c\xcd\xce\x47\x94\x50\x08\xf2\xde\xf9\x20\x00\x0a\xa1\x39\x1e\x97\xb6\xec\xdc\x54\x69\x37\x1f\xc9\x8f\xe1\xff\xcf\x18\x04\x28\x80\x61\xb1\x1d\xfa\xc5\x46\x9e\xe8\x4f\xe3\x75\x90\x0a\x1f\x1e\x43\xf4\x6c\x35\x9e\xb1\xaa\xd0\xba\x88\x5d\x63\x0c\xf5\xe8\x2c\xfe\x66\xdb\xbb\xe7\x00\x85\xa7\xb8\x78\x8b\x77\x5e\x07\xb8\xac\x3e\x6c\x39\x4a\x85\x67\x28\x78\xc0\xd9\xbb\x8e\x42\xc0\xf7\x35\x8e\xa1\x3c\x18\xd7\x36\xa6\x3c\x50\x94\x62\xad\x08\xb5\x7f\x81\xde\x64\xe8\x97\xed\x69\x60\x4b\x7d\xb2\x28\x1a\xaf\xff\x26\xf5\xca\x5c\xb5\xe9\x51\x28\x28\x8a\xd4\x18\x6b\x9c\x9a\x27\x92\xdb\xc0\x6f\x31\x95\xcb\xaf\x64\x75\x3c\x4a\xf5\xee\x36\x81\x83\xf3\xc8\xc9\xe7\x66\x8f\x8c\x1f\x5e\x23\x7b\xe4\xdd\x2e\xf7\xcb\x96\x0f\xfc\x88\xf5\x95\xf9\x62\x7b\x3a\x49\xc6\x1d\xde\xaa\xf2\x67\x6e\x20\x93\xe1\x05\xd2\xc7\x03\x1a\xb2\x32\x69\x14\xd6\x35\xde\x64\x8f\x75\xaa\x6d\xa0\xb3\xf8\x28\x32\x7e\x79\x95\x74\x4b\x83\xf3\x74\x7f\xba\xe6\xb5\x55\xe9\x44\xdd\x12\x9b\xd6\x90\x54\x28\xb7\x9d\xf2\x45\x73\xaa\x6b\xe6\x42\xac\x8f\xa1\xfc\x46\xcf\x52\xdc\xbf\xc8\xf2\xb1\x78\x9a\x0d\x4d\x64\x23\xf5\x98\x96\x3f\x7c\xbf\xfb\xf1\xe9\x73\x3d\x06\xa1\xe0\x02\xff\x02\x00\x00\xff\xff\x55\xfc\x3a\xb3\x45\x02\x00\x00"), - }, - "/src/os/signal": &vfsgen۰DirInfo{ - name: "signal", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 219336861, time.UTC), - }, - "/src/os/signal/signal.go": &vfsgen۰CompressedFileInfo{ - name: "signal.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 219392149, time.UTC), - uncompressedSize: 233, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xce\xbf\xca\xc2\x40\x10\x04\xf0\x7e\x9f\x62\xca\x84\x0f\xbe\x13\xad\x2d\xc4\x42\x3b\xc5\x17\x90\x4b\xb2\x09\x1b\x2f\x7b\xe1\xfe\xd8\x84\xbc\xbb\xa0\x69\x22\xd8\xce\x6f\x60\xc6\x18\xfc\x55\x59\x5c\x83\x3e\x12\x8d\xb6\x7e\xd8\x8e\x11\xa5\x53\xeb\x88\x8c\xc1\x75\x15\x41\x22\xd4\x27\xc8\x30\x3a\x1e\x58\x13\x37\x68\x7d\xc0\xe9\x72\xb8\x1d\xcf\xfb\x3e\xfe\x13\xb5\x59\xeb\xa5\x7e\x6f\x24\xda\xca\x71\x91\x45\xd3\x6e\x5b\x62\x9a\x57\xcc\xba\xd2\x6f\x96\x4e\x7d\xf8\xcd\x81\xeb\x67\x51\xe2\xc3\x00\x26\x04\x4e\x39\x28\x36\x98\x97\x1b\xce\xfb\xb1\x78\xcf\xbe\x02\x00\x00\xff\xff\x29\x0b\xd3\x08\xe9\x00\x00\x00"), - }, - "/src/reflect": &vfsgen۰DirInfo{ - name: "reflect", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 532694606, time.UTC), - }, - "/src/reflect/example_test.go": &vfsgen۰CompressedFileInfo{ - name: "example_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 219601139, time.UTC), - uncompressedSize: 311, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8d\xcf\x4a\x33\x31\x14\xc5\xd7\xbd\x4f\x71\xc9\xe2\xa3\xe5\x93\xa4\x95\xba\xe8\xec\x5c\x88\xe2\xa6\x62\x1f\xc0\xa6\x93\x9b\x3f\x75\x92\x0c\xc9\x8d\x08\xa5\xef\x2e\x33\x22\x82\xbb\x03\xbf\x73\xce\x4f\x29\xfc\x7f\x6a\x61\x30\x78\xae\x00\xa3\xee\xdf\xb5\x23\x2c\x64\x07\xea\xf9\x8d\xa9\x32\x40\x88\x63\x2e\x8c\xc2\x46\x16\x00\xb6\xa5\x1e\x1f\x3e\x75\x1c\x07\x3a\x70\x69\x3d\xef\xed\x72\x85\x17\x58\x28\x85\x8f\x79\xf4\x54\x9e\x0f\x68\x32\x55\x4c\x99\x31\x4c\xbd\x48\x89\x7f\x4e\xa5\x36\xe6\xf5\x3b\xee\xad\xc5\x44\x64\xc8\xa0\xcd\x05\xd9\x87\x8a\x93\x52\xce\x5f\x07\x22\xf4\xcc\x63\xed\x94\x72\x81\x7d\x3b\xc9\x3e\x47\xe5\x66\xc5\xb9\xfe\x86\x50\x6b\xa3\xaa\xb6\xbb\x1d\xc0\xc2\x46\x96\x2f\x25\x24\x1e\xd2\xf2\xf8\xa1\x87\x46\x1d\xfe\xbb\x3c\x51\x70\x9e\xbb\xb5\xdc\xe2\xbd\xa3\xee\xf6\x0a\xe7\x9a\x53\x87\x78\x11\x7e\x46\x62\x62\x37\x42\x3b\x12\x13\xfd\x3b\xdc\xc8\xbb\x79\xb8\x59\x5f\x8f\x2b\xb8\xc2\x57\x00\x00\x00\xff\xff\x0d\x48\xa9\x1a\x37\x01\x00\x00"), - }, - "/src/reflect/reflect.go": &vfsgen۰CompressedFileInfo{ - name: "reflect.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 532464528, time.UTC), - uncompressedSize: 36239, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x7f\x73\xe3\x36\xb2\xe0\xdf\xd2\xa7\xc0\xa8\xb6\xfc\xc8\x19\x2e\x3d\x72\xf6\x52\x29\x67\x9c\x57\x9b\x49\xb2\xe7\xdd\xcc\x38\x15\xef\xe4\xae\xce\xcf\x95\x85\x49\x50\x82\x45\x81\x5c\x12\xd2\x58\xf1\xf8\xbb\x5f\xa1\x1b\xbf\x49\xc9\x9e\x49\x72\xb7\xf5\x6a\xe7\x8f\xb1\x44\x02\x8d\x46\x77\xa3\xd1\xbf\x00\x1d\x1f\x93\x17\x37\x1b\x5e\x97\xe4\xb6\x9f\x4e\x5b\x5a\xac\xe8\x82\x91\x8e\x55\x35\x2b\xe4\x74\xca\xd7\x6d\xd3\x49\x92\x4c\x27\x33\xd6\x75\x4d\xd7\xcf\xa6\x93\x59\x2f\xbb\xa2\x11\x5b\xf5\x71\x23\x7a\x5a\xb1\xd9\x74\x3a\x99\x2d\xb8\x5c\x6e\x6e\xf2\xa2\x59\x1f\x2f\x9a\x76\xc9\xba\xdb\xde\x7d\xb8\xed\x67\xd3\x74\x3a\xdd\xd2\x8e\x70\xc1\x25\xa7\x35\xff\x85\x95\xe4\x8c\x54\xb4\xee\xd9\x74\x5a\x6d\x44\x01\x6f\x92\x94\xdc\x4f\x27\xc7\xc7\x84\x6e\x1b\x5e\x92\x92\xd1\x92\x14\x4d\xc9\x08\xab\xf9\x9a\x0b\x2a\x79\x23\xa6\x93\x4d\xcf\x4a\x72\x7a\x46\x54\xb7\x84\x13\x2e\x24\xeb\x2a\x5a\xb0\xfb\x87\x94\xdc\x3f\xe0\xfb\xa4\x93\xbb\x56\x3d\xd1\x5f\x37\xa2\x68\xd6\xeb\x46\xfc\x3d\x78\xba\x66\x72\xd9\x94\xee\x3b\xed\x3a\xba\x0b\x9b\x14\x4b\x1a\x75\x52\xc3\x86\x4f\x2c\x06\x11\x74\xda\x86\x0f\x5a\xd9\x85\x0f\xfa\x9a\xc7\x9d\x7a\xd9\x6d\x0a\x19\xc1\x8f\xf1\xc4\x46\xdf\x71\x56\xc3\xc3\xe9\x24\x24\xab\xec\x36\x6c\x3a\xd9\x70\x21\xbf\x50\x80\xc8\x19\x51\x7f\x2e\xaa\x04\x1e\x25\x2f\xd3\x34\x4f\x9e\x03\x81\x52\x72\x7c\x4c\x7a\x26\x49\xd5\x74\xa4\x63\xb4\x9e\x3e\x68\x76\xdc\xf6\xaa\x4f\x22\x77\x2d\x74\x4e\xc9\xf3\xdb\x3e\xbf\xb8\xb9\x65\x85\x54\x3c\xea\x98\xdc\x74\x82\xdc\xf6\xf9\xb9\x9a\xbc\xa0\x35\xbe\x53\x1d\xd2\xfc\x2f\x4c\x26\x33\x84\x30\x4b\x2d\x48\x2d\x57\x16\xae\x83\x98\x12\x44\x47\x41\xe6\x15\x91\xbb\x16\x41\x78\x3d\x66\x29\x39\x3b\x53\xe3\xbd\x13\x25\xab\xb8\x60\xa5\x6a\x3c\xe9\xa4\x92\x84\x23\xe4\xf6\x74\x32\x99\xf4\xfc\x17\x76\x4a\xd4\x44\x5b\xd9\x25\x16\x92\x7a\x3c\x4b\x15\xb2\x49\x9a\x66\xaa\xe1\x8a\x8b\x12\x1b\x7e\xe1\x9a\xa9\x87\x61\xb3\x5e\x76\xa7\x84\x08\xf6\xfe\x2d\x5d\xb3\x8b\xaa\x4a\xf4\x47\x64\xba\xa0\xf5\x65\x30\x8c\xec\xb8\x58\xcc\xd2\x34\x23\xb3\x59\xe6\x26\xc2\xee\xd4\x4a\x62\x0a\xf6\xd7\x4d\x53\x27\x29\x42\x7f\x98\x4e\x26\x43\x12\x76\x32\xcd\x2f\x3d\x0a\x02\x9c\x74\x3a\x99\x28\x70\x97\x31\x5d\xb2\x11\x26\x74\x32\x55\x52\x31\x41\xb9\xb9\x64\x40\xa4\xdb\x3e\xff\x4b\xdd\xdc\xd0\x3a\x7f\x4d\xeb\x3a\x99\xfd\xc1\xbe\x75\x23\xf0\x8a\xd8\xa7\xf9\xf7\x4c\x2c\x
e4\x32\x49\xc9\xb3\x33\xf2\x92\x7c\xf8\xe0\xa6\x23\xe8\xda\x9b\x0b\x30\x62\xd2\xc9\x5c\x56\x35\x5d\x90\x0f\x67\x04\x3e\xbc\xd3\x4b\x4e\xbd\xf4\x99\x3a\xd6\x79\xd8\x5b\xd1\xb8\x54\xaf\x14\x8d\x26\x4a\x75\xe8\x49\xbf\x01\xfc\x7a\x72\x75\x8d\x98\xaa\xd7\x4a\x7a\xb9\x9a\xe3\xcb\x2f\x09\x27\xaf\x46\xe6\xf0\x25\xe1\x2f\x5e\x90\x7b\x25\xee\xdf\x6a\x5e\xe8\x56\x3d\xa9\x78\xd7\xcb\x1c\xd0\x58\x2b\x20\xae\xf7\xb9\x28\xd9\x5d\xc2\x53\x78\x67\x78\xa8\x9a\xf8\xcc\x5f\xe3\xb4\xda\x95\xe2\xbb\x12\xd2\xd9\x0c\xda\xf3\x8a\x3c\xb3\x7d\x70\x96\x93\xa2\x11\x92\x0b\xb5\x3a\xcd\xcc\x26\xd1\xb4\xce\x08\x6d\x5b\x26\xca\x24\x7c\x9e\x69\xac\x34\x1c\x45\xc3\xd3\xc7\xa4\x72\xed\xe8\x6d\x25\xd2\x20\xa4\xa5\x7b\x32\x59\xcb\x5d\x0b\x90\x50\x45\x54\x89\xbf\x4a\x35\x04\xb9\x6b\x67\xa9\xe9\xf1\x90\x5a\xae\xdc\x15\xcd\x46\x80\x6c\xa9\x65\x34\xff\x3c\xa9\x99\x88\xf0\x4e\xd3\x8f\xe6\xcf\x3b\xc1\x62\x0e\xf5\xac\x68\x44\xf9\xbb\xb0\xe8\xbf\x37\x87\x36\xa8\x1e\x83\xdd\x0f\xda\xb4\xab\xc5\x0f\x54\x2e\x3f\x42\xb5\x21\xf1\x10\x47\xd8\xb7\xcd\x70\x6b\x90\x82\x53\x42\x8c\x14\x0c\xb9\xab\x5b\xde\xd9\x96\xf8\x09\x9f\xfe\xac\xb9\x7c\x1a\xad\xf0\xcc\xcd\xc2\x43\xff\x0d\x6d\xaf\x3a\x79\x4d\xce\xc8\x46\xaa\x77\x43\xe5\xb7\xd9\xa7\x3e\x1f\x94\x4a\xec\xdf\x73\x59\x2c\x49\x27\xf3\xbf\x71\x51\x6a\xfd\x53\xd0\x9e\x91\x3f\xab\xcd\xff\x14\x74\x3e\x93\xea\x25\x10\xb8\x93\x19\x39\x72\x76\x01\x8a\x59\xcd\xd6\xa7\xf1\x76\xa6\x15\x7d\xcd\xd6\x33\x33\xdf\x9a\x89\x53\x32\xdc\x8b\x6a\x26\xc2\x3d\x06\x18\x06\x38\xbc\x5e\x52\x01\x28\x94\xbc\x53\x9c\xfb\xba\x91\xcb\x6f\x78\x17\xab\xd0\x9e\x89\xf2\x42\xd4\xbb\x58\x8b\xaa\x5e\x67\xe4\x92\x89\x52\x77\x7a\x88\x7b\x76\xac\xd8\xee\xef\xf9\x23\x2b\xb6\x7e\xcf\x01\x21\xac\x35\xf4\x51\x74\x28\x79\xe7\xd1\xa1\xe4\x5d\x3c\xed\xef\x36\xa2\x80\x69\xb7\xb4\xa3\xeb\x5e\xcd\xdc\xc9\x1d\x3c\x9a\x81\x4c\x73\x01\x8b\x9f\xae\x58\x72\x75\x8d\x26\x43\x46\xb0\x81\x93\xb5\x40\xe1\x74\x54\x2c\x18\xe1\x42\x4f\x93\x8b\x2b\xae\x64\xc7\xc7\x59\xf7\x37\x8a\xc4\x2d\x9e\x8e\xf5\x9b\x5a\x86\xd8\xe8\x67\x88\x4e\x83\xcb\x2b\xc2\x47\x37\x39\x88\x90\xea\x89\x18\x35\x1b\x39\x44\xc9\x80\x18\xe2\xd4\x6c\xe4\xeb\x48\xe9\x8e\x8e\xe7\xf3\x7c\x4b\x3b\x4e\x4b\x5e\xc4\x3c\xb7\xb0\x3e\x9c\x91\x39\x79\xf5\x8a\xcc\xff\xc7\x7e\xce\x5b\xab\x57\x6f\xd7\xbb\x96\xa9\x85\xac\x0c\xb7\x4c\x93\xf6\xb5\x5e\xdd\x1a\xaf\x98\x2f\x59\x30\xe8\x29\x31\x9f\xb4\x16\xe0\x02\xe0\x11\xc2\x85\x7e\xd2\x6c\x24\x3e\x6a\x36\x32\x12\x98\x73\x63\x71\x83\xd4\x98\x6d\xc2\x67\x94\x7e\xa6\xe5\xc6\x6b\xa1\xb9\xa5\x1f\x19\xad\xfd\x88\xfc\x98\xfe\xf7\xf1\x16\xd4\x87\x1b\x90\x69\x88\x2c\xe5\xbf\xcd\x8e\xf0\xc8\x4e\x66\x37\x0a\xd8\x27\x3e\x6a\xa3\xd8\xcf\xee\xd0\xa5\x09\x79\x6e\x59\x6e\x37\x91\x8f\xdc\x38\xf4\xbe\x61\xd4\xbe\x21\x5a\xc4\xe3\x37\xb4\x1d\xd7\xc6\xc6\xaf\x02\x28\x2b\xb6\x3b\x25\xe3\x3a\x68\xc5\x76\x96\x38\x4f\x54\x55\x6e\xf4\x1f\x64\x37\x3e\xba\x71\xe2\x3e\x0d\xec\xa5\xf2\xf8\xc6\x01\x3b\x67\xf0\x13\x41\x83\x53\x08\xb0\x2b\xe5\x19\x86\xeb\x01\x1f\xe1\x72\xd0\x40\xbf\xb3\xad\xf4\x9a\xf0\xdc\xca\x8c\x60\x87\x83\xcb\x22\x84\x83\x68\x57\xe0\x99\x63\xdf\x60\x69\x34\x55\xd5\x33\xf9\xed\xfa\x06\xcd\x33\xb3\x1b\xf0\x14\x34\x8f\x31\xc7\x2a\x3d\x43\xd5\xac\x1c\xba\x09\x01\x14\xa5\xb6\x86\x66\x1a\x62\x83\x0b\xd0\xf7\x93\xfd\x45\xa8\xff\x8d\x89\x6d\x15\x2d\xc0\x91\x77\x92\xa2\x40\x57\xfb\x7c\xbb\x60\x3d\xea\x7f\x3e\x23\x2b\x7f\x2d\x66\x83\x89\x9d\x12\xef\xcb\xa3\x2b\xd5\x0b\x18\xfc\xda\x65\xaa\x5a\x8d\x2e\x55\xe4\xa7\x5b\x67\x48\x63\x27\x7f\x0f\x53\x30\xae\x74\x50\xc0\xc4\x16\x12\x8c\x0f\xe5\x3f\x34\x30\x60\x32\xee\xd6\xe7\xef\xa0\x95\x72\x89\x6d\xa4\x20\x9c\x24\x31\x3b\xeb\x4a\x3f\x8b\x42\
x3e\xd3\x43\x3e\xb4\xe9\x33\xea\x27\x9b\x97\x4a\xba\x0f\xbc\xd5\x4e\xb7\x3c\xe8\x6e\x3f\x4c\xa7\x10\xc2\xf0\x8d\x55\x2d\x80\x0a\x45\x4d\x5e\x22\x50\xf9\x4f\xb5\xd9\x6c\x76\xcb\xa9\x71\xa6\xec\xf7\x75\x53\x55\x44\x1b\xd5\x9f\x9d\x4c\xa7\xd6\x4e\x76\x9e\xaf\x21\x57\x22\xc9\x73\x7f\xd8\xd4\x6c\x4e\x49\x6a\x1b\x7b\x41\x1b\x99\x1b\x50\x07\x20\x18\xa9\x7e\xf3\x34\x48\x57\xa7\x32\xd7\xe6\xbd\xf9\x70\xad\xa0\x2b\xc7\x3d\x32\xdf\x89\xd6\x37\x6b\xda\x5e\x21\x67\xaf\xc3\xb1\x3d\x9c\x74\x90\xca\xbc\x4e\xd2\x10\x4d\x0f\x95\xd8\x47\xc0\xe1\x81\x23\xc6\x74\xf1\xb8\x81\xd1\x26\x42\xc8\x3f\xb4\x2c\x9e\xce\x54\xab\xd9\x3f\xa6\xc6\x8e\x71\x8c\xb0\x66\x92\x7e\x30\x55\xb6\x0a\x21\xc6\xe0\x9b\x82\xa1\xe2\xbe\xfa\x24\x35\x23\xa7\x84\x0b\xa0\xa0\x0b\x73\x39\x0a\x72\xb1\xa7\x4f\xb3\x91\x7b\x3b\x35\x1b\x69\xe7\xa7\x44\xca\x9b\xdb\xcd\x4e\xb2\x9e\x3c\x57\x7f\x82\x26\xdf\x50\x49\xbd\x66\xd0\x4b\xfd\xc3\x98\xd5\x74\x22\xe9\x82\x04\x0f\xac\x6b\x7c\xd3\x34\xb5\x61\xa6\xea\x16\x33\x51\x0d\x75\xfd\xdc\x8c\x61\xf9\x27\xa0\x71\x0a\xff\x27\x29\x49\x7a\x0d\x39\x25\xf7\x44\xcf\x44\x43\xbb\x12\x39\x60\x7d\x9d\x03\x56\x0f\x11\x00\x49\x17\x61\xff\x03\x00\xd4\x2c\xe2\xfe\x7a\xed\x25\xa9\x06\xe0\xf5\x9f\xcd\x06\xad\x79\x6f\x22\x44\x49\x0a\x53\x3f\x30\x9a\x25\x91\xe1\xa0\x51\xb1\x22\x53\x58\xeb\xf1\x9c\x53\x0f\xf0\x90\x22\xc0\x2a\xb5\x13\x0a\xf6\x3e\x51\xe0\x52\xe4\x89\x82\x7f\xa3\x36\xaf\x23\x43\x50\xa5\xd7\xdd\xbe\x05\xd6\xb1\xa4\x0b\xbd\xb5\x48\xba\x50\x0f\xcc\x00\xa7\x76\xa8\x4c\xe9\xe4\x89\x87\xb8\x02\x03\x68\x9f\x92\x1b\x78\xe9\x71\xf4\xa2\xaa\xbe\xe7\xbd\x92\x62\xf5\x6d\xb8\x00\x75\x9b\x44\xe9\x24\xfd\xd9\xcd\xc2\x1b\x43\xc3\xb9\xe2\x42\xaa\xb6\xe9\xf5\x34\x22\x0c\xd8\xbd\x9e\x5c\x5c\x54\x15\x04\x7d\x15\x21\x6a\x26\x12\x0f\x88\xa6\x87\x41\xcd\x86\x5d\xbc\x87\x19\x11\x69\x3c\xbe\xb2\x37\xf4\xcc\x24\xda\xc1\x7a\x66\x7a\x7d\x0e\xe6\xa6\x5b\xc1\xdc\xf4\x67\x3f\x1e\x6d\xd6\x9c\x83\x35\x3e\x3b\x63\x74\x0f\x00\x07\xf3\xf3\xc0\xa4\xd3\x89\x8f\xa0\x9d\x9f\xf7\x30\x23\x32\x8d\x31\xd0\xf3\xd3\x39\x13\xb7\x91\xf7\xb2\xbb\xb8\xb9\x0d\x82\xea\x5a\xda\xef\xa7\x10\x3f\x2d\xf4\xe2\xbf\x57\x7f\xcd\xbb\x87\xb1\x8d\xaf\xd0\x3b\x5e\x2f\xbb\x59\x46\x10\x30\x64\x0a\x16\x4c\x9a\x8e\xef\xb9\x5c\x2a\xbd\x67\x50\xe0\xbf\x80\xce\xd0\xb8\x16\x79\x2f\x3b\x87\x66\xff\xbf\x3a\x35\xb9\xd2\x4b\x27\xe0\xc2\xf2\x12\x09\xc6\xc4\xd5\xd9\x83\xf7\xd8\xc3\x1a\x55\x16\x58\xd1\xb4\x3b\x34\x75\x93\x52\x51\xa8\xef\x0a\x6f\xd2\x10\xec\xd1\x43\xdc\x4f\x3d\x43\x78\x30\x80\x33\x88\xe3\xe8\x64\x64\xf9\xea\xd0\xe4\x74\x32\x69\xbb\xa6\x1d\x31\x6f\xb5\xfd\xd4\x35\xed\x2c\xcd\x2f\x81\x3c\x89\xb2\x8a\xca\x5e\x02\x1d\xd5\x1b\xc0\x13\x1a\xaa\x6f\xca\xde\x78\xb0\x33\x52\x8a\xf4\x27\x5a\x6f\x58\x22\x01\xf3\x8c\x6c\x83\x19\x55\x35\xa9\x6a\xba\x48\x09\x34\xc2\xed\x0b\x6c\xfb\xdc\xec\x8a\x98\x35\x31\x11\xad\xb3\x33\x8c\x65\x41\xc8\xde\x7b\x88\x54\x8b\x9f\xfe\x20\x3b\xcc\xa4\x20\x23\x60\x8c\x7b\x65\x59\x46\xd6\xdb\xd6\x19\x6a\x80\xd2\x07\x40\x2a\x31\xa0\xd2\x07\x5f\xdf\xec\x85\x32\x48\x42\x08\xf6\x5e\xe9\x38\xfd\x7e\x96\x91\x6d\x66\x78\xd5\xc9\x5c\x39\x5b\x8d\x32\x0d\x1f\x19\x5c\x3f\x38\x17\x25\xef\x1c\x61\xdf\xd0\x15\x03\x87\xcb\xca\x5d\xa6\x16\x61\x46\x0a\xda\x2a\xc1\xf5\x28\xaa\xe3\x25\x9a\x2c\xcf\xce\xd0\x51\x43\xae\x53\xc1\x0b\x6b\xb4\xe6\x16\x28\x69\x2a\x22\x1a\xf1\x47\xf0\xdb\x60\x75\xce\x80\xad\x0a\x56\xcd\x04\x79\x45\x5e\x1e\xec\xaf\xec\xf1\x05\x95\x7c\xcb\x08\x44\x04\x4d\x5f\x85\xdc\x47\xf4\x2d\x68\x1b\x8e\xfb\x15\x40\x38\xdc\xdb\xb6\xc3\xae\x96\x6f\x9e\x28\xee\xda\x6c\x24\x65\x64\x40\xcc\x32\x7f\x45\x39\xb2\x8e\x99\xc7\x90\xa7\x0d\x13\x88\x64\xb0\xec\xf3\x6f\x6b\xb6\x4e\xd2\x54
\x8f\xf4\x0b\xeb\x9a\x59\x4a\x1e\x14\xbf\x5f\xba\xc5\xaf\xf3\x98\x51\xd2\xf7\xef\x2e\x75\xf8\xcc\xcf\x84\x42\x3a\x01\x53\xc9\x90\xbf\x56\x1c\xb3\x59\x51\x27\xf2\x3a\x7b\xf8\x60\x88\xc8\xd5\xb2\x10\xbc\xf6\x97\x85\xe0\xb5\x2f\xdf\xbe\x37\x37\x9c\xb0\x51\x09\x45\x23\x50\xe5\x36\xdd\xcc\xf3\x6e\x80\xc0\xc3\x59\xf8\xb2\x38\x86\x02\xae\xa9\x60\x99\x39\x76\x7d\x0a\x42\x63\xbc\x32\x2d\xff\xb0\xa5\xf5\x2c\xa4\x3d\xe8\x94\x8b\x2a\x41\x3f\x85\x0b\x99\x11\x56\xb3\xb5\x56\xb6\x91\x39\x1e\xe1\x13\x4a\x91\x0d\xa7\x3b\x29\x52\x90\xd2\x8c\x00\x6c\x8f\x54\xaf\x97\x54\x5c\x54\x49\xc9\x3b\xf8\xf8\x0d\xef\x32\x22\x3f\x61\x44\x13\xb7\xf6\xc4\x36\xcd\x08\x04\xbd\x6d\xbc\xdc\x7e\xd7\x51\x70\x0f\x8d\xef\x36\xa2\x50\x0c\x13\x19\x41\x5b\x5f\xab\x69\x1d\x58\xd5\x56\x9d\x27\x86\xf6\xcd\xd1\x11\x81\xac\x18\x17\xa0\x6c\x21\x8d\xca\xc5\x95\x7e\xf4\xc7\xf9\x75\xac\x72\xd2\xb1\x95\x8b\xe3\x9f\x92\x9a\xf6\x92\xd0\x6e\xa1\x04\xd9\x0e\x81\x7b\xc8\xa6\x97\xe4\x86\x11\x50\x46\x66\x51\xdf\xf6\xe7\x41\xc0\xdc\xdb\x53\x34\x02\x66\xf7\x53\x5b\x4e\x1c\x2d\x57\xbd\x31\x8c\xa2\x49\xb6\x45\x35\x73\xdb\x5f\x84\x71\xef\x08\x6c\xb3\x91\xe3\x70\x4d\xd0\x1b\x00\x8c\x41\x7e\x0a\x27\x8d\x7b\x04\x9c\x3c\x17\xea\xff\x8b\x8d\x74\xbc\xf0\xb8\xf6\x86\xb6\x17\x55\xb2\x62\xbb\x51\x41\xd5\x89\xa0\x15\xdb\x79\x99\x20\x9b\x8d\xc8\x54\xef\xcc\x85\xeb\x06\xaa\xb4\x55\xfc\xe0\x62\x4b\x6b\x5e\x2a\x20\xb0\x01\x90\x19\x79\x01\x10\x8d\x15\x10\x6a\xd7\x83\x13\xd3\x51\x4d\x27\xa1\x2b\xb6\x4b\xc3\xf5\xe1\xcd\xcd\x33\x33\xf5\x1e\x39\x34\x59\x0f\x0e\xa7\xc3\x98\xfe\x82\xf0\xc0\xc3\xbc\x2f\xaa\xe4\x53\xd6\x9a\x8d\x63\x0e\x61\x1f\x1f\xa3\xb4\xa2\x25\x72\x51\x25\xda\x3e\xbb\xba\xbe\x74\x91\x3a\x3b\xda\xf1\x31\x99\xdc\xf6\x83\x28\x65\x2c\x6f\x08\x23\x4d\xa1\x7d\xd5\x33\x2d\x9b\xed\x15\x5a\xaa\x3a\xaa\x79\xff\x70\xff\x80\x2d\x50\x2e\x2b\x27\x97\x95\x89\x5f\xaa\xd7\x18\x84\xc4\xb2\x19\xa3\x82\xe1\x79\x2c\x02\x66\x0e\xa7\xd8\x1f\x58\xaf\x6b\xa3\xf2\x73\xd9\xd0\x84\xa7\xe4\x05\x99\x91\x25\xed\x89\x68\x8c\x7d\x00\xa0\x90\x12\xe8\xd4\x81\x3d\x99\x2b\xd7\xc8\x0e\x0f\x8f\x21\xb4\x6f\xc7\x3e\x3e\x26\xdf\xea\x90\x28\x0e\xa7\x9f\x5b\x64\x07\x06\x1d\xbe\x0f\x3a\x3e\x7f\x4e\xa8\x28\xc9\x73\x6f\xd7\x21\xb4\x63\x84\xd7\x35\x5b\xd0\xda\x74\x81\xb5\x02\x58\x01\x60\xdc\x97\xcd\x4b\x5e\x91\x95\x7a\xa9\x1a\xe9\x31\xbf\x24\x2b\x33\xec\x87\x0f\xf8\xd9\xa6\x67\x1c\x22\xfb\xc9\xa7\x87\x27\x54\x34\x62\xb7\x6e\x36\xbd\x26\xa8\x5d\x50\x1a\x11\xb7\xa6\x34\xc8\x07\xf3\x01\x09\x86\x38\x59\xfb\x1b\xdf\x3d\x10\x56\xf7\x1e\x1a\xba\x69\x04\xd2\x34\x0e\xd9\xc3\x2b\xf2\x73\x46\xca\x0d\xda\xfc\x3d\x93\x57\xaa\xf7\xf5\x97\xf0\xe8\x51\xa9\x28\x37\x6d\xcd\x0b\x2a\x99\x27\x1f\xe0\xf7\x9a\x41\xe0\x8f\x03\x6b\xc3\xd5\x20\xa9\xf8\xf6\xb6\xaf\xc2\xca\x1d\xd8\x9b\x51\xf8\x67\x69\xfe\x96\xbd\x37\xb8\xdf\xf6\x15\xfa\x6c\xe0\x86\x64\xfe\x48\xf6\x15\xc4\xb4\xc7\x5f\xd9\x18\x76\x06\xc5\x63\xf1\x6b\xb9\x6b\xdd\x62\x46\xda\xa5\x83\x36\x74\x31\xcb\x14\x61\xe9\xc2\xbe\xf2\x63\xf1\xb7\x7d\x05\x8f\x71\xe2\x4f\x52\x24\x36\xb2\x3d\xc3\x90\xb4\x01\x88\x63\x1b\x5d\xf5\x7f\x58\xd7\x78\x8e\xa5\x73\x92\xf6\x98\xb4\xce\x0f\xf4\x4d\xcd\xc0\xd4\x41\xa7\xe5\x67\x45\x5f\x28\x54\xb3\x61\x48\xdf\x97\xf1\x36\x11\xcf\x75\x30\x9b\x88\xcb\xc6\xd8\x00\x65\xe4\x08\x45\xfe\x68\x2b\x3b\xc3\x52\xe7\xec\x4c\xa3\xd2\x84\xc7\x61\xf9\x73\xf2\xe1\x94\xac\xa2\x9b\xfa\x20\x42\x8f\x79\x66\xfb\x49\xe7\x99\xf1\x23\x1e\x5b\xec\xeb\x9e\x0b\x99\x54\xe0\xaf\x65\xe4\x86\xcb\x1e\x6c\xf2\xcf\xff\xe4\x2c\x3b\xcb\x42\x45\xfc\xc8\xd1\x6d\x25\x14\x46\x84\x1c\x4a\x0f\x71\xe2\x5c\xc8\x2f\xd4\xb4\x9f\x27\x4a\xf3\x7d\x91\x26\xad\xec\x52\x02\x05\x42\x5f\x24\x6a\xfc\xd4\x3
5\x9c\x7f\xee\x5a\xce\x3f\xf7\x9b\xce\x3f\x8f\xdb\x66\xea\xbf\xcf\x4e\x5c\x87\xcf\x4e\xfc\x0e\x9f\x9d\xc4\x1d\x3e\xff\x93\x6b\xfb\xf9\x9f\xfc\xb6\x9f\xff\x29\x68\xfb\x8e\x3b\x94\x37\x01\xce\x9b\x01\xd2\xef\xb8\x87\xf5\x26\x44\x7b\x33\xc4\xfb\x1d\xd8\xed\xef\x00\x3f\xfc\xdb\x62\xa2\x53\xf7\xf6\xe6\xb0\x19\x4e\xe2\x1d\xf7\x66\xb1\x09\xa7\xb1\x09\xe6\x11\x87\x02\x60\xed\xb5\xb2\x53\x1b\xaf\xe7\xab\x5b\x47\xde\xb2\x2d\x0d\xdd\x77\x65\x8b\x79\xde\x7b\x25\xb0\xea\x97\x76\x0b\x65\x35\x00\xec\x94\x98\x12\x08\xfb\xe4\x90\x63\xaf\x20\x8e\xd8\xd8\xa7\xa4\xa0\x75\xad\x0c\x6b\x33\x2c\x84\xb8\xc0\xc3\x87\x6f\xce\xc1\x9f\x4e\xa4\x49\xad\x3a\xb9\xac\xb4\xac\x26\x2e\x80\x3f\xc8\x7f\x41\x51\x66\xb5\xd5\x2a\xdd\x4e\x0f\x66\x24\x97\xbc\x0f\xa2\x3e\xb4\x5b\x6c\xd6\x4c\xc0\xac\xfc\xa0\x9e\xbf\x7b\xab\x69\x00\x29\x9c\x75\x04\x13\xcf\x88\x42\x27\x7f\xbb\x59\x9f\x0b\x4c\xdd\x46\x99\x5b\xe8\x04\xf9\x42\xda\x2d\xc0\xd8\x51\x5b\x9c\xea\x73\x2e\x94\x0f\xe8\xe6\x85\x03\xa0\x0a\x77\xaa\x54\xf7\xf2\xb0\xbc\xe2\xd7\xa0\x42\x31\x4d\xa9\x19\x82\x71\x12\x05\x5a\x00\xcb\x52\x57\x80\x65\x10\xbc\xd8\x48\xbf\x08\xeb\xe5\x29\x26\xa8\x9d\xd3\x8d\xcf\xe7\xfe\x73\x1f\xfa\xd5\xcb\xeb\xbc\x41\xdf\x15\x62\x6e\x4e\xcd\xf9\xf5\x3b\xd1\x0e\x0a\xfa\x54\x6b\xdb\x00\x11\x97\xe5\xce\x48\xe7\x27\xba\xbd\xe9\xe8\x34\xab\xae\xba\xb9\x64\x52\xc7\x01\x33\xd2\x59\x4c\xfc\x22\x22\x1f\x65\x9d\x2b\x4d\xa7\xf1\xf2\x18\x04\xca\xaa\x28\xde\x46\x17\x89\x12\x16\x6f\x79\x28\x81\x2c\xd7\x6c\xbd\x6e\xb6\x2c\x71\x49\x52\x1b\x14\x0d\x01\xee\xc9\x93\x96\xbd\x4c\xed\x7e\x0b\x95\xc0\xc3\x36\x7d\x57\xd8\x36\x0b\x26\xfd\x50\x46\xdd\xd0\xf2\xb2\xa0\x35\xed\x92\x36\x1a\x30\x23\xc2\x24\xf9\x53\xf3\xe1\x60\xe5\x78\x1b\x0e\x62\xa7\x1f\xec\x1d\xca\x91\xf7\xf6\xe4\x8c\xf4\xfc\x17\x86\xb1\xbc\xa4\x58\x8e\xcd\xb9\xb0\x0b\xd3\x04\x01\xc6\x12\xd3\x69\x3a\x7d\x74\x5f\xc4\xc0\xc8\xeb\x25\x15\x5a\x74\xf4\xb6\xa7\x46\xc8\x75\x00\x43\xa1\xe3\x6f\x7d\x3e\xee\x6b\xda\x7a\x7c\xb2\x31\xc8\x64\x3d\x86\xf6\x93\x90\x09\x2d\xc1\x91\x61\x57\x6c\xf7\x5d\xd3\x79\xa3\x2a\x4f\x35\x1e\x2d\xf1\xd5\x8e\x4d\xd1\x4d\x27\x2b\xa3\xa9\xe2\xbc\x38\xdb\x61\xc4\x79\xb5\xd5\x34\x01\x86\x29\xe5\x3a\xa8\xcf\x5f\x6d\xc9\x99\x6a\xe7\x73\x16\x76\x87\x95\x1f\x94\xcf\xff\xc6\x76\x2e\xf6\x87\x48\xcf\x32\xb2\xda\xfa\xf1\x74\x4d\x91\xd5\x36\x23\x2b\x8f\xae\x2d\x2d\x0a\xd6\xf7\xde\x1c\xd7\xe3\xd3\x1c\x5a\x6f\x3f\x67\xe8\xcc\x18\x2a\x41\xbf\x74\x3a\x61\x42\x76\xbb\xf1\xb9\xaf\xd1\x5a\x5b\x21\x01\xb0\xe1\xe8\xb9\x84\xd1\xb0\xe1\x47\x9b\x5c\x30\x80\xae\xe2\xf3\x0c\xad\x1f\xc0\xc8\x92\x26\x66\x9a\x8e\x4b\x5c\x4b\xfb\x9e\x2f\xc4\x80\x32\x19\xd9\xd2\x7a\x4c\xe6\x80\xb4\x63\x04\xb9\xed\x7f\xa2\xf5\x38\x41\xb6\xb4\x4e\x23\xee\x32\x9d\x9d\xd0\x9e\x23\x10\x6a\x24\x0f\x01\x69\x4d\xf6\xde\x42\xc6\x38\x87\x0c\x6d\x4b\xa5\xff\x5d\xc2\x07\x9b\x2b\x32\xc0\x1f\x26\x53\x08\x27\x29\x10\x90\x47\xfd\x89\x22\xb9\x7d\x06\x1e\xf0\x9c\xb0\x9d\xae\x13\x41\x79\x0b\x9e\x6d\x67\x7a\xa8\xd1\xf2\x90\x35\x66\xc9\x56\x9a\x4b\x01\xe5\x4b\x56\x33\xe9\x6b\xe5\x78\x8d\x8f\x8b\xe8\x01\x99\x1c\x1d\xff\x1b\x1c\x66\xe5\xaa\x4f\xd6\xb4\x3d\x57\xd2\xed\xf2\xfc\x92\x10\x42\x30\xe0\xbd\x86\x82\x4d\xbb\xd8\xa7\x93\x15\xdb\xf5\xc1\x03\x8e\x05\x98\x72\x0a\xa7\xb0\x20\xdc\xc8\x7b\x22\x97\x0c\x3f\xe3\xf6\x06\xdf\xb9\x64\x1d\x95\x6a\xa7\x14\x25\xb8\xb9\x7d\x4e\xce\x2b\x02\x66\x8c\x6e\xc6\xee\x78\x2f\xfb\x0c\x9a\x2b\xc2\x48\xde\x08\x05\x8c\x4a\x13\xfe\x97\x4b\x06\x03\x15\x9b\xae\x63\x42\x02\x4d\x9a\x4e\x89\xe7\x86\xe9\x36\xbd\x0f\x32\x23\x1d\x5b\xd0\xae\xac\x59\xdf\x2b\x53\x4d\x41\x36\x7d\x0d\x42\x39\x39\x07\xa4\x6f\x58\x41\x37\x3d\xf3\xdb\xc0\x58\x16\xf1\x35\x5f\x2c\x
31\x66\x2a\x69\xcd\x48\xb9\x61\x44\x36\x80\x02\x70\x8f\x37\x82\x70\x41\x28\xa9\x9b\xa6\xcd\xa7\x13\x20\x80\x47\x2b\x1b\x89\x53\x00\xc9\x73\x4d\xf8\x94\xf4\x2b\xde\xbe\x13\x92\xd7\x3f\xd1\x9a\x97\xa0\xd8\x20\x13\xa9\x48\x25\x59\x97\x73\xf2\x0a\x3f\x28\xe2\xbb\x33\x36\xa0\x2c\xe1\xdc\x82\x7d\xa7\xed\x0a\xe8\xa4\x0f\xe7\xc0\x17\x2c\xe5\x5c\xb9\x80\xc8\xa8\xe6\x9d\xdc\x74\x8c\xae\xb4\x3d\x76\x7c\x4c\xfe\xbe\x64\x30\x39\xde\x13\x5a\x77\x8c\x96\x7a\x9e\xac\xcc\xc9\x9b\x66\xcb\x48\x03\xfc\x20\x82\xdd\x01\x31\xd7\xb9\x1a\x12\x06\x7f\xf1\x22\x74\xe1\x5a\xf5\x18\xce\xeb\xed\x17\xf0\x31\x7d\x3b\xae\x05\x8f\x34\xe9\x94\x11\x34\x26\xe5\x23\x69\x28\x45\x9e\x51\x53\x65\x0d\xf9\xa2\x4c\xe9\xdd\x87\x34\xc6\x78\xc5\x76\x09\x97\x4f\xc0\x13\x38\x0a\x26\x83\xe1\x6a\xc2\x95\xaa\xd9\xd2\x8e\xac\xb6\xe1\x82\xd1\x3c\x01\xe9\x78\xe6\x72\x36\xb0\xef\xd9\x37\x53\x17\x87\xd2\x34\x1d\x91\x12\x8f\xc3\x90\xfe\xd9\x23\x24\xa1\x71\xfc\xf0\xb8\xd8\x38\x54\x06\x82\x33\x45\xd1\xf8\x91\x15\x4d\x57\x02\xf7\x57\x6c\xf7\x47\x5c\x7e\x2d\xe5\x1d\x1c\x0b\xac\xa9\x22\x07\xee\xb2\xac\xb7\x52\x01\x33\x56\x7b\xfb\xaf\xda\xe0\x8c\x09\xb1\x1a\xec\x6e\x30\x88\xb1\x0c\xf6\xed\x70\xaa\x11\xa0\xfb\x6f\xc6\x86\x8c\xfd\x5d\x98\x34\x34\x41\x34\x93\x1e\xb1\x43\x54\x2b\xa5\x56\xc6\x98\x74\x80\x2b\xfe\x0c\x80\x28\x56\x1b\x79\xb0\x6b\x26\x46\x0c\x68\x2e\xa2\x53\xaa\x4f\xd7\x1f\x96\x29\xae\xe2\x64\x2b\xbf\xe1\x1d\x18\x3b\x44\xbb\xd7\x23\xe1\x46\x25\x43\x7d\x57\xa0\x2d\xb2\xf5\x7c\x52\x5e\xd9\xe7\x2e\xe1\x95\xbb\xc0\x9f\xe0\xf5\x2c\xf5\x8d\xc6\x03\x11\x4b\xd7\x21\x23\xdb\x1c\xaa\x42\x30\x22\xa1\x46\x57\x56\x9d\x2f\xc2\x26\xc3\x65\x82\x15\x2e\x5c\x6f\x83\x94\x26\xbd\xd5\x1b\x47\xdd\x1f\x4c\x19\x49\x88\xb9\x36\xf3\x29\xba\xcd\xa9\xe9\x80\x56\xd2\x1f\xb0\x5a\x79\x96\x91\xa0\xb1\x7e\x3a\x68\x5d\x03\x79\xe3\xd6\xfa\xe9\xa0\x75\xa1\xec\x7b\x2e\x77\x71\x7b\xfb\x1c\x7a\x6c\x81\xe8\x8f\x0b\x32\x40\x8e\xad\x68\xe5\xfc\x99\x00\x97\xae\xfa\xd7\x41\x23\x14\xeb\x71\xcb\x35\x6c\xa3\x5e\x02\x4f\xcd\x77\x0c\x12\x20\x5e\x88\x38\x3c\x30\x7b\xb2\x39\xd5\x5a\x93\x21\xc9\x21\x76\xe0\x19\xbd\x5b\x65\xea\x22\x8c\xcc\x1b\x32\x8d\xf7\xf8\x71\x68\x01\xd5\xc0\x40\x8f\x28\x69\x98\x14\x45\xad\x87\xd0\xe2\x28\xf5\xf4\x20\x96\x41\xe8\x3a\x23\x5f\x37\x4d\x9d\x41\x0e\x3f\xd3\xf9\x55\x9b\x23\x32\xa9\x56\xd0\x5d\xfe\xd0\x03\x57\x23\x6f\x65\x17\x86\xb2\x31\x86\x77\x04\xab\xe5\xdb\xae\x6b\xba\x7b\x9b\x89\x79\xdd\x88\x2d\xeb\x94\x58\xae\x1e\xc6\x03\x92\x36\xca\x35\xac\x75\xa2\xb5\x1f\x7d\xc1\x95\x96\x77\x4d\x92\x92\x0f\xfa\xdb\xd1\xd3\x62\x98\xaf\x9b\x76\xe7\xea\xd4\x74\xbc\x52\x6b\xa7\x12\x56\x66\xd9\xcb\x7c\x05\xdd\x40\x55\x94\x2b\xb5\xdb\x60\xfd\xd6\xd1\x91\xfe\x1a\x17\x23\xed\x99\x70\xab\x96\x49\x69\xa6\x8b\xc0\x6c\x31\xd8\xbd\xae\x48\x5b\x6f\x7a\xf9\x35\xfb\x33\xb8\x86\xf4\xa6\x66\x09\xb6\x76\xaf\x5c\xf5\xeb\x74\x3a\xe9\x01\xc7\xbe\x2b\x2c\x8e\xa0\xe7\x80\x57\x6a\x40\xac\x0d\x06\x1d\x17\x22\xde\x47\x88\x7b\x5d\xce\xd4\x4b\x5c\x4d\x5c\x2c\x60\x96\xbd\xcc\x47\x17\x1c\x44\xc2\x71\x41\x3e\xf3\x20\xdc\x4f\x27\x4f\x21\x45\xbf\x72\xa7\x13\x26\x6a\x0e\x23\x13\x1c\x81\xac\x0c\xda\xfe\xcd\xa6\x97\x6f\xa8\x2c\x96\xc9\x80\xc0\x01\xb2\x58\xd8\x17\x2c\x4b\xa5\x8f\xcb\x5e\x6a\xc7\x56\x35\x0f\x36\x83\x11\xa6\xfc\xe4\x2f\x36\x93\x7b\x0f\xc7\x49\x71\xd5\x61\x63\x3d\x88\xde\x56\x34\x83\xc2\x1d\x27\x1a\xc4\xee\x4c\xd1\x20\x11\xf2\xbe\xce\xd0\x83\x28\x60\x21\x7d\xf6\xed\xaa\x5a\x1b\x70\xb1\x40\x2a\xfd\xe4\x54\x82\x3e\xee\xea\x2f\xc3\xf1\xee\xba\xb6\x6c\xbc\xb7\x59\x54\x60\xf1\x59\xfd\xe1\x76\x65\xb5\x60\x75\x2d\x4e\x14\x25\xe7\x95\x5e\xb6\x58\x84\xf3\xc8\x4a\x82\x67\xb9\x1d\x60\x96\x91\x97\x6e\x4d\xc1\x20\
x47\x47\xbe\x1a\xf8\xf1\x02\xaf\x48\x18\xa9\xdc\x89\x40\x9d\x92\x82\x0a\xd1\x58\x07\x18\x4d\xed\xe6\x46\x52\xf0\xdb\xaa\xae\x51\xb6\x8c\xad\x16\xc7\xcc\xb1\xf2\x55\xe1\x94\x85\xab\xf3\xf3\x06\xc7\x03\x1a\x0e\x81\xad\x0e\xd4\xe3\x73\xb4\x23\x66\xfe\x5c\xb6\x8e\xb1\xae\x30\x17\x0d\x12\x9f\xbd\x1e\x05\xe3\x78\x69\x68\xc4\x28\x89\x71\xf6\x4e\x70\x9e\xc7\x63\xf7\x01\x70\xae\xf3\xd8\x59\x20\xae\x3a\x7d\x7b\x72\xee\xf9\x9a\x4a\x95\x7a\xf0\x40\x5c\x7e\xdb\x70\xb7\x67\xbe\x02\x19\xdf\xe2\x11\x06\x57\x50\x6d\x8e\x0f\xfc\xe7\x77\xe7\xff\xfb\xcd\xb7\xff\x39\x0b\x02\xbd\x3e\xe9\x9b\xd6\x1e\x02\xd8\x1a\xfd\xef\x27\xa7\x86\x9c\x3c\x1b\x17\xa5\x53\x5b\x6a\x8d\xc5\x8c\xae\xfa\x68\xd3\x43\x2d\xaa\x1a\xf9\x07\xda\x49\x4e\x6b\xb5\xc3\x9a\x5c\xd5\xcf\x19\xf9\x19\x12\x67\xf6\xcc\xea\x8f\xac\x60\x7c\xcb\xba\xa4\x69\xa1\xdc\x96\x0b\xb5\xb7\x82\x31\xf9\xd5\x57\x0e\x91\xcb\x25\xaf\xa0\xfc\xbc\xd8\x82\x79\x1e\xd9\xb4\xe3\xb2\xa3\xda\xc6\x56\xae\x12\x10\xf5\x02\xe5\xe1\xb7\xce\x7f\xed\xcd\x27\x54\xc2\xb0\x9a\xb6\x6d\xad\x34\xb7\x42\xc2\x03\x9c\x42\x26\x26\x34\x0b\xb6\x50\xdc\x90\xa4\xfb\x6d\x83\x30\x31\x13\x9a\x06\x63\x69\x1a\xbf\x52\x0b\x41\xf4\x89\x2b\x7f\x37\x69\xeb\x38\x69\xfd\x83\xec\xb4\x5d\xe4\xdb\x4c\x68\x6b\x65\x83\x7a\x00\xbc\xf0\x67\x98\xe2\xc7\xfb\x95\x26\xa3\xc8\xbc\x6e\xd6\x2d\xed\xd0\x02\x78\x14\x1d\x3d\x3c\x9a\xcf\xfa\x60\x6e\x38\xc6\x68\x9d\x82\xf1\x0c\x73\x7f\xb0\x81\xa9\x19\xd7\xdf\xcb\xfc\xed\x66\x0d\x95\x1e\x7e\xf1\x3d\xec\xd4\x32\xc7\xe7\x3c\xc5\x02\x9e\x60\x12\x26\x31\xe7\xa3\x85\xa6\x71\x50\x34\x0b\xc4\x1a\x21\x08\x4a\x3d\x56\xe5\x42\x56\x06\x1f\xa4\x26\x8b\xec\x99\x25\x7e\x7d\xd3\x64\x22\x25\x7a\xaf\xc1\x99\xe9\xd1\xec\xaf\x23\x8c\xcc\xcd\x70\xb8\x2a\xfc\x23\xec\xf9\xe0\xec\x9d\xae\x09\x7e\xa5\xeb\x47\xc9\x57\x78\x98\x44\x77\x1a\x2b\x19\x3d\xd5\xd8\x13\x2e\x4a\x76\x07\xa5\x97\x4d\x85\xd9\x4c\xbd\x8b\xb4\xde\x21\xf6\x2b\x7e\x3d\x9d\xb4\xa6\x1c\x4d\xe6\xe6\x28\x4d\x9b\x63\xb1\xd0\x64\x0d\x15\x6a\xe4\x8c\x40\xa3\x1c\x4f\x74\x4d\x27\x15\x18\x1f\x4e\xea\xa7\x70\x59\x09\xc2\x30\xc7\x55\xda\x7c\x8d\xee\x40\x25\x1f\xc9\x8f\xaf\x75\x1e\x2e\xb8\xe5\x01\xd3\xcc\x2f\x33\x32\x7f\x01\xc5\x7e\x32\xe7\x02\xf7\x16\x2e\xdc\x99\x19\x2e\xf0\xa8\x8c\x12\xa5\x9f\x61\x89\x7b\xe5\x7d\xd8\x05\x43\x34\x51\x1f\xda\xa1\x03\x1d\x5d\xe5\x60\x07\xd5\x43\xc2\x49\xbc\xd4\xc1\xef\x30\xc5\x61\xe1\x37\x36\x89\xad\xe0\xd8\x11\x9a\x0d\x44\xac\xa5\x66\x31\xf4\x09\x4b\x89\x33\xd5\xfb\xbc\xff\x49\x17\xaf\x82\xf9\xb8\xd6\xd5\x87\x64\x2d\xa7\xf6\xc8\xc9\xc1\xcb\x9b\x46\x6e\xe1\x8a\xee\xe0\x4a\x1f\x3b\xac\x82\xfb\xc3\x6f\xa8\x95\xf5\xa6\xe1\xea\x03\x5e\x5e\x3b\xf1\x57\x6f\xdd\x61\x98\xc3\x5a\xfa\x6a\x7e\x7a\xad\x35\xf5\x1a\x0a\xa1\xc9\x99\xd6\xd5\x6b\x69\xaf\x31\x1b\x6a\x69\x11\xa6\xcf\xd5\x4e\xb8\x46\x22\x90\x33\xc2\x5d\x75\x98\xd3\x04\x76\x7b\x36\xdb\x5c\x74\xe5\x19\x6c\xd6\x5e\x2d\x89\x7f\xcc\x26\x7e\xe1\x05\x0a\xf6\xee\x4f\xc6\x9d\x1d\x58\x74\xe8\x55\x3a\x83\x6e\x6f\xea\x0d\x00\x44\xc9\x37\xac\x3e\xaf\x75\x4a\x20\xc8\x5c\x83\x25\xf5\x16\xa2\x4d\xca\x7e\x35\xcf\x83\x43\x01\xd8\xcf\xdb\xbd\x51\xab\xea\x7d\x21\x98\x26\xbc\xf0\xca\x82\x32\x57\xe3\x14\x85\x0f\x7c\x43\xd1\x62\xb3\xe4\x8b\x25\x84\xb1\x5c\x0c\xa8\x79\x8f\xe1\x1c\x7d\x17\x4e\xb3\x6e\x6b\x76\xa7\x00\xeb\x8f\xf3\x93\x2f\x9e\x0a\xbd\x63\x78\x7e\xc1\x3d\xe1\x6b\x38\xb6\x6f\xc1\xbb\x9b\x18\x0c\xc9\xce\xce\xf6\x10\x25\x8e\xd3\xed\xc1\xc0\xb5\xc2\x36\x36\xd8\xa3\x2f\x28\x18\x24\x3b\x47\x31\xf7\x82\x6c\xa6\x4b\x1c\x67\xdb\x8e\x06\xd9\xa2\xd6\x36\xce\xb6\x1d\x0d\xb2\x45\xad\xbd\x38\xdb\x76\x4f\x90\xcd\x4c\xda\xe4\x59\xed\xd6\x7a\x40\xc4\xfd\x38\x4a\xe4\xfc\x8d\xaf\x86\xe1\x6a\xc4\x24
\xf6\xdf\x9b\xa4\x68\x84\x64\x77\xd2\x9a\xd3\xca\xe8\x37\x1e\x81\xa4\xdd\x82\x0d\x7d\x80\xc3\x86\xf6\x41\x97\x49\x8f\xe6\xdc\x25\xbd\x04\x8c\x45\x54\x42\x44\xb8\xde\x79\x81\x14\x08\xf3\x20\x4f\x4f\x31\xb1\x72\xb1\x65\xdd\xfb\x8e\x4b\x3c\x1e\x46\xfa\x06\xb3\x9f\x72\xc9\x76\x64\x4d\x65\xb1\xcc\xb1\xdd\xa5\xda\x5c\xd7\x6c\xdd\x74\x3b\x52\xd3\x1d\x6c\x0c\x7d\x43\x44\x43\x96\xb4\x5b\x93\xb2\x11\x4c\xb5\xc4\xed\x56\x4f\x24\x51\xff\xff\xb9\x2c\xbb\x0f\x56\x67\xb8\xe8\x14\x18\xa4\xd8\xe3\x83\xde\xa0\xcb\xde\x9e\x96\x8b\xcf\x14\x69\xc4\xb1\x3c\x0f\x54\x25\x4c\x91\xab\x45\x07\x3a\x38\x9e\x9a\x32\x87\x90\xe2\xde\x31\xa5\x89\x79\xe4\x17\x67\x96\x70\xce\xd5\xe4\x20\xff\x02\x37\x82\xfe\xf5\xf2\x94\x5c\xae\x78\x0b\x09\xa7\xed\xa8\x59\x05\xfe\xf5\x79\xff\x96\xd7\x49\x4a\x20\x02\x41\x25\xa0\x82\x70\xdc\x3f\xf4\x98\xdb\x5e\x76\x8c\xae\x73\xeb\x2c\x92\x1b\x56\x37\xef\x49\xd9\xb0\x9e\x28\x77\x1b\x8c\xa3\x0c\xca\xdf\xb9\x24\x82\xb1\xb2\x8f\x21\xc9\x86\x74\x1b\x91\x91\x05\xdf\x32\x41\xb8\xec\x49\xb1\xe9\x65\xb3\x76\x64\x80\xeb\x47\x15\x1f\xee\x80\x0d\x51\x10\xc2\xdc\x98\x81\xe4\x51\xd4\x7e\xbb\x59\x6b\x23\x2f\x75\x4e\x9d\xae\xff\xb4\xc7\xbe\x12\xa4\x5a\x4a\xce\xc8\xdd\x74\xe2\xa7\x93\x26\xd6\xf3\x05\xea\xdf\x19\x29\x4f\xc3\x55\xe7\xb1\x10\xdf\x67\xc3\xf2\x4a\x8b\x66\xaa\x6f\xea\x38\x3e\x26\xdf\x51\x5e\xb3\x32\x9f\x6a\xc3\xd1\xac\xae\x17\x64\x76\x6a\xc2\x12\x95\xab\xc1\x47\xcd\x6f\xec\x05\x38\xd8\xc0\x91\xb4\xd4\x2e\x00\x45\x42\xdb\x01\x0e\xbf\xda\x74\x94\x3e\x90\x5d\xd0\xba\xfe\x9f\xac\x6e\x59\x47\x86\xdb\x93\x7a\x89\xf7\xa2\x69\x92\xa6\x39\x1a\x21\x79\x9e\x07\x07\xe5\x3c\xbb\x63\xa0\x2d\x5e\xd3\x36\xb1\x01\xd6\x15\x2e\x17\x13\xb9\xb4\xf9\x90\xfb\x3d\xee\x0a\x4e\xf2\x7b\x26\xac\x93\x82\xce\x97\xdd\x2a\x6c\x3b\x1b\xb0\x88\x15\xa5\x17\xe7\x7a\x34\xc0\xf4\x9a\xb6\x3a\x3a\xad\xc9\x73\xdb\xe3\xb4\x7e\x90\x5d\x74\x21\x58\x4c\x2b\xaf\xa5\x52\xce\x48\x85\xf7\x1d\x6d\xff\xda\xbb\x7b\x58\xcd\x61\xe2\x20\x2d\x34\x62\xd5\xa8\xa6\x90\x9a\x72\xa3\x07\x86\x8b\xc6\xc0\xbe\xb5\x3b\x56\xa0\xd2\xb7\xde\xe5\xb1\x1b\xf1\xfb\xe0\x62\xb7\xa6\x46\x17\xf2\xec\x43\xc0\x09\x84\xce\xc7\xd8\x9d\xc0\x4f\x8a\x19\xd1\xf0\x53\x62\xc1\xcd\x62\xda\xf4\xf2\x23\x1f\x9e\xb1\x70\xc0\xbe\xf2\xf3\xb9\xf6\xdc\x26\xac\x5a\xed\x1f\xf9\xcc\x1d\x37\x3a\xd2\xbd\x09\x41\xb7\x41\xeb\x43\x9a\x9e\xce\x4f\xa7\x83\x4c\x96\x53\xa4\xfb\xb1\x1a\x9b\xa8\x71\x6b\xf5\x49\xb1\x81\xbb\x86\xdb\x41\x1a\xef\x4b\x3a\x20\x3a\x2c\xf2\x56\x7b\x55\xb8\x25\x49\x99\x7b\xe7\x88\x06\xdb\x92\x7e\x3d\xb0\xed\x43\xd9\x32\x8d\xa0\x10\x6d\x60\xf3\x3f\x2d\x57\x84\xeb\x51\x89\x8a\x4b\x17\x0d\x45\x49\x87\x1e\x86\x27\xc7\x8d\x1c\x41\xc6\xc3\x59\xfe\x8f\x0e\x08\x00\x95\xc5\xa5\xfb\xeb\x20\xb3\x21\xbc\x3b\xff\xb2\x9f\xf6\x1c\x4f\x34\x27\x3c\x25\x5f\x61\x0d\x3e\x1c\x37\x95\x32\x37\xc7\xe2\x46\x83\x03\x30\xf2\xde\xd8\x80\xef\x76\x0e\x0c\x5c\x73\x6f\xc2\xa3\x1e\x25\x0c\x71\x7a\x46\x8e\x2c\x32\x10\x66\xd0\x0b\x00\x9e\x28\xb0\xd3\xe9\x88\x5d\x73\x29\x79\xb1\xda\xfd\x78\xe1\x6c\x9b\x0f\x46\x84\xd2\x91\x7c\x1b\xa6\x71\x10\x24\x04\x28\x82\x3b\x5c\xee\x71\x1d\xe0\x6b\x73\xb3\x98\x59\x0e\x4e\x1c\xe1\x9c\xdc\x8f\x17\xd1\x26\xec\xde\x1b\x9c\xdc\x7d\x57\x60\x06\x41\xa4\xc1\x9f\x22\x62\x00\x77\xd6\x7c\x09\xef\x9f\xc1\x51\xbe\xa3\x23\xc2\x9d\xc9\xc9\x2b\x45\x5b\xec\xbc\x60\xf2\xaf\xea\x73\x22\xe9\x22\xfd\x52\x3f\x7f\xa6\xcf\xff\xe9\x7a\x74\x9d\x4f\x86\xd2\x02\x94\xc3\x97\xa9\xf5\x5d\xf2\x3d\x5a\x73\x32\x99\x34\xe1\xb2\x8e\xb5\xe7\x24\x56\x08\xa0\x60\xc6\xc3\xfd\x5e\xba\x1c\x36\x00\xec\x3d\x12\x64\x3f\x78\xde\x3f\x0a\x63\xb8\xeb\x43\xd8\x2c\x23\x0d\xe0\x07\x04\x08\x4e\x15\xa5\x29\x79\x30\x17\xa5\xed\x1b\xf0\x2
e\xd8\x58\xee\x49\x03\x9e\x0d\xc0\x1a\xa9\x20\x63\x77\xfe\xb8\x77\xe1\x60\xde\x68\x03\x95\xe2\xdc\xb9\x91\x78\x80\x47\x78\x64\x95\x29\x81\xf5\xaf\x70\xd3\xc2\xd3\x1f\x72\xea\x41\xb6\xaa\x3a\x0e\x07\x1c\x1d\x45\x87\x59\x6c\xba\x35\xba\xbc\x62\x10\x7e\xf8\x24\xee\x7e\x14\x6b\xe3\x1d\x3f\x23\xbd\x77\xdf\x89\xa1\xe8\x13\x99\xd7\x7b\x17\xa7\x0c\x8d\x89\x8c\xdc\x59\x88\x43\x06\x8d\x5d\x8f\x00\x9d\x0e\x63\xa8\x7a\x3b\xff\xd3\x5f\x93\xb6\x26\xde\xa5\x7f\xd4\x92\x94\xc1\x2a\x3d\x3e\x86\xca\x4f\x52\x33\x5a\xaa\x46\x7d\x4b\x0b\xa6\x16\x3a\x04\xb6\x6d\x94\xfb\x15\xde\x02\x44\x17\x10\x83\x91\x74\x01\x27\x6b\xce\xc8\x7f\x90\xff\xd0\x46\xff\x8b\x17\xc6\x52\xa0\x0b\x72\x86\x4d\x4e\xaf\x8d\xd3\xb5\xb0\x07\x83\x83\xea\x0f\x8d\x40\x41\x85\x32\xc2\x8b\xa6\x46\x47\xe5\xf8\x98\x50\xc4\x84\x34\x1d\xa1\xe4\x9f\x9b\x46\x32\xa8\x00\x25\xfd\x4e\x48\x7a\x87\xa9\x24\x40\xf3\x51\x2c\x9f\x21\x96\xe1\x83\xd3\xf8\xc1\x6c\x30\x0f\x5e\x11\xfe\x62\x6e\x62\xd6\x00\xf4\xc3\x87\x08\x86\x79\xf0\x62\x1e\x42\xf1\xeb\x5b\x4c\x78\x1a\xb9\xa0\x00\x5d\x9d\xf2\xeb\x34\xa4\xd4\x8b\xf9\xe9\xb5\x4f\x0d\x98\x71\x69\x38\x27\x1b\x52\x71\x51\xa2\x7f\xa3\x67\x3d\x7f\x7c\xd6\x76\x4e\x95\xcf\xb1\xff\xfa\x2f\xfd\x58\xcf\x55\x5f\xb6\x1c\xcc\x3b\x98\xf5\x60\x46\xff\x44\x3f\x2b\x9e\xd3\x8b\xf9\xbe\x59\xf9\x87\xc3\x6f\x7b\x2d\x05\x5b\x4c\x7e\xff\xac\xe1\xc0\x01\xf4\x77\x02\x26\x9e\xe0\x08\xa9\x67\xf7\x99\xa9\x07\x0b\x65\x36\x1b\x31\x77\xf4\xfe\x1e\x99\x3b\x8f\xd9\xcf\xd6\xa7\x32\x56\x8c\xbd\xec\xe3\x80\x01\x19\x65\x37\x20\x23\x29\x65\x5e\x33\x91\xfa\xb5\x1e\xce\x7e\x01\xa0\x7b\xec\x17\xdf\xcc\xd6\xd6\xe1\x68\xec\x64\x68\x56\x8c\x24\xf3\x7c\x23\x63\x3a\x99\xd0\xc3\x4a\xfb\x37\xd3\xda\xbf\x6e\x53\xfe\x95\x7a\x9b\xba\x7b\xa7\xed\x46\xf8\x44\xbd\x4d\x83\xf3\x70\x87\x35\xf7\xd8\xde\xfa\xb0\xd7\xe9\x39\x88\x26\xea\xee\x41\x51\xe3\x98\xef\x16\x66\xd1\xfa\x28\x32\x8a\xee\xfb\xb8\xcc\x61\x8d\xe4\x21\x99\x33\x76\xbb\xb9\x00\xe3\x80\xc4\xef\x91\x4f\x23\x8d\x91\xfb\xf4\xb8\x60\x72\xf2\xc2\xcd\xc6\x44\x85\x4d\x30\x02\xc5\xb6\x0f\x03\xcc\xff\x96\xd6\x7f\x0d\x69\xb5\x65\x8f\x3d\x9e\x6c\x7f\x0e\x8e\x9f\xb2\x37\x02\xb5\x32\xcc\xfe\xf6\xb2\xdb\x27\xa9\xb8\xdb\x1d\x10\x55\x5f\x1b\x06\x62\x05\x05\x77\xc1\x75\x6a\xd3\xc9\xa4\xd0\x5b\x0b\xf8\x6f\x21\xb3\xed\x75\x5a\x03\x96\x1f\x15\x9f\xe4\x84\x03\x95\x0e\x79\xe1\x36\x40\xf3\x0d\x95\x34\x49\xc9\xd5\xc9\xb5\x77\xba\x14\xe1\xe3\xaf\x55\x81\x88\xcd\x82\xf6\x26\x68\xd9\x6f\x5a\x73\x21\xe7\xce\x46\xa5\xfd\x83\xad\xde\x78\x3a\x78\x12\x95\x48\xec\xdd\x00\xa1\x72\x63\x7f\xc4\xf0\x50\x11\xf8\x34\xfc\x11\x88\x3d\x7d\xa3\xa8\xe9\x92\x8a\xb7\x5e\x67\xf3\x53\x0a\x4f\xea\x2c\x97\x5d\xf3\xfe\x2d\xaf\x35\xcf\x80\x21\x16\x52\x58\xe6\x31\x00\x14\x2f\x30\x1d\xfc\x1e\x06\xd1\x9e\x84\x89\x8b\x9d\x99\xab\x06\x40\x9a\x34\x62\xe3\xb1\x57\xb3\x1e\x21\xb8\xfe\x91\x52\xa6\x98\x7a\x48\xca\x20\x08\x6c\xe2\xc8\x4f\xb2\x79\xfc\x0a\xe6\x21\xae\xf6\x50\x41\xb4\x47\xed\x8b\x28\x87\x1b\xd2\x63\x82\xa1\x3b\xdd\x6c\xaa\x8a\xd9\x7c\xe5\x28\x88\x90\xa9\xfb\x0e\x46\xf8\xe5\x7c\x0e\xf3\x8f\x21\xf0\xf7\x4c\x1c\x22\xaf\x51\x12\xc1\xc9\xf0\xc7\xc8\x8c\xc1\x78\x28\x8a\x82\x45\x36\x10\x91\xbd\xc1\xce\x97\xa1\xb2\x1e\x91\xa1\x68\xf5\x3c\x15\xd2\x3c\xe6\xe7\x27\xa0\x10\xec\xca\x1e\x42\x1f\x43\x6e\xef\xac\xce\x3e\x92\x2b\x87\xfa\xce\x7c\xb9\x9f\x4e\xb6\xa3\x95\xe0\x77\xc3\x1a\xe9\xc9\x1d\x39\x23\x77\xb9\xcd\xc3\xba\xbb\x8e\x30\x19\x9d\xc3\xfe\x26\x78\x9d\x3e\x52\xc8\xb0\xaf\x88\x20\xfa\xf5\x9d\x50\x3b\xa2\x60\x16\x58\x7a\xbd\xcf\xf2\x1e\x7b\x73\x07\x6f\xf6\xfc\x62\xc8\x63\xc5\x14\xfb\x6a\x43\xa3\xa4\xdf\x9d\xfd\x29\xa4\xb1\x5f\x61\xf0\x4e\x47\x7c\x3c\xe2\x26\xdd\x1a\xdd\x
69\xf1\x34\xc4\xef\x82\x8b\x28\x9c\xd8\x81\xcf\x07\x1d\x80\xa5\xad\x77\x47\x6f\x20\x28\x5f\xef\x24\xeb\x93\x3b\x72\x75\x0d\x17\x53\xef\x17\x17\xf3\x14\xeb\xc9\x53\xaf\x48\x26\x2c\xe5\x7f\xa6\x4b\xf9\xf7\x97\x48\x9b\x51\xcd\xad\xaa\x6a\x60\xff\x36\x43\xff\x88\xd2\x80\x62\xfe\xc0\xba\x54\x17\x23\x33\xb6\x34\x47\xa3\x13\xbc\x34\xb5\xfe\xe5\x65\x74\xfa\xc9\xab\xcb\xc0\xb3\x48\x83\xca\x0c\xd7\x6d\x70\x06\xca\xeb\xe0\x57\x67\x0c\x7a\xb8\x73\x50\x5e\x0f\xbf\x42\x63\xd0\xc3\x3f\x0b\xe5\xf5\x09\xab\x34\x90\x4c\x67\xc4\xf5\xd6\x97\x36\x3e\x45\x6e\x7a\xe4\xe2\xa8\x4c\xbc\xa6\x6d\x22\x30\x18\xf0\x74\x71\x38\x18\xe4\x8c\x2a\x97\x78\x45\x04\x79\xb5\xcf\x25\xfb\xf0\x81\x08\xf2\x95\x7d\x1b\x67\x5c\x47\xb3\x1c\x48\x0b\xd3\x34\xb0\x84\x09\x17\x7a\x52\x5a\xbc\x04\x7b\x7f\x48\x0c\x06\x22\x60\xda\x0f\xf8\x3f\xe4\x7d\xd4\xd4\x31\x7e\xc8\xf4\xa8\xa9\xc7\x71\x31\x7a\x07\xc0\x18\x13\x0d\x8c\x3d\x7c\x54\x96\xcd\xff\x0b\x3e\xbe\xfc\x15\x2c\x43\x8a\x8c\x31\xec\x7b\x7b\x53\xf2\xff\x07\x86\x89\x83\x1c\x1a\xce\xf3\xb7\x61\x19\x5c\x60\xcd\x33\x72\x1b\x45\xe2\xb6\xb4\x23\x09\x6c\x3b\x70\x91\x8c\x0e\x2a\xe8\xab\x8d\xfb\xe8\xa6\x07\xaf\xfc\x81\x8b\x32\xb2\xb0\xd4\x93\x41\xfc\x2e\xdc\xca\x21\x28\xe1\x8a\x58\xc6\x55\x38\xde\x2d\xdd\x9b\xcb\xb1\x37\x82\x96\x65\xc7\xfa\x1e\x8a\x43\x5c\xd8\xe1\xe1\x23\xa3\x83\x05\xfc\xdc\x84\x17\x13\xd4\x53\x3d\x73\xd7\x94\x62\x18\x05\xf4\xdf\xc8\x19\x48\xcf\x9c\x1d\x04\x89\x10\x10\x0c\xa6\x7b\x07\x11\x23\x1c\x7b\x9f\x08\x7f\xb2\x13\x7f\x4b\x5e\x11\x8e\x1f\xbe\x3a\xe8\xcc\x47\xa4\x45\xc7\x7e\x24\x12\x75\xd3\x6c\x84\xfe\x79\xa7\xb8\x36\xe8\xa2\x4a\xc0\x77\x3f\xbd\xbd\x4e\x3f\xd2\x19\x37\xc7\xb1\x94\x84\x3c\x78\xc7\x86\x46\xa7\xb1\xe7\xd6\xf1\x11\xd9\xd8\x83\xf9\x47\xdc\x43\xde\x6f\x6e\x7a\x8d\x5b\x9f\x11\xb5\x38\xe2\x32\x88\x3d\x0b\xe9\x33\x58\x49\x19\x59\xfd\x7b\x31\xfd\x0b\x2e\xa6\x8f\x96\xcd\xcf\x9e\x22\x9c\x2b\xf2\x8a\xdc\xe2\x87\xa7\x48\xe9\x67\xbf\xa7\x98\x66\x64\xf5\xb8\xa4\xbe\xae\x9b\x5e\x1f\x68\xb1\x3b\xb1\x72\x7e\xbd\x9d\xd9\xf7\xcf\x86\x27\x29\x55\xff\xd0\x8d\x37\x25\x66\x3d\x53\xd3\xdd\x5b\x83\x87\xaf\x3f\xb1\x0a\xaf\x58\x52\xd1\xb1\x62\x3b\xbc\x88\x2d\x23\xe2\x06\x02\x68\xe3\x57\x4f\x25\x38\x2c\x2b\x33\xd2\xe1\xd9\x34\xf3\x43\x39\x6a\x21\x35\x6b\xfc\x45\xd0\xab\x6b\xff\xc8\xc1\xfd\xfd\xc8\xef\x96\x2c\xd3\x07\xac\xa6\x17\x37\xe8\x59\x42\x5f\x7b\x1e\x03\xbe\x66\xc1\xc9\x85\x7b\x5d\x73\x83\x18\xfc\xc8\x60\x24\x9f\x48\xd8\x29\x35\x50\x8f\x8e\x88\x6d\xaa\x23\xba\x2f\x8d\x3d\x73\x76\x46\xe6\x7e\xce\x1d\x5c\xc3\xcc\x1d\xc2\x9a\x28\xe2\x04\x43\x38\x20\xf3\x71\x5b\xc1\xbb\x5c\x0b\x2d\x05\x0d\xc2\x0e\x9d\x06\xc7\x9a\xe2\xf7\xf3\xe1\xaf\xa7\x2c\xa9\xe8\x81\x16\x43\x1e\x0d\x59\x63\xf9\xe6\xc2\x9f\x1f\xc7\x8e\x3d\x3e\x74\x68\x32\xfe\xcb\xf1\x6c\xef\x69\xb1\x0e\xe1\x24\xfa\x6f\x4f\xae\xae\xbb\x8d\x90\x7c\xcd\x2e\xe1\x01\x5c\x42\xd8\xf4\x4c\xe0\xcf\x23\xc0\x8f\xed\xfe\x6d\x44\x94\xf5\x31\xa3\xe1\x5d\xe6\x06\xb0\x77\x81\x7e\xef\x5d\x43\x69\x86\xf5\xa2\x29\x38\xf0\x37\xbc\x4b\xfa\x1c\x4a\xc0\x6d\x44\x45\xbf\xf1\x82\x07\x30\x3e\x5e\xe5\x19\xd2\x33\xec\xf2\x23\x2b\xb6\xd8\x7e\x39\x72\x87\x9a\x1f\x71\xd6\x75\x4c\x83\x13\xb4\x79\xb1\x34\x97\x52\x45\xaf\x5e\x9a\x13\x14\xc5\x72\xf4\x8e\x07\xe8\x6a\x93\xe9\xfb\x10\x2e\x96\x11\xca\x97\x4c\x94\x4f\x45\x79\xec\xaa\x94\xdf\x71\x22\x7b\xaf\xb3\xe8\xf3\x91\xbb\xf3\x1e\x9d\x38\x2c\x53\x77\xa6\xf1\xf1\x35\x50\x8c\xa9\x9b\x97\x36\x2a\xcc\x2b\x4f\x84\x8c\x80\x5d\x15\xd7\x28\x4c\xf0\xeb\x18\x46\x26\xf4\x3a\x39\xa8\xc3\xc6\x7e\x8a\xd1\x03\xfa\x24\x85\x66\x7f\x44\x6a\xbf\x3a\xf3\x16\x68\x61\x34\xac\x59\xa4\xdf\x30\xd6\x7e\xfb\xcf\x0d\xad\x13\x3a\xcf\x08\x3d\x09\x7f\x65\
xc5\xe8\x31\x3e\x1f\x77\x69\xa9\x9a\x05\x3f\xd9\xf3\xf2\x44\x1f\xc5\x9c\xc3\x3d\x4e\x27\xbe\xe6\xc0\x33\xb8\x0f\xde\x7b\xc1\x6b\x48\xd8\x9d\xf8\x5f\xe6\x7b\x0e\x5d\xf1\x93\xb1\x17\x87\x34\x53\xc9\x58\x8b\xe6\x91\x9a\xec\x5f\xfb\xc4\x58\xfb\x74\x9e\x66\xd6\xf4\xa7\x27\xa9\x0e\xce\x1a\xfa\x0c\xfa\x6d\xe7\x19\xd9\x9e\x98\x4b\x14\xb6\xbc\xe7\x92\x95\x4a\xbf\x9f\x5c\xc7\x3b\xb5\xa5\x5e\x45\x9e\x6d\xe7\x70\x72\xb1\xe6\x25\x86\x67\x9e\x6d\x4f\xbc\x07\x1e\xe6\x61\xcb\xa3\xa3\xb0\xa5\x3d\x00\x37\xd7\x47\xaf\x15\x35\xb6\x27\xe6\xcb\x28\x05\x82\xe6\xfb\xcb\xc5\xa3\x8c\xae\xd7\x2a\x53\xfd\xad\x71\xa4\x40\x1c\x6c\x7b\xe2\xc7\x53\xbd\xc3\x40\xdb\x79\x7c\x50\x5a\xa7\x82\xdc\x8f\x87\x64\xd1\x41\xe7\x9f\xf5\x75\x91\x4e\xab\x1b\x82\x9b\x12\xa3\xed\x1c\x03\xb4\x67\xd8\xf0\xea\xe5\x35\x1c\x87\x39\x09\x9f\xce\xaf\xc3\xf3\xce\x28\x7e\xee\x4c\x96\x81\x6a\x37\x52\xfd\x20\x23\x03\xb6\xde\xe3\x88\x99\x1e\xe3\xe1\x89\x73\x0c\x72\x1e\x73\xff\xf0\xa3\xbb\x26\x19\x5f\x99\x7c\x08\x32\x36\xc8\x8e\x8c\x1e\xd7\xd6\xdd\xfc\x7c\xa1\xc7\x82\x47\xe6\x4d\x3b\x22\x94\xe3\x31\x37\x07\x39\x30\x20\x85\x63\x63\x5a\xcf\xcf\xcb\x98\x81\x1f\x46\x0e\xa2\x8b\xe8\xf4\xf9\xc8\xca\xb1\x59\x7d\xa0\x9e\xf7\x05\xa9\xfd\xc8\xa1\xf4\x70\x12\xc3\x3c\x45\x48\xbe\x0f\x1f\x06\xe4\x33\xd9\x24\xd7\x08\x45\x45\x7f\x0b\x47\x19\x43\xdf\x5c\x62\xb3\x3d\x71\x1f\x35\xea\xe1\x41\x82\x5f\x05\xc3\xbf\x56\xca\xb2\xc7\x1d\xf2\xff\x44\xd2\x9b\xab\x00\x60\x64\xef\xcb\xa7\x92\x5e\xe7\x46\x1f\x95\xd9\x11\xc9\x79\x82\xc0\x86\xf2\x6a\x44\x15\x6e\x60\x05\x72\xbc\xa1\xed\xdf\xd8\xae\x37\x12\xab\xac\x41\xf5\x32\x7d\xb2\xe4\x9a\x9b\x63\x51\xab\x00\x60\x53\x1f\x08\x7b\x1d\x8e\x81\x22\xba\xd2\x96\x50\x0d\x1b\xdd\xf6\x24\x7e\x03\xfa\x9d\xd6\x03\x0d\x4f\xeb\x93\xe8\xd1\x90\x31\xb4\x9e\x83\x91\x72\xf2\x2b\x58\x11\x57\x31\xec\x95\xef\xc3\xb5\x02\x7b\x59\x12\x78\xf1\xe3\x45\xe9\x6a\x0d\x9e\xf7\x30\xab\xa7\xa4\x02\xd5\x26\xaa\x73\x81\x4f\x69\x7d\xe2\x32\x87\xce\x45\xfb\xbf\x01\x00\x00\xff\xff\x07\xe4\x39\x04\x8f\x8d\x00\x00"), - }, - "/src/reflect/reflect_go111.go": &vfsgen۰CompressedFileInfo{ - name: "reflect_go111.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 220471985, time.UTC), - uncompressedSize: 3460, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\xdf\x6f\xdb\x36\x10\x7e\x16\xff\x8a\x8b\x30\x04\x64\xad\x28\xc9\x6b\x5a\x07\x28\x86\xa2\xcb\x86\xb6\xc3\xd2\x76\x0f\x41\x50\x30\x32\xa5\xd0\x95\x8e\x02\x49\x39\xce\xd2\xfc\xef\xc3\x91\xb4\xfc\xa3\x2e\x92\x02\xdb\x8b\x21\xf3\xc7\xdd\x7d\xdf\x7d\x77\xc7\xe3\x63\x98\xdc\x0c\xba\x9d\xc1\xdc\xb1\xf5\x9f\x83\xc6\x9c\x96\xa7\xa7\xe5\x29\x63\xbd\xac\xbe\xca\x46\x81\x55\x75\xab\x2a\xcf\x98\xee\x7a\x63\x3d\x70\x96\xe5\x03\x3a\x59\xab\x9c\xb1\x2c\x6f\xb4\xbf\x1d\x6e\xca\xca\x74\xc7\x8d\xe9\x6f\x95\x9d\xbb\xf5\xc7\xdc\xe5\x4c\x30\x56\x0f\x58\x41\xa7\xfc\xad\x99\xfd\xa5\x2a\xa5\x17\xca\x72\xd3\x83\xf3\x56\x63\x53\xc0\x02\x3e\xcb\x76\x50\x05\x68\xd0\xe8\x05\xf0\x2f\x05\x78\x78\x61\xfd\x7d\xaf\x0a\xa8\x11\xa2\xbb\xf2\x4f\xa3\xd1\x2b\x2b\xe0\x81\x65\x0b\x69\xa1\xb7\xa3\x11\x96\xe9\x1a\x16\xa5\xbf\xef\xcb\x3f\x34\xce\xb8\x80\xe9\x14\x2e\xe8\x74\x2d\x2b\x45\x17\x32\xef\xe1\x6c\x0a\xfc\x85\x5e\xad\x7e\xbc\xef\x95\xe0\xdb\xb6\x79\xb0\x21\x04\xcb\xc8\xa0\x86\x57\x70\x02\xdf\xbe\x81\x86\xf3\x29\xb4\x0a\xb9\xf7\x65\xc4\xe1\x42\x14\x59\xd6\x4b\xd4\x15\xcf\x13\x49\x67\x10\xac\xa0\x6c\x41\x59\x6b\x2c\xfd\x5f\xc8\x56\xcf\x12\x7a\xd0\x38\x53\xcb\x9c\xcc\x3f\xb2\x2c\xeb\x28\xa2\xc3\xb5\xcd\x2b\x7d\x1d\x1d\x1f\x78\x5f\xa2\xec\xd4\x87\xba\xe6\x5d\xf8\x12\xa5\x76\x6f\x96\x94\x00\x45\xe8\xf6\xfa\xce\x61\x02\xa6\x87\x09\xe4\x60\x6a\x18\x50\xa5\xf3\xc9\xf9\xe8\xd6\xc3\x14\xbc\x27\xa0\xc9\x01\x41\x66\x59\x16\xf8\x0c\x5b\xbb\xbe\xe9\x97\x0b\x96\x3d\x82\x6a\x5d\xa4\xb3\x73\x14\x7c\xa4\x7c\xe5\xe8\x5d\x84\xc1\x13\x7d\x83\x46\xcf\xb5\x20\xee\xc2\x27\x11\xd8\x39\xf1\x9f\x31\xd7\x6d\x10\x16\x03\xf9\x7f\x39\x8b\x3e\xd6\xb4\x75\xdb\xbc\xcd\x5d\xf9\xb6\x35\x37\xb2\x2d\x7f\x95\x6d\xcb\xf3\x5f\xa2\x89\x4b\xe5\xf3\x02\xe6\x8e\xe4\xb6\x92\x57\x79\x41\x68\xb8\x16\xe5\x5b\xe5\x79\x4e\x06\x72\x51\x5e\x06\x29\x07\x9e\x59\x66\xab\x85\x8d\x0c\x9b\x9b\xb9\xaa\x3c\x2d\x93\x24\xdd\xdf\x56\xf6\xbd\x9a\x25\x53\x01\x55\x38\x3b\xdd\xf6\x51\xbe\x57\x77\x9c\x36\xa2\xb9\x1a\x61\xba\x53\x47\x61\x37\x04\x40\xfe\x45\xf9\x29\xec\x72\x12\xbf\x55\x7e\xb0\xc8\x1e\x53\xe9\xf2\x54\xa1\x02\x2a\x42\xb6\x51\xba\x1a\xe1\xea\x3a\xed\xa5\x8f\x55\x75\xf2\xc0\x1a\x40\x2a\x64\x96\x51\x0c\xb0\x13\xc3\x2a\xf8\x17\x73\x57\x7e\x08\x38\x59\x26\x52\x31\xd7\xad\x6c\x0e\xe9\x27\xca\x0a\x0e\xa6\x70\x12\xe0\x52\x7b\x08\x7d\x61\xfa\x7d\x53\x29\x60\x41\x51\x79\x1e\xef\x8b\xf3\xf3\xb5\x85\xcb\x5b\x5d\x7b\xb1\xe6\x6b\x93\xda\x1f\x72\xfb\x24\xb9\xc4\xee\x46\x5d\x8c\x42\x89\x80\xbf\x23\x7d\xed\x74\x93\xf0\xb5\x97\xf2\x13\xce\x54\xad\x51\xcd\x28\x6f\x81\x0a\x32\x33\x05\xd4\x6d\x70\xb0\xad\xe0\x32\x70\x1e\x14\x77\x16\xb2\x43\x32\xa6\xa3\x94\x38\xaf\x0d\xe6\x22\xd9\x71\x97\xad\xae\x14\x49\x8a\xd4\x3a\x85\x9c\xee\x84\xb5\x9c\x65\x48\xeb\xbe\x7c\x3f\x74\x17\x38\x2a\x2d\x5e\x78\x58\xb5\xa4\xf2\xc2\x7d\x96\x56\xcb\x99\xae\x7e\x54\x4d\xa3\xc9\x10\x85\xc1\xa3\x45\xba\xb0\x15\x4e\x28\x28\x5d\x87\x86\xaa\x51\xc0\x2b\xc0\xa7\xcc\xdd\x69\x7f\x0b\xde\x18\xa8\xd5\x1d\x68\xec\x07\x0f\xd2\x36\x43\xa7\xd0\xbb\x7d\x26\xcf\x7f\xc2\x64\x27\xf1\xfe\x47\x36\x37\x12\xab\x6b\xd8\x47\x01\x1e\x1d\xfd\x24\xa2\x67\x83\xd9\xa5\xfc\xf0\xf0\x79\xf8\x9e\x09\x8d\x65\xb5\xb1\xf0\xa5\x80\x25\x25\xdf\x4a\x6c\x14\xd5\x73\xc2\xba\xdc\x1a\xa4\xb1\x15\x3f\xd5\x41\x07\xa7\xb1\x81\x7f\x94\x35\xb1\x5f\x8c\x4e\x77\x7c\x6a\x72\x78\xf2\x32\xcc\x57\x7c\x09\x7a\x32\x19\xbd\xfa\x02\xbc\xb4\x0d\x1d\xd0\x78\xa5\xaf\xcb\x50\x75\xa2\x20\xee\x91\x6b\xf1\x12\x0e\x96\xbe\x7c\xed\x9c\x6e\x50\xde\xb4\xea\xa3\xe1\x74\xfe\xe9\xee\x1e\x63\xa3\x85\xa5\x1f\x3b\x6d\xd8\x91\x0e\xa8\x41\x85\x3d\xb
2\x35\xee\xae\x83\xa6\x64\xac\xea\xe1\xf0\x70\x9f\x0e\x8e\x8f\xa1\xb7\xaa\x97\x56\x81\x0b\xc7\x08\xa7\x55\x9d\xd4\x48\x7e\x17\x44\x87\x5b\x0d\xae\x55\x16\x8f\x00\x59\x96\xb9\x55\x5d\xbe\x93\x5f\x55\xf0\xc1\x03\x58\x14\x05\x74\x05\x74\x14\x86\x6a\x55\x17\x4b\x34\x6c\x94\x6f\x5a\xd5\x85\xb6\xb5\x4b\x67\xb7\xa6\x33\x5b\x26\x16\x71\x12\xc6\x64\xe4\x97\xd6\x96\x89\xd5\x3d\x64\x92\xa3\x44\xe6\x77\x6c\x56\x12\xd1\x78\x18\x9c\x7a\x92\x47\x32\xb3\xbd\xab\x31\x65\x83\xa2\x0e\x0a\x8f\xc0\xd7\xa3\xf0\x52\x79\xbe\x5c\xc9\xdf\x58\xdd\x5c\x60\x04\x40\xd2\x08\xcd\x5e\x7e\x55\x3c\x4d\x99\x02\x70\x72\x4a\x87\x2b\xd3\xdf\x73\x8d\x57\x67\x78\x5d\x40\xbc\x15\xda\x39\x5e\xe1\x35\x4c\x63\x32\x62\x07\x44\x8d\x1b\xe4\x87\xa4\xd2\xd2\xc1\x46\xe3\x7b\xaa\xc1\xde\x59\x83\xcd\xa8\x6a\xa8\xcc\x10\xb5\xfd\xc8\x32\x34\x83\x1f\x9b\xe8\x87\x81\x86\x0a\xcb\xa4\x6d\xdc\x6b\x6b\xe5\x3d\xed\xac\xdf\x07\x61\xe8\x87\xf5\x3c\x8e\x92\x31\x00\x91\x0a\xa4\x80\x54\x04\x5b\x65\x39\x9a\x23\xae\x12\x6f\x05\x0c\x78\x67\x65\xff\xbb\x8b\xf3\x93\xa7\x42\x09\x16\x4a\x19\x72\xfb\xd1\xec\x83\x93\x8f\x45\x45\xaf\xf7\xce\x20\x95\x19\xea\x56\x8c\x13\x4a\xa4\xa7\x88\x72\x43\xeb\xc3\x7b\x8f\x26\xcc\x6f\xaa\xed\x95\xe5\x73\x57\x5e\xa4\x27\x5b\xf2\x5c\x93\x62\x69\x90\x05\xdf\x31\x52\x62\xc1\xdd\x69\x5f\xdd\x42\x60\xe8\x81\x65\x95\x74\x0a\x4e\xce\x68\xe8\x85\x67\x06\xf9\x4c\xab\xa7\x1b\xab\x29\xd1\x0f\x94\xf5\xf0\xc5\x7d\x49\xbc\x9e\x88\x02\x76\x00\xaf\x96\x53\xa0\xa2\x80\x13\xf1\xc8\xb2\x99\xaa\xe5\xd0\xfa\x64\x33\xbc\x18\xb7\x15\x64\x06\xbf\x55\x43\x91\x6c\x3a\x1b\x27\xbf\xf2\x57\xfa\x3a\x09\x6f\x33\x04\xbd\x3f\x04\xbd\x0e\x61\x54\x75\x88\x25\x69\x3a\x01\xb3\xca\x13\xad\x8f\xec\xdf\x00\x00\x00\xff\xff\x88\x68\xc5\x60\x84\x0d\x00\x00"), - }, - "/src/reflect/reflect_go1111.go": &vfsgen۰CompressedFileInfo{ - name: "reflect_go1111.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 220613759, time.UTC), - uncompressedSize: 3559, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\xdf\x4f\xdc\xb8\x16\x7e\x8e\xff\x8a\x43\x74\x85\x6c\x08\x01\x5e\x69\x07\xa9\xba\xaa\x7a\xb9\xab\xb6\xab\xa5\xed\x3e\x20\x54\x99\x8c\x13\x3c\x4d\x8e\x23\xdb\x19\x86\xa5\xfc\xef\xab\x63\x7b\x92\x19\x3a\x15\x54\x5a\x69\x5f\x50\xc6\x3f\xbe\xf3\x9d\xef\xfc\x32\xc7\xc7\x70\x78\x33\xe8\x76\x0e\x0b\xc7\xa6\x1f\x8d\x39\x2d\x4f\x4f\xcb\x53\xc6\x7a\x59\x7d\x93\x8d\x02\xab\xea\x56\x55\x9e\x31\xdd\xf5\xc6\x7a\xe0\x2c\xcb\x07\x74\xb2\x56\x39\x63\x59\xde\x68\x7f\x3b\xdc\x94\x95\xe9\x8e\x1b\xd3\xdf\x2a\xbb\x70\xd3\xc7\xc2\xe5\x4c\x30\x56\x0f\x58\x41\xa7\xfc\xad\x99\xff\xa1\x2a\xa5\x97\xca\x72\xd3\x83\xf3\x56\x63\x53\xc0\x12\xbe\xc8\x76\x50\x05\x68\xd0\xe8\x05\xf0\xaf\x70\x60\xfd\x7d\xaf\x0a\xf0\x70\x40\x97\x3f\x85\x1f\x35\x42\x34\x5c\xfe\x6e\x34\x7a\x65\x05\x3c\xb0\x6c\x29\x2d\xf4\x76\x84\x63\x99\xae\x61\x59\xfa\xfb\xbe\xfc\x4d\xe3\x9c\x0b\x98\xcd\xe0\x82\x4e\xd7\xb2\x52\x74\x21\xf3\x1e\xce\x66\xc0\x0f\xf4\x7a\x95\xe0\x05\xdf\xc6\xe6\x01\x43\x08\x96\x11\xa0\x86\xd7\x70\x02\xdf\xbf\x83\x86\xf3\x19\xb4\x0a\xb9\xf7\x65\xf4\xc8\x05\x16\x59\xd6\x4b\xd4\x15\xcf\x93\x5c\x67\x10\x50\x50\xb6\xa0\xac\x35\x96\x7e\x2f\x65\xab\xe7\x49\x07\xd0\x38\x57\xab\x9c\xe0\x1f\x59\x96\x75\xc4\x68\x7f\xc2\xbc\xd2\xd7\xd1\xf0\x9e\xf7\x25\xca\x4e\x7d\xac\x6b\xde\x85\x2f\x51\x6a\xf7\x76\x45\xa1\x50\xe4\xdd\x4e\xdb\x39\x1c\x82\xe9\xe1\x10\x72\x30\x35\x0c\xa8\xd2\xf9\x64\x7c\x34\xeb\x81\x74\x58\x2b\xfc\x83\x04\xde\x93\x06\xc9\x76\x50\x83\x2e\x06\xb1\x67\xb0\x83\x18\xfd\xe5\x82\x65\x8f\xa0\x5a\x17\xb5\xee\x1c\x79\x16\xe3\xb1\x66\xf1\x3e\xfa\xc8\x93\xb6\x83\x46\xcf\xb5\x20\x61\xc3\x27\xa9\xdb\x39\xf1\x8f\xc9\xda\x6d\xa8\x19\x89\xfc\x6b\x82\x46\xf3\x93\xa6\xdd\x53\x51\x17\xae\x7c\xd7\x9a\x1b\xd9\x96\xff\x95\x6d\xcb\xf3\xff\x44\xfc\x4b\xe5\xf3\x02\x16\x8e\x40\xd7\x89\x59\x5e\x90\xab\x5c\x8b\xf2\x9d\xf2\x3c\x27\x80\x5c\x94\x97\xa1\x08\x42\x10\x58\x66\xab\xa5\x8d\xf2\x9b\x9b\x85\xaa\x3c\x2d\x53\x32\xbb\x3f\xad\xec\x7b\x35\x4f\x50\xc1\xe5\x70\x76\xb6\x6d\xa3\xfc\xa0\xee\x38\x6d\x44\xb8\x1a\x61\xf6\xa4\x02\xc3\x6e\x20\x40\xf6\x45\xf9\x39\xec\x72\x72\xc9\x2a\x3f\x58\x64\x8f\xa9\xfc\x79\xaa\x72\x01\x15\x79\xb6\x51\xfe\x1a\xe1\xea\x3a\xed\xa5\x8f\x75\x5d\xf3\x20\x29\xc0\xd4\x05\x58\x46\x34\xe0\x09\x8d\x35\xff\x83\x85\x2b\x3f\x06\x57\x59\x26\x52\x27\xa8\x5b\xd9\xec\xd3\x9f\x98\x76\xb0\x37\x83\x93\xe0\xf1\xd7\x02\x7c\x68\x2a\xb3\x1f\x7b\x53\x01\x4b\x22\xe6\x79\xbc\x2f\xce\xcf\x27\x84\xcb\x5b\x5d\x7b\x31\x49\xb6\xa9\xee\x4f\xe5\x7d\x56\x5f\x12\x78\xa3\x6e\x5e\x92\x48\x21\x71\x76\x05\x65\x62\xb4\x19\x90\x89\x42\xf9\x19\xe7\xaa\xd6\xa8\xe6\x14\xd7\xa0\x13\xc1\xcc\x00\x75\x1b\xac\x6f\xa7\x7f\x19\x62\x12\x32\xf2\x2c\x44\x8f\x6a\x80\x8e\x12\x3d\xaf\x0d\xe6\x22\xe1\xb8\xcb\x56\x57\x8a\x52\x8e\xb2\x79\x06\x39\xdd\x09\x6b\x39\xcb\x90\xd6\x7d\xf9\x61\xe8\x2e\x70\xcc\xc4\x78\xe1\x61\xdd\xec\xca\x0b\xf7\x45\x5a\x2d\xe7\xba\xfa\x59\x29\x8e\x90\x81\x85\xc1\xa3\x65\xba\xb0\x45\x27\x54\xa3\xae\x43\xab\xd6\x28\xe0\x35\xe0\x73\x70\x77\xda\xdf\x82\x37\x06\x6a\x75\x07\x1a\xfb\xc1\x83\xb4\xcd\xd0\x29\xf4\x6e\x17\xe4\xf9\x2f\x40\x76\x12\xef\x7f\x86\xb9\x11\x75\x5d\xc3\x2e\x09\xf0\xe8\xe8\x17\x3d\x7a\xb1\x33\x4f\x25\xdf\xdf\x7f\x99\x7f\x2f\x74\x8d\x65\xb5\xb1\xf0\xb5\x80\x15\x05\xdf\x4a\x6c\x14\xd5\x7b\xf2\x75\xb5\x35\xa2\x63\x1f\x7f\xae\xfd\x0e\x4e\x63\x03\x7f\x29\x6b\x62\x3f\x19\x8d\x3e\xb1\xa9\xc9\xe0\xc9\xab\x30\xb9\xf1\x15\xe8\xc3\xc3\xd1\xaa\x2f\xc0\x4b\xdb\xd0\x01\x8d\x57\xfa\xba\x0c\x25\x29\x0a\xd2\x1e\xb9\x16\xaf\x60\x6f\xe5\xcb\x37\xce\xe9\x06\xe5\x4d\xab\x3e\x19\x4e\xe7\x9f\x1f\x0d\x91\x1b\x2d\xac\xfc\xd
8\x89\xc3\x8e\x74\x40\x5d\x3f\xec\x11\xd6\xb8\x3b\x91\xa6\x60\xac\xeb\x61\x7f\x7f\x57\x1e\x1c\x1f\x43\x6f\x55\x2f\xad\x02\x17\x8e\x91\x9f\x56\x75\x52\x23\xd9\x5d\x92\x1c\x6e\x3d\xf5\xd6\x51\x3c\x02\x64\x59\xe6\xd6\x75\xf9\x5e\x7e\x53\xc1\x06\x0f\xce\xa2\x28\xa0\x2b\xa0\x23\x1a\xaa\x55\x5d\x2c\xd1\xb0\x51\xbe\x6d\x55\x17\x7a\xda\x53\x39\xbb\x49\xce\x6c\x95\x54\xc4\xc3\x30\x63\xa3\xbe\xb4\xb6\x4a\xaa\xee\x10\x93\x0c\x25\x31\x7f\x50\xb3\x92\x88\xc6\xc3\xe0\xd4\xb3\x3a\x12\xcc\xf6\xae\xc6\x14\x0d\x62\x1d\x32\x3c\x3a\x3e\x8d\xca\x4b\xe5\xf9\x6a\x9d\xfe\xc6\xea\xe6\x02\xa3\x03\x94\x1a\x61\x12\xc8\x6f\x8a\xa7\x29\x54\x00\x1e\x9e\xd2\xe1\xca\xf4\xf7\x5c\xe3\xd5\x19\x5e\x17\x10\x6f\x85\x5e\x8f\x57\x78\x0d\xb3\x18\x8c\xd8\x01\x51\xe3\x86\xf8\x21\xa8\xb4\xb4\xb7\xd1\xf8\x9e\x6b\xb0\x77\xd6\x60\x33\x66\x35\x54\x66\x88\xb9\xfd\xc8\x32\x34\x83\x1f\x9b\xe8\xc7\x81\x26\x0e\xcb\xa4\x6d\xdc\x1b\x6b\xe5\x3d\xed\x4c\xef\x87\xf0\x28\x08\xeb\x79\x9c\x33\x23\x01\x91\x0a\xa4\x80\x54\x04\x5b\x65\x39\xc2\x91\x56\x49\xb7\x02\x06\xbc\xb3\xb2\xff\xbf\x8b\xc3\x95\xa7\x42\x09\x08\xa5\x0c\xb1\xfd\x64\x76\xb9\x93\x8f\x45\x45\xff\x21\x74\x06\xa9\xcc\x50\xb7\x62\x9c\x50\x22\x3d\x55\x94\x1b\x5a\x1f\x1e\x8b\x34\x61\xfe\xa7\xda\x5e\x59\xbe\x70\xe5\x45\x7a\xef\x25\xcb\x35\x65\x2c\x0d\xb2\x60\x3b\x32\x25\x15\xdc\x9d\xf6\xd5\x2d\x04\x85\x1e\x58\x56\x49\xa7\xe0\xe4\x8c\x86\x5e\x78\x86\x90\xcd\xb4\x7a\xba\xb1\x9a\x02\xfd\x40\x51\x0f\x5f\xdc\x97\xa4\xeb\x89\x28\xe0\x89\xc3\xeb\xe5\x44\x54\x14\x70\x22\x1e\x59\x36\x57\xb5\x1c\x5a\x9f\x30\xc3\x73\x73\x3b\x83\xcc\xe0\xb7\x6a\x28\x8a\x4d\x67\xe3\xb3\x40\xf9\x2b\x7d\x9d\x12\x6f\x93\x82\xde\x4d\x41\x4f\x14\xc6\xac\x0e\x5c\x52\x4e\x27\xc7\xac\xf2\x24\xeb\x23\xfb\x3b\x00\x00\xff\xff\x8a\x4a\xd5\xea\xe7\x0d\x00\x00"), - }, - "/src/reflect/reflect_test.go": &vfsgen۰CompressedFileInfo{ - name: "reflect_test.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 533068494, time.UTC), - uncompressedSize: 4512, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\x6d\x6f\xdb\x38\x12\xfe\x6c\xfd\x8a\x39\xdd\x5d\x2b\xdd\x09\xb2\x65\xb7\x29\xa0\x22\x1f\xd2\xbc\x14\x59\xb4\xf1\xa2\x0e\x76\x3f\x18\xde\x05\x23\x8d\x2c\x36\x14\xa9\x25\x29\xa7\x5e\x43\xff\x7d\x41\xda\x96\xe5\xb7\xc6\x7d\x03\xea\x90\x33\xcf\x3c\xe4\xf0\x19\x8e\xe9\x6e\x17\xfe\xff\x50\x51\x96\xc2\x67\xe5\x38\x25\x49\x1e\xc9\x14\x41\x62\xc6\x30\xd1\x7f\x6a\x54\xda\x71\x68\x51\x0a\xa9\xc1\x73\x3a\x6e\x41\x74\xee\x3a\x1d\x77\x05\x30\x43\x83\xa1\x7c\xea\x3a\xbe\xe3\x64\x15\x4f\xe0\x1e\x95\xbe\x60\x74\xca\x0b\xe4\xda\xd3\xf0\xbf\x15\x22\xbc\xf7\x61\xe1\x74\x74\x38\x7a\xa4\xa5\xe7\x3b\x75\x0b\x3f\x62\x34\xc1\xe1\x0c\x65\xc6\xc4\xd3\x89\x31\x37\x15\x4f\x3e\x90\xb9\xa8\x4e\x5d\xe4\x42\x4a\x32\x1f\x66\x57\x54\x62\xa2\x6f\x33\x92\xe0\x89\x81\xf7\xf3\x12\x19\xe5\x8f\x6a\x24\xa4\xc6\xf4\xc4\xa8\xf7\x97\xef\xa8\x56\x27\x82\x2f\x73\xc2\x2f\x18\x13\xc9\x89\xf8\x3b\x52\xe0\xbb\xb9\x46\x75\x21\xd1\x1e\xf6\xc9\xdb\x1a\x66\x99\x42\xfd\x41\x24\x8f\xa7\x6a\x83\x46\xea\x21\xbf\xe5\x33\xc2\xe8\x81\x65\x56\xc5\x10\x2e\x81\xde\x78\xb2\x6d\xb8\x24\x0a\x17\x4e\xa7\x63\xfe\x77\xae\xa8\x8c\x01\xb6\x01\x9f\x30\x99\x05\xc6\x69\x0e\x21\x6e\x9c\xbf\x11\x56\xe1\xa2\x36\x9e\x3a\x80\xa3\xd1\x23\xe4\xe9\xd7\xa3\x3b\x06\xb2\xe3\x19\x66\x5e\xe4\xef\x51\x6f\x33\x5f\x61\x46\x2a\xa6\x97\x28\xa7\x53\xef\x1c\x8b\x96\x55\xa2\x87\xd9\x0d\x45\x96\x1a\x39\x8e\x1e\xa7\xbb\x86\xba\x87\x19\xbe\x3b\xf0\xfa\x8b\xb9\x98\x9f\x2a\x86\xc7\xcb\xec\x39\x8e\xf7\x97\xdf\x1d\x7a\xc1\xa6\xdf\xbf\x2c\x72\x94\x34\xf9\x11\x8a\x53\xee\xf1\x73\x1c\xbf\x53\x9d\xdf\x72\x8d\xf2\x87\x58\xee\x85\xf8\x48\xf8\xdc\x56\xc2\xc9\x4a\xcc\x88\x84\x14\xb1\xbc\xfe\xab\x22\xcc\xb0\x29\x38\x87\xf1\xe4\xaa\x6d\x5a\x38\x9d\x6e\x17\xec\x94\x6a\x8a\xca\xe9\x2c\x38\x65\x01\xd8\x0f\x2d\x2b\x34\x75\xb9\x88\x02\x88\x5a\x53\xca\xf5\xa0\x6f\xaa\x1b\x36\xa3\xc6\xd9\x0b\x5f\x07\x60\x3f\x1a\x53\xc6\x04\x31\xb8\x5e\xf8\xda\x0f\x60\x7b\xd6\x80\xdc\x1c\x19\x13\x6e\x00\xcd\xa0\x71\x15\xe4\x11\xbd\xf1\x84\x72\x1d\x40\xd4\xf3\x03\xd8\x33\x34\xd0\x17\xe3\x81\x31\x9b\x1d\xf7\x03\x18\xd4\x01\xec\x5b\x1a\xf0\x3b\xa2\x68\x62\x1c\xbd\xf0\x75\x1d\xc0\xce\xb4\x81\xa1\x94\x42\x7a\x9c\x32\x3f\x80\xf6\xb8\xb5\xbf\x72\x4c\xb9\x9e\x28\x2d\x29\x9f\x2e\xa2\x18\x5c\xc1\xd1\x0d\xa0\x1f\x83\xab\x9f\x84\x5b\x9b\x2d\x6f\x61\xd6\x9e\x00\xd6\xe8\xf6\x8a\x19\x8f\x02\xc8\x78\xbf\x31\x59\x95\x6e\x39\xb6\x75\x5a\x26\x94\x11\xa6\x0e\xab\xd2\xf7\xdb\xde\x95\x2c\x67\x6d\xdb\x31\x5d\xce\xb6\x22\xdb\xc2\xcc\xdd\xb6\xe7\xeb\xba\x44\x5b\x2c\xcf\x09\xf3\xaa\x6e\xa3\x8f\x2b\x73\x76\x04\xd7\xa0\xfa\xcb\x49\x7b\x97\x47\xd4\x19\x7c\x9b\x3a\x27\x30\xda\xb8\x2f\x3f\x8f\xf1\x47\x79\x0e\xa2\x8f\xaf\xb5\xe1\xb1\xd7\x3f\x6a\x5b\xa2\x55\x4f\x68\x55\xcf\xb2\x48\x07\xdb\xb6\xc1\x9e\x6d\x3c\xb1\x15\xb1\x58\x44\x75\x1d\x40\x33\xeb\xd7\x3b\x3b\xd7\x79\x78\x47\xee\x3c\x5b\x46\x9b\x71\xbb\x82\xa2\x89\xad\xd1\xb3\x57\x2d\xb4\x2d\xa4\x23\x8e\x13\x62\x15\xb2\x6c\xd1\xbe\x7a\xe3\xc3\xb8\x23\xe6\x76\x96\xa7\xf1\x9b\xd3\x5f\x21\x0f\x44\xc4\x10\xad\x14\xda\xc5\x44\x31\xf4\xf7\xa4\x7e\x8e\x68\x67\x75\xdb\x45\xee\x28\x83\x99\x02\x2c\x4a\x3d\x8f\x81\x0b\x0d\x3a\x47\x50\xa4\xc0\xd0\xa6\x61\xc4\xb1\x09\x53\xae\x57\x8d\xae\x9d\x65\xdb\xbd\x73\x70\x9b\x80\xf6\x78\xaf\x4b\xae\x02\x5b\xd3\xbd\x65\x8e\x43\xf7\xce\x72\x9b\x62\xdf\xd2\x4e\xfd\x23\x55\x05\xd1\x49\x8e\x29\xe8\x79\xb9\x6e\xa2\x51\xd8\x3b\xda\x46\xcf\x5e\x79\xd1\x7e\x1b\x6d\x3a\xe2\xee\xc1\x6c\x9a\xdb\x5e\xb7\xdb\xeb\x84\xcb\x17\xc1\xa2\x6e\xf5\xbf\xc3\x1e\x57\xb9\x5f\xeb\x8d\x77\x42\xef\x58\xb6\xcf\xb1\xfa\x19\xdf\x4c\x6b\x4a\x7b\x8c\xbf\x0a\xa
5\xe8\x03\x43\x60\x42\x94\xca\x54\xcd\x0b\x33\x8a\x02\x58\xff\x5d\x2b\xd4\xed\x6e\xbb\x9a\x2f\x34\xe8\x76\xe1\x7e\x78\x35\x8c\xe1\x86\x7e\x69\x18\xe6\x6b\xdc\xfc\x00\xc7\xc6\x79\x8c\xa5\x76\x9c\xb6\x01\x74\x4e\x55\x08\x23\x44\xc8\xb5\x2e\x55\xdc\xed\x4e\xa9\xce\xab\x87\x30\x11\x45\x77\x2a\xca\x1c\xe5\x67\xb5\x19\x50\xa5\x2a\x54\xdd\x37\x67\x83\x70\xf3\x00\xbb\x35\xc6\x7e\xbf\xf7\x66\xb0\xff\xea\x2a\x20\x3e\xdf\x7b\xf3\xdf\x09\xbe\x7c\x34\x63\x7a\x43\xa5\xd2\x5e\xcf\xf7\xc3\x8f\xa8\x73\x91\x7a\x3d\xdf\x71\x3a\x34\x83\xa9\xd0\x26\xb4\x08\xcd\xcf\x3e\xcf\x0f\xef\xaa\x62\x58\x69\xcf\x7f\x6b\x3d\xff\x3a\x87\x9e\xfd\xc5\xa0\xc3\x6b\xf3\xda\xc8\x3c\x77\x09\x88\xad\xfb\xbf\xb3\x00\x9e\x08\xd7\xd0\x73\x03\x63\xf0\x9d\x4e\xbd\xd4\x65\x37\xf3\xfb\x1c\x21\x21\x8c\xc1\x03\x32\xf1\x04\x19\xa1\x4c\xc1\x13\xd5\x79\x6c\xe0\x36\xa4\x63\xde\x88\xff\xb1\xa0\x73\x30\x49\x6b\x2a\xb8\x97\xf1\x00\x64\x32\x93\x01\x10\x39\x55\x3e\x2c\x40\xa2\xae\x24\x87\x8c\x87\xa4\x2c\xd9\xdc\x6b\x79\xdf\x42\xfd\x76\xc9\x05\xdf\xfa\xef\x8f\x65\x9c\x39\x05\x9b\x69\x0c\x97\x84\x9b\x8e\x24\x91\xa4\x50\x4a\x51\xa2\xd4\x73\x78\x69\xd7\x7c\x09\x22\x83\x8a\xa7\x98\x51\x8e\xe9\x32\xe3\x51\x2e\x2a\x96\xf2\x97\x1a\x4a\xc2\x69\x12\x1a\x63\x11\x5e\x12\xc6\xec\xed\xdf\xfe\xfd\x4b\x18\xfb\x64\xd3\x50\xd7\xa6\xf7\x1d\x7f\x45\x1b\x2b\x54\x0a\x15\xc8\x8a\x6b\x5a\x60\x38\x42\x7d\x43\x39\x61\xf4\x6f\x94\x01\x3c\xe5\x34\xc9\x81\x2a\xdb\x3c\x55\x55\x2e\xd5\x86\x87\x39\xbc\xb7\xb5\xf4\xcb\xa8\xf5\x8a\xa7\x9c\x6a\xcf\xd2\x37\x0a\xdd\xe7\x54\x99\x70\x62\x25\xa9\x24\x02\xe5\x10\x85\x91\x2d\xfa\x39\x68\x01\x29\x6a\x94\x05\xe5\x68\x7b\x73\x42\x2a\x85\x40\x78\x0a\x99\xbd\x2c\xa6\x77\xad\x9f\xf3\xa4\x2c\x91\xa7\x5e\x63\x1a\xc7\x83\x68\x12\xc0\x66\x3e\xe8\xc7\x93\x30\x0c\x7d\x73\x57\xd4\x23\x2d\xc1\x66\x97\x10\x85\xf0\xef\x41\xe4\xd4\xce\x3f\x01\x00\x00\xff\xff\xab\x6e\xee\x69\xa0\x11\x00\x00"), - }, - "/src/reflect/swapper.go": &vfsgen۰CompressedFileInfo{ - name: "swapper.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 220928330, time.UTC), - uncompressedSize: 834, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x52\x4d\x8f\xd3\x30\x10\x3d\x7b\x7e\xc5\x23\x42\x28\xd6\x56\x69\xf7\x5a\xa9\xdc\x00\xad\x58\xd8\x43\x25\xee\x6e\x3a\x6e\x6c\x5a\xdb\xb2\x9d\x16\x68\xf3\xdf\x91\x93\xb2\x95\x40\x5a\x2d\x87\x48\x93\x79\x6f\x3e\xde\x1b\xcf\xe7\xb8\xdb\xf4\x66\xbf\x85\x4d\x44\x41\xb5\xdf\xd5\x8e\x11\x59\xef\xb9\xcd\x44\xe6\x10\x7c\xcc\xa8\x76\x26\x77\xfd\xa6\x69\xfd\x61\xbe\xf3\xa1\xe3\x68\xd3\x2d\xb0\xa9\x22\xd2\xbd\x6b\xb1\x3e\xa9\x10\x38\xd6\x69\x6f\x5a\x86\x71\x99\xa3\x56\x2d\x9f\x07\x89\x82\xd7\x66\x06\x5b\xd2\x12\x67\x12\x47\x2c\x57\xf8\xa6\xf6\x3d\x3f\xe9\xa9\x42\x92\x30\x1a\xc7\xe6\xb3\x71\xdb\x5a\xe2\xcd\x0a\xeb\xb1\xd1\x99\x84\x08\xca\x99\xb6\x7e\x37\xf2\x3f\xc4\xe8\xe3\xf9\x0b\xe7\xce\x6f\x97\xa8\xae\x53\xab\x19\x4a\xe1\xf2\xb9\xc1\x20\x49\x0c\x24\xe6\x73\x7c\x54\x29\x23\xa8\xdc\x41\xfb\x88\x71\x56\x82\xd7\x48\xe6\x17\x63\x01\xe5\xb6\xb8\x6f\xf0\xd5\xe7\xce\xb8\x1d\xb2\x47\x3a\xa9\xd0\x90\x38\x3e\xb2\x2b\x5b\xf6\xc6\xe5\xfa\xd8\x3c\xb2\xab\xa5\x24\x91\x4e\x26\xb7\x1d\x46\xf4\x4c\xa2\x55\x89\xb1\x58\x92\x10\x91\x73\x1f\xdd\x3f\x5a\x31\x2d\x5f\x5d\x6d\x5d\xe2\x8f\x3f\x5b\xfe\x01\xdf\xe7\xb2\x4a\x54\x6e\xc7\x95\xc4\x70\xed\x77\xff\x42\x3f\x12\xa2\x18\x65\x8a\x43\x0b\x5c\x2e\xb0\x53\x34\x02\xe2\xf5\xc3\x0a\x7d\xa0\xf1\x1b\x48\xa8\xa2\xd4\xa6\xe6\xa1\x9c\xcd\xa9\xfd\xd3\xc6\x72\x9b\xaf\x97\x69\x3e\x71\xae\xab\xb7\x2a\x46\xf5\xb3\x14\x7a\xad\x5f\x41\xf7\x5a\x27\xce\x95\x2c\xa4\x5a\xd2\x0b\x7a\x8c\x9e\x4c\x36\x12\xef\x57\x93\xb3\x97\xcb\x94\xb2\xb7\xd4\x28\xf0\xbf\xf4\x15\x79\x06\x77\x2b\x78\xad\x49\x08\x7b\x0b\xf3\x21\x14\x05\xaa\x79\x28\x95\xb5\x29\x6c\xd5\xac\x39\x5f\xff\x67\xcf\x90\x95\x7f\x61\x76\x86\x7c\x08\xe3\xeb\x1a\xe8\x77\x00\x00\x00\xff\xff\xf3\x76\x65\x45\x42\x03\x00\x00"), - }, - "/src/regexp": &vfsgen۰DirInfo{ - name: "regexp", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 221091616, time.UTC), - }, - "/src/regexp/regexp_test.go": &vfsgen۰FileInfo{ - name: "regexp_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 221150389, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x72\x65\x67\x65\x78\x70\x0a\x0a\x69\x6d\x70\x6f\x72\x74\x20\x28\x0a\x09\x22\x74\x65\x73\x74\x69\x6e\x67\x22\x0a\x29\x0a\x0a\x66\x75\x6e\x63\x20\x54\x65\x73\x74\x4f\x6e\x65\x50\x61\x73\x73\x43\x75\x74\x6f\x66\x66\x28\x74\x20\x2a\x74\x65\x73\x74\x69\x6e\x67\x2e\x54\x29\x20\x7b\x0a\x09\x74\x2e\x53\x6b\x69\x70\x28\x29\x20\x2f\x2f\x20\x22\x4d\x61\x78\x69\x6d\x75\x6d\x20\x63\x61\x6c\x6c\x20\x73\x74\x61\x63\x6b\x20\x73\x69\x7a\x65\x20\x65\x78\x63\x65\x65\x64\x65\x64\x22\x20\x6f\x6e\x20\x56\x38\x0a\x7d\x0a"), - }, - "/src/runtime": &vfsgen۰DirInfo{ - name: "runtime", - modTime: time.Date(2019, 3, 10, 16, 38, 53, 760281362, time.UTC), - }, - "/src/runtime/debug": &vfsgen۰DirInfo{ - name: "debug", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 221365921, time.UTC), - }, - "/src/runtime/debug/debug.go": &vfsgen۰CompressedFileInfo{ - name: "debug.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 221418324, time.UTC), - uncompressedSize: 298, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\xce\xb1\x4e\x03\x31\x0c\xc6\xf1\xb9\x7e\x8a\x6f\x2c\x02\x9a\x34\xa5\x3c\x00\x0c\x9d\x8a\x10\xf0\x02\x49\xce\x1c\xa6\x77\x6e\x75\x71\x24\x2a\xd4\x77\x47\xbd\x0e\x87\xd8\xf0\xe2\xe1\x2f\xff\x64\xe7\x70\x9d\xaa\x74\x0d\x3e\x0b\xd1\x21\xe6\x5d\x6c\x19\x0d\xa7\xda\x12\xbd\x57\xcd\x28\x6c\x9b\xc7\x67\x1e\x32\xab\xcd\x45\x6d\x15\xae\x30\x2e\x7c\xd3\xcc\x39\x3c\xed\x0d\xd2\x1f\x3a\xee\x59\x8d\x9b\x05\x5e\xd8\xea\xa0\x10\x15\x93\xd8\x9d\xef\x4d\xb4\x5d\xd0\x6c\xb8\x84\xa5\xf7\x74\x9a\xf0\x6d\xfc\x7a\xb5\x98\x77\xf3\x74\x34\x2e\x67\x7a\xf4\xff\xad\x3b\x87\xb7\x0f\xfe\x1b\x20\x05\x4b\x6c\x1e\xb0\x57\xdc\xdf\xdd\x26\x31\x94\x63\x31\xee\xcb\x0d\xc2\xda\x63\x3b\x96\x55\xf8\x5d\xa6\x57\xc3\xda\x5f\x86\x4e\xf4\x13\x00\x00\xff\xff\xad\x79\xbd\xd2\x2a\x01\x00\x00"), - }, - "/src/runtime/pprof": &vfsgen۰DirInfo{ - name: "pprof", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 221589850, time.UTC), - }, - "/src/runtime/pprof/pprof.go": &vfsgen۰CompressedFileInfo{ - name: "pprof.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 221644124, time.UTC), - uncompressedSize: 660, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x92\x4f\x6b\xc2\x40\x10\xc5\xcf\x99\x4f\x31\xe4\xb4\x69\x45\xfb\x15\x8a\x97\x1e\xda\x22\xb5\xa5\x07\xf1\xb0\x26\x13\xd9\x9a\xfd\xc3\x64\x56\x2b\xe2\x77\x2f\x6b\xa4\x2c\x18\x0a\x3d\xee\xcc\xfb\x0d\xef\x3d\x76\x36\xc3\xfb\x4d\x34\x5d\x83\x5f\x3d\x40\xd0\xf5\x4e\x6f\x09\x43\x60\xdf\x02\x18\x1b\x3c\x0b\x2a\x28\x4a\xe3\x4b\x28\xca\xfe\xe8\xea\x12\x2a\x00\x39\x06\xc2\x05\xfb\xd6\x74\x84\xbd\x70\xac\x05\x4f\x50\x38\x6d\x09\xd3\xdb\xb8\x2d\x14\x36\x22\x22\x26\x66\xfa\x12\x85\xbe\xa1\xb0\x69\x80\x56\x87\x95\x71\x42\xdc\xea\x9a\x4e\xe7\xf5\x6a\x1d\x8d\x93\x20\x0c\x45\xed\xa3\x13\x6c\xa3\xab\x55\x85\xc6\x09\x14\x07\x36\x42\xc3\xc4\xf8\xe9\x67\x7a\xf1\x24\xad\x2a\x24\x66\xcf\x70\x06\x48\x5b\x54\x01\xef\xae\x8e\x2a\xbc\xe8\xde\xbd\x3a\x60\x06\x35\xb4\x89\xdb\x0c\x4d\x8e\x99\x24\xb2\x43\x67\xba\xf1\x43\xf3\x64\x68\xf0\x92\xc9\x1f\xc6\xc5\xaf\xda\x92\xaa\xae\xf9\x33\x79\x59\x8e\xeb\x1f\x9b\x46\xed\x75\x17\x09\xb3\x3a\x26\xd8\xef\x4c\x18\x6c\x9e\xc6\xb9\x37\xb2\x7e\x4f\xb7\x68\x0e\x2c\x45\xb3\xcc\x17\x1f\x57\x28\x6f\xe2\xef\xf8\x4b\xf1\x21\xe3\xf2\x9b\x17\xfc\x89\x74\xf8\xf7\xd1\x67\xef\x77\x31\xa8\xcb\xff\x18\xea\xa9\x7e\xf3\xdc\x20\x3f\x01\x00\x00\xff\xff\x14\x4a\xfc\x56\x94\x02\x00\x00"), - }, - "/src/runtime/runtime.go": &vfsgen۰CompressedFileInfo{ - name: "runtime.go", - modTime: time.Date(2019, 3, 10, 16, 38, 53, 760369022, time.UTC), - uncompressedSize: 5788, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x58\xef\x72\xdb\xb8\x11\xff\x4c\x3e\xc5\x96\xd3\xde\x91\x8e\x22\xd9\xe9\x25\x9d\x66\xea\x0f\x89\xee\xec\xcb\x34\xb6\x3c\x96\xd3\xde\x4c\x9a\xb9\x81\xc0\xa5\x04\x0b\x04\x58\x00\xb4\xac\xf3\xe8\x01\xfa\x20\x7d\xb1\x3e\x49\x67\x01\xfe\x93\x2d\x5f\xda\x4e\xf9\x45\xe2\xe2\xb7\x8b\xc5\xfe\xc3\x2e\x27\x13\x78\xb1\xa8\x85\xcc\xe1\xd6\xc6\x71\xc5\xf8\x9a\x2d\x11\x4c\xad\x9c\x28\x31\x8e\x45\x59\x69\xe3\x20\x8d\xa3\xa4\xa1\x4d\x84\x72\x68\x14\x93\x13\xbb\xb5\x49\x1c\x47\xc9\x52\xb8\x55\xbd\x18\x73\x5d\x4e\x96\xba\x5a\xa1\xb9\xb5\xfd\x9f\x5b\x9b\xc4\x59\x1c\x73\xad\xac\x83\xf3\xd9\x6c\x0e\xa7\x60\xb7\x76\x4c\x7f\x3b\xea\xbb\xeb\xe9\x8f\x70\x0a\x09\x81\x03\x6d\xaa\xcb\x4a\x48\x34\x44\x6d\x65\x25\x71\x3c\x99\x40\xc1\xd6\x08\x85\x36\x80\xc6\x68\x33\x5e\xea\xd8\x6d\x2b\x04\x2c\x18\x47\xb0\xce\xd4\xdc\xc1\x43\x1c\xfd\xec\xa9\x47\xfe\x27\xde\x05\x4c\xa0\xf5\x18\xeb\x0c\xbd\x09\xb5\x8c\x77\x71\x5c\xd4\x8a\x43\xea\x1a\x9e\xac\x59\x49\xdb\x3f\xc4\x60\xd0\xd5\x46\x81\x1b\x5b\x67\xe2\xdd\x13\x8e\x6a\xbd\xac\x98\x5b\x1d\x62\x49\x92\x6e\x0b\xa1\x84\x4b\x33\x5a\xbb\xb5\x57\xeb\x25\xbc\x3d\x85\x5b\x3b\x3e\x97\x7a\xc1\xe4\xf8\x1c\x5d\x9a\xfc\xb6\x71\x83\x4d\xb2\x40\xf8\x9a\x85\x33\x92\xd5\x8a\x98\x7b\x11\xb7\x76\xb6\xb8\x45\xee\xae\x9c\x49\x46\xe0\x77\x0a\xb2\x02\xb9\x95\x5c\x39\x93\x64\x07\xd9\x7f\x20\xf3\x3e\xe1\xf6\xd4\xaf\x31\xbb\x95\xd1\x9b\xeb\x10\x2e\x81\x81\x64\x8c\x3f\x34\x81\x13\x34\x48\x3d\x8a\xd8\x27\x13\x60\x77\x5a\xe4\x90\x23\xcb\x81\xeb\x1c\x01\xa5\x28\x85\x62\x4e\x68\x15\x47\x77\xcc\x00\x06\x77\xc7\x11\xc2\x29\x7c\x73\xb3\xad\xf0\x9d\xb5\x68\x08\xe0\x77\x78\xd8\xc5\xd1\xcf\x70\x0a\xd8\x99\xf9\x7c\x76\x3d\x9b\xdd\xec\xf9\xa2\x32\x9a\xa3\xb5\x07\x2c\xde\xac\x90\x21\x45\x01\x2d\xee\xd4\xe3\x3e\xa9\x1c\x0b\xa1\x30\x27\x11\x9d\x3f\x27\x49\x1c\xed\xe2\x68\xa9\x8d\xd6\x8e\x24\x36\x4c\x41\x1e\xaa\xbb\xd6\x48\x41\x8f\x46\x72\x03\xff\xcd\xf3\x82\x03\x62\x3c\x6f\x82\xcf\x6f\x32\x99\xf8\x94\xf9\x1e\x0b\x56\x4b\x77\x1e\x64\x08\x0b\x4a\x6f\x60\xa9\x15\x8e\x80\x33\xf5\xad\x83\xda\x22\x08\x07\xcc\x42\xc1\xa4\x5c\x30\xbe\x06\xa6\xb6\xa5\x36\x38\xf6\x42\x6e\x66\xdf\xcf\xde\xc2\x1c\x11\x44\x01\x0c\x16\xe8\x1c\x1a\xb0\x5a\xd6\x64\x47\x2f\x11\x31\xc7\x7c\xdc\x87\xed\xa4\xb6\x66\x22\x35\x67\x72\xb2\xd4\x7d\x0c\xbf\x37\xc8\xd6\x95\x16\xaa\x8b\xe4\xf1\xf7\xb8\xa8\x97\x4b\x34\x69\xd6\xa1\xa6\x4c\x4a\x34\xa9\x5d\x8b\x0a\x84\x72\x19\xa4\x15\x87\x5a\x28\x57\x39\x33\x82\x42\x48\x6c\x9c\x33\x02\x29\x14\x12\x66\x04\x7a\x0d\x0b\xad\xa5\x17\x2b\x54\xa1\x0f\x78\xab\x0d\xc2\x4b\xdc\xa4\x8d\x95\xad\x63\x7c\x9d\x64\x63\xda\x32\x4d\x6c\x25\x85\x4b\x46\x90\xfc\x4d\x25\xd9\xf8\x83\xca\xf1\x3e\x68\xf1\x02\x5e\x05\x47\x78\xc9\xbf\xe2\xdf\xe3\x11\x24\xc9\x88\x7e\x0a\x26\x2d\x7a\x37\x54\xcc\x38\x1f\x3c\xc4\xdc\xee\x54\x2f\xc2\x11\x92\xd1\x90\x2c\x68\xcb\x59\x41\x2a\xa4\x5e\x03\x97\x66\x2f\x4e\x9e\x83\x64\x2d\xe4\x89\xfe\x6f\x29\x6e\x7a\x95\xbc\x06\xcd\x79\x8e\xb3\x2e\x48\xf6\x17\x4e\x1a\x61\x23\x70\xa6\xc6\x47\xce\xb0\x9d\x37\x46\x50\x71\xf8\xfc\xa5\x71\x47\x46\xa4\x41\xbd\x3a\x26\xbe\xc9\xa4\xe5\x3a\x33\xac\x44\x1b\x62\xce\x81\x28\x2b\x89\x25\x2a\x87\xb9\xaf\xc4\xa1\x80\x9f\xde\xda\x71\xdc\x45\xd9\x87\x16\x43\xb1\x56\x69\x6b\xc5\x42\xe2\x78\x4f\x95\x20\x34\xe5\xe1\x6d\xa8\xcb\x51\xb3\xdf\x03\x34\xea\x7c\x13\x08\x0f\x3b\xd8\xc5\xa1\x96\x37\x88\x50\xcc\x1f\xba\xf2\xcd\x45\xcb\x9c\xc1\x25\xde\x53\x78\xa6\x05\xbd\x07\x86\x11\x50\x36\xb4\x01\xd6\x4a\xdf\x93\x39\xb8\x1f\xae\xa6\x10\x9e\x46\xb1\x38\x3a\xa3\x4d\xe8\x39\xa2\x7f\xe1\xdd\xe7\x4e\x73\x8d\x44\x67\x14\xd4\xf4\xb4\x84\x8f\x14\xd8\xf4\x08\xe
5\xe2\xe8\x07\xe5\xcc\x76\x28\xb1\xab\x56\x53\x9f\x48\xdd\xab\xc6\xfb\xfe\x96\xd8\xbf\x1c\x78\x6d\xa8\x04\xd4\x4e\x28\x4c\xb2\x50\x72\x09\x9d\x04\x87\xef\xd5\xe3\x10\x4e\xa1\x20\x27\x23\x50\x42\x66\x83\x02\x79\xf1\xee\xa7\xab\xeb\xd9\x74\x9e\xaa\x90\x9e\xfb\x21\x70\x32\xd0\xc6\xf2\x15\xe6\x41\x1d\x4e\x19\x50\xb2\x35\xa6\x7c\xc5\x54\xe7\x80\x43\xdb\x5a\x74\x37\xa2\x44\x5d\xbb\x83\x17\x00\xc9\x26\x99\xc0\xa5\xb6\x98\xf2\x0c\x76\xd9\x08\x8e\xb3\x38\xfa\xd3\x4b\xde\x6d\x7e\x59\x97\xd3\xab\x4f\xe9\xf3\xda\x5d\xd6\x65\x67\x8f\x27\xb0\xc7\xc6\x73\xda\x31\xd9\xc1\x6d\x9b\x78\x71\x1b\x02\x17\x58\xce\x1d\x73\x76\x10\x05\x93\x09\x9c\xa3\x42\xc3\x24\x58\xc7\x9c\xb0\x4e\x70\x3b\x8e\xa3\x77\x52\x6a\xde\xc7\xc7\x9b\xef\x60\x32\x81\xc5\xd6\xa1\x05\x46\x4b\x8c\xd2\x83\xa9\x1c\xac\x13\x52\x82\x50\x54\x9f\xe3\xe8\x86\x34\x08\xbc\xcf\xb3\xa5\x78\x87\x8a\x32\xa7\x30\x88\x79\x16\x47\xf3\xad\x05\x38\xbc\x99\x5e\x38\xe6\xcb\x57\x61\x74\x49\x17\x85\xc3\x12\x52\x5b\x97\xa0\x0b\xf8\xe9\xfe\x9e\x58\x17\x28\xf5\x26\x8b\xa3\x8f\x5a\xaf\xeb\xca\xee\x8b\x51\x75\xb9\x40\x43\x68\x5f\xd1\xd1\x80\x0c\xb0\x38\xba\xf0\x2a\x3d\x8b\x2f\xc3\x72\x1c\x9d\x19\x44\xfb\x58\xbd\x1e\x47\xa7\xb0\xb1\x37\xe5\x05\x13\xaa\x3d\x28\x25\xce\x0a\x59\xb5\x6f\xd7\x1f\x91\x55\x9d\x6d\xff\x1b\xcb\x12\x63\x67\xa7\xff\xc4\x4a\x81\xe5\x43\xde\xa4\xec\x63\x16\xa1\x40\xd0\x9a\xad\x98\xb2\x0d\x56\xd1\x1d\x7b\x18\xab\xb4\x7a\xd9\xe1\x03\xfc\x1a\x25\x32\x8b\xf9\x13\xb8\x69\x17\x9c\x06\xb7\x42\x98\xcd\x03\x43\xc8\x0c\x3b\x94\xef\x23\x76\x60\xcb\xde\x02\x3a\x80\x83\x5d\x3f\xea\xcd\x4b\x89\x77\x28\xa1\x10\xf7\x98\xbf\xb4\xe2\x97\xb6\x94\xd5\x06\x5b\x2e\x6d\xf6\x6d\x3d\x99\x44\xe1\x48\xc2\x36\x9a\xd5\xa4\x95\xd2\x9b\xb0\x48\xe6\xec\x96\x0e\x99\x70\x1c\x47\x73\xba\x7a\x1b\xc3\x3c\x3e\xa7\x97\xb6\xd8\x82\xbf\x9e\x7b\x25\x1a\xa6\xc6\x59\x81\x29\x8e\x2e\xe6\x15\x53\x4f\x04\x95\x64\xce\xfe\x24\xb6\xc1\x3d\xe6\x9d\x32\xbe\xc2\xc0\x3c\xe0\xe5\x44\xdd\x67\xf6\xc0\xc0\xdd\x32\xbf\xaf\xf9\xfa\x47\x66\x57\x44\xed\x99\x2b\xa3\x0b\x21\xa9\x75\x5c\xd4\x7c\x8d\x0e\x56\xcc\xae\xc0\xb1\x85\xc4\x38\x3a\x9f\xf6\x19\xd9\xb3\x9c\x4f\xa1\x44\xc7\x72\xe6\x58\x1c\xcd\xdc\x0a\xcd\x9e\x9a\x04\xd1\x44\x6d\xb3\xb4\xcf\x83\xc6\x8b\xe7\xcc\x2c\x68\xfe\xe2\x5a\x4a\xe4\x4f\xdc\x45\x37\xda\xf9\xf4\x69\x21\x50\x78\xef\x5a\x1e\x4a\xaa\x0d\xa5\xc5\x8a\x55\x15\x2a\xd8\xac\x50\x41\x9f\x53\xff\xfa\xc7\x3f\xc1\xad\x84\x05\x56\xea\x9a\xae\xa4\x8f\xcc\x1e\x94\x89\x2a\x07\x6a\xe0\x29\xe6\x24\xb3\x7b\xf2\x53\xc5\x94\xb6\xc8\xb5\xca\x2d\x58\xa1\x38\xc2\xc9\x1f\xff\x40\x95\xfb\x8a\xd5\x16\x7d\x89\xbb\xb4\xbd\x81\x3d\xf5\xb2\xb5\xd7\xe7\x57\xaf\xdf\x7c\xe9\x37\xe2\xc2\xf0\x5a\x32\x03\x8b\xba\x28\x42\x8c\x1b\xe4\xd4\x39\x9c\x4f\xa1\x22\x4e\xc8\x6b\x13\xac\x44\xf7\xb7\x75\xed\x3a\x73\xf0\x39\xa5\xf2\x3f\x7d\xf1\xea\xf5\xeb\xec\x77\x24\xb7\xd9\xec\x07\x95\xff\xaf\x9b\xb5\x07\xb7\x71\xe4\x65\xc3\xd0\x36\xbf\x7f\x45\xbe\x9f\x5e\x7d\x3a\x33\x2c\xd8\xa2\x90\x9a\x35\xc2\x8b\x96\xa6\x0b\x98\x5e\x7d\x0a\xe6\x6b\x53\xe0\x7c\x4a\xd7\x3f\x45\x4f\x2b\x92\xba\x90\x38\xf2\x7d\x73\xb7\x8b\xa7\xf9\x50\xb8\x42\x13\x92\x78\x50\x2c\x1f\xe5\x2e\xbc\x39\xa1\xec\xbc\xac\xcb\xb9\xf8\x05\xa7\x92\x59\x1b\x4a\x11\x95\x94\xa9\x9f\xa4\xc6\x71\xf4\x7e\x4b\xab\xf0\xf9\xcd\xc9\x97\xfe\x52\x8b\x3c\x6d\x70\xa8\xae\xd4\xb7\x3e\xeb\x6a\x7a\x4b\xd8\x75\x37\xee\x35\xb2\xbc\xbd\x28\xd3\x12\x8e\xda\xff\xc3\x0e\x66\x8e\xee\x4c\x28\x26\xc5\x2f\x68\xd2\xfb\x11\x50\xcb\xed\xd0\xd0\x94\xfe\xb0\x6b\x80\xa1\xe9\x22\x74\xaf\x98\xae\xd8\xdf\x6b\xec\xda\x0a\x32\x6b\xad\xf0\xbe\xd2\xc6\x77\x9b\x02\xa5\x2f\x9a\xb9\xb0\xa4\xef\x06\xb8\x56\x77\x68\xac\x4f\xa1\x
ae\x0b\xfc\x39\xf4\x67\x19\xf8\x7e\x2b\xcd\xda\x76\x0b\x7e\xf5\xe9\xfa\xc1\x63\xd8\x3d\x16\x44\x7d\x1d\xb5\x72\x83\x09\x86\x3a\xcb\x43\x23\xcc\xa0\xb1\xf4\x23\xc4\x53\x61\x97\xac\xc4\x7e\x30\xfd\xca\x33\x10\x06\xed\x01\x49\xcc\x99\x36\x57\xd3\x3d\x75\xbc\xf4\x41\xef\xa3\x84\x24\x93\xd0\xf8\x7c\x81\xe5\x95\x2f\x67\x78\xcd\x9c\xd7\x12\x4e\xe1\xf5\xc9\x2b\x38\x82\x93\xe3\x57\xdf\xf5\x3e\x7b\x2f\x35\x5f\x0f\xa0\xa9\x69\xf0\x8f\x7c\x7b\x51\x3b\xbc\x6f\x70\x6d\x2a\x0c\xb0\x4d\x13\xd6\x4f\x03\xea\x0e\xad\x13\x4b\x02\x50\xf5\x19\xc3\x87\x02\x84\xfb\xd6\x76\xa3\x01\x39\xb5\x9b\x2b\x46\xe4\x56\x2b\x72\x34\x90\x6b\xb2\x91\xd5\xa3\x50\x39\x37\xc2\x22\x18\x2c\xf5\x5d\x10\x04\x5c\x97\xc4\x31\xde\x9f\x5c\x82\x9a\x74\xc7\xa4\x8b\xba\x80\xcf\x5f\xe8\x3a\x1a\x51\x2a\x35\xbd\x7f\xa3\xe0\xa1\x6f\x01\xcf\x4f\x97\x7e\x72\xfc\xd5\xcf\x02\xc7\x7e\x50\x6c\x5e\xb8\xae\xb6\xb4\xfd\x08\xec\xde\xb4\x98\xf4\x84\xc1\x10\xd8\x8c\xaa\x7e\x50\xec\x47\xbb\xbe\x5d\xff\xa8\xf9\x7a\x36\xbf\x59\x19\x64\xbe\x13\x6f\xe9\x9f\x94\x7c\x66\xe5\x2f\x21\x2f\x0e\x7d\x8e\xb2\x5b\x3b\xbe\x59\x61\x83\x18\x5a\xcc\xb8\x1b\xc3\x38\x85\xa7\xff\xe0\xd2\x87\x9f\x12\xb2\x8d\xe4\xb9\xd3\x55\x8b\x6a\xa3\x74\xd7\x97\x86\x76\x29\x58\xdd\x8f\x91\x7f\xc5\xf0\xdd\x8e\x01\x5f\x6a\x40\x75\x27\x8c\x56\x7e\x3a\x74\x1a\x38\x73\x7c\x15\xb6\xb3\x63\xb8\x59\xa1\x41\x9a\x2a\x37\x08\x2b\x76\xb7\x1f\x18\xcd\xd5\xa5\x72\x60\x72\xc3\xb6\xb6\xcb\xd8\x7e\x56\x58\x6a\x6f\x5a\xef\xe2\x37\xdf\x3d\x1e\x69\x3d\xcc\x7f\x2b\x9c\x15\x29\x56\x70\xb4\x57\x95\x8e\xc2\x57\xc4\x07\x9a\xf5\x95\xe0\x69\xd2\x20\xdf\xfa\xb1\xd7\xd6\x55\x28\x43\x49\xef\x95\x3f\x23\x56\xef\xa4\xb8\xc3\x74\xbf\xbc\xb5\xeb\x7e\xf2\x4a\x6d\xe3\x81\xac\x17\xed\x8f\xdb\x78\xd9\x06\x37\x53\xb6\xac\xd0\x22\x30\xd3\x5f\x1b\x1e\xbd\x31\xac\x1a\xc3\xe5\xff\x61\xf4\x5e\xa2\x0b\xf3\x76\xc5\x0f\x94\xc5\xa7\x15\xb0\x10\x2a\xf7\x73\xda\xb0\xd0\x10\xe1\x83\x2a\x74\x8f\x6f\x29\x7e\x40\x0f\x8c\xb5\xe2\x8a\xea\x5c\xd1\x2d\x0e\x2a\xde\xa3\xa2\xe6\x2f\x82\x4e\x6a\x37\xd3\xff\x3b\x00\x00\xff\xff\xa9\x51\x33\x3f\x9c\x16\x00\x00"), - }, - "/src/strings": &vfsgen۰DirInfo{ - name: "strings", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 533200223, time.UTC), - }, - "/src/strings/strings.go": &vfsgen۰CompressedFileInfo{ - name: "strings.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 222212394, time.UTC), - uncompressedSize: 1759, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x94\xd1\x6f\xe3\x44\x10\xc6\x9f\xbd\x7f\xc5\x60\x1e\xce\xa6\xa9\x9d\xb4\x4d\x49\x83\x82\x74\x0d\x52\x29\x42\xea\xe9\x0a\xe2\xe1\x74\x0f\xeb\xf5\x38\x9e\x64\xbd\x6b\xed\xac\xdb\x0b\xa8\xff\x3b\x5a\x3b\x6e\x73\xe5\x10\x02\xfa\xe4\xc6\xb3\xbf\xf9\xe6\xf3\x37\x9b\xe7\x70\x52\x74\xa4\x4b\xd8\xb2\x10\xad\x54\x3b\xb9\x41\x60\xef\xc8\x6c\x58\x08\x6a\x5a\xeb\x3c\x24\x22\x8a\x3b\x43\xca\x96\x98\x77\xbe\x5a\xc4\x42\x44\xf1\x86\x7c\xdd\x15\x99\xb2\x4d\xbe\xb1\x6d\x8d\x6e\xcb\x2f\x0f\x5b\x8e\x45\x2a\x44\xd5\x19\x05\xb7\xa6\xc4\x4f\xd7\x7b\x8f\x09\x1f\xc8\x13\x50\x50\xec\x3d\xa6\x40\xc6\xc3\x1f\x22\x72\xe8\x3b\x67\x60\xcb\xd9\xad\xf1\xe8\x8c\xd4\x77\xc5\x16\x95\x4f\x38\xcd\xd6\x52\xeb\x24\xa6\x00\xb9\xab\xe2\x49\x28\xba\xd1\xb6\x90\x3a\xbb\x41\x9f\xc4\xf7\x3d\x31\x1e\xeb\x2a\x67\x9b\x75\x2d\xdd\xda\x96\x18\x4f\x40\xa5\x69\x40\x26\xa9\x78\x3a\x56\x93\xf0\x04\x18\xdb\x83\x9c\xff\x2a\xe3\x75\x11\xb6\x7f\xe9\xf6\xb3\x64\xff\xff\x3a\xea\x91\xf0\x2f\xba\xae\x6d\x67\xfc\xdf\x74\x34\xb0\x5c\xc1\x54\x44\x79\x0e\xdc\xa2\x22\xa9\x41\x49\x46\x16\x11\x3f\x92\x57\x75\xa8\x09\x3f\x80\x46\xd3\xc3\x61\xb5\x82\xe9\x52\x44\xa3\xd6\x10\x80\xec\x7d\x67\xb0\xef\x72\x6b\x86\x0f\x90\x70\x0a\x27\x30\x7b\x7d\xf6\xfb\xe1\x31\x3d\x3a\x3f\xfd\x02\xff\xa5\x88\xaa\x5e\xf4\x6a\x05\x1c\x94\x3c\x9f\x9a\x89\x28\x7a\xfa\x0c\xf2\x24\x44\x54\x59\xd7\x57\xb5\x96\xc3\x58\xc7\x4e\xa7\x03\x2c\xbc\x59\xad\xe0\x74\x36\xd0\x0a\x87\x72\x77\x40\x99\x93\x13\x11\x45\x0c\x2b\xe0\x0f\xad\xe5\x93\x51\xd0\xf2\x63\x80\x8f\x9d\xcc\xb3\xab\x49\x01\xdf\x5c\x87\x5d\x41\x97\xc2\x61\xea\xf4\x60\x6f\xa0\xe7\x39\xfc\xda\xb2\x77\x28\x1b\x38\xd4\x65\x43\x19\x38\xd4\x84\x0c\xd6\xc0\xb8\x62\x9d\x61\x59\x61\x06\xbf\x21\x28\x69\xde\x78\x28\x2d\xf8\x5a\xfa\xac\xe7\xfc\x72\xf7\xc3\xdd\x12\x6e\xfd\x1b\x0e\x03\x30\x15\x1a\xfb\xb7\xe0\x6b\x04\x34\x9e\xdc\xf3\x92\x66\x87\x56\xf0\xf6\xdd\x6d\x40\x41\x81\x40\x4d\xab\xb1\x41\xe3\xb1\xec\x71\xc3\x5f\x63\x1d\x02\x56\x15\x29\x42\xe3\xf5\x1e\x82\x7b\x37\x77\x6f\xdf\xaf\x7f\x5c\x6d\x79\x48\x43\x45\x4a\x6a\xbd\x87\x44\x3e\x58\x2a\xa1\xe3\xa0\xfe\xc3\xc7\xb0\xac\x13\x20\xc3\x1e\xe5\x31\xb2\x63\x04\x79\xf0\x02\x4a\x72\xa8\xbc\xde\x7f\x07\xd6\x01\xdb\x06\xe1\x27\xf9\x20\xef\x95\xa3\xd6\x8f\x36\x15\x47\x62\xa9\x02\x6b\x10\xf0\x13\xb1\xe7\x34\x3b\xc2\x5e\x77\x61\x52\x62\x20\x1e\x54\x3f\x5a\xb7\x9b\x40\x89\x15\x3a\x28\x6d\x00\x91\x87\xce\x78\xd2\xc1\x11\x87\x6f\x18\x24\x18\xc4\x12\xb8\xb6\x8f\x06\x1e\x48\x42\xeb\x6c\x45\x3a\xdc\x36\x47\x64\x69\xca\xe1\x04\x48\x87\x50\xa0\x51\x75\x23\xdd\x8e\x41\x3e\x48\xd2\x32\xf8\x9c\x30\x22\xd4\xde\xb7\xbc\xcc\xf3\xcf\x2e\x39\x2d\xcd\x26\xdf\xd8\x9c\x98\x3b\xe4\x7c\xb6\xb8\xba\x9a\x7e\xdd\xff\xa3\x6c\x13\xec\x3e\x3d\x9f\x9f\x4d\x2f\x17\xf3\xf3\xf3\x30\xce\x21\x40\xc3\xe4\x49\x91\x15\x5d\x95\x7e\x39\x4c\xca\xb6\xfb\x75\x8d\x6a\x97\xa4\x21\x48\x54\x41\x91\xc9\xb2\x74\x21\xb9\x86\x74\x1f\xdd\xe3\x74\x3d\xd7\x87\x0f\xc0\x60\x2c\xb2\x92\x2d\x4e\xe0\xb1\x26\x55\x43\x8b\xae\xb2\xae\xe1\x31\x64\xef\x2c\x85\x3b\x03\x1a\x69\xa8\xed\xb4\xf4\x64\x4d\x36\x20\x5f\xc7\x6f\x02\x6c\x81\x77\xd4\x02\xf9\x0c\xee\xff\xc9\x89\x30\x37\xf9\xfc\x62\x71\x31\x5f\x5c\xaa\xc5\x4c\x4e\x67\x57\x97\x78\x71\x26\xd5\xfc\xac\xba\x9c\xcf\x0a\x35\xbf\x9c\xce\xbe\x55\xf2\x62\x7e\x71\xb6\x98\x86\xa6\xe3\x64\x50\x88\xe8\x09\x50\x33\xc2\xcb\xbc\x5f\xad\xa0\x18\x16\x5a\x1a\x52\x49\x7c\xc8\xf8\x12\x48\x6b\xdc\x48\xdd\x07\xce\x56\x60\xac\x39\xfd\x1d\x9d\x1d\xf7\x2c\x38\x42\x58\x42\xb1\x87\x07\xa9\x3b\x8c\xd3\xb0\xc2\x4f\xe2\xcf\x00\x00\x00\xff\xff\x3c\x43\xb4\x54\xdf\x06\x00\x00"), - }, - 
"/src/strings/strings_test.go": &vfsgen۰CompressedFileInfo{ - name: "strings_test.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 533570947, time.UTC), - uncompressedSize: 388, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x90\xc1\x4a\xc4\x40\x0c\x86\xcf\xe6\x29\xc2\x9c\x76\x55\xba\xcf\xa0\x1e\x16\x04\x41\x3a\xbd\xcb\xd8\xa6\x75\x6c\x27\x33\x24\x19\x3c\x88\xef\x2e\xa5\xf5\x24\xe2\x6d\x8f\x81\xef\xcb\x07\xff\xe9\x84\x37\xaf\x35\x2e\x03\xbe\x2b\x40\x09\xfd\x1c\x26\x42\x35\x89\x3c\xe9\x8b\x91\x1a\x40\x4c\x25\x8b\xa1\x5b\xaf\xc8\x93\x03\x18\x2b\xf7\xd8\x91\xda\xfd\xaa\x92\xdc\x2d\x4b\xee\xf5\x60\x78\xbd\x33\x4d\x77\xc4\x4f\xb8\xb2\xc6\xcf\xb1\x1c\x9c\x54\xb6\x98\xa8\x69\x29\x0c\x4f\x94\xbc\x05\xd3\x5b\xfc\x61\x37\xfb\x99\xa4\xad\x8c\x9c\x0d\xb5\x96\xb5\x48\x03\x46\xc6\x73\x2e\x6f\x24\x8f\xde\x1d\xe1\xeb\x77\xf9\x2c\xf9\xe3\xa2\xdd\x87\x9c\x4a\x10\xf2\xdb\x42\x7f\xa7\x2b\x6b\x18\x77\xec\x9f\xe7\xdf\x01\x00\x00\xff\xff\xe9\xc8\x01\xe4\x84\x01\x00\x00"), - }, - "/src/sync": &vfsgen۰DirInfo{ - name: "sync", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223786077, time.UTC), - }, - "/src/sync/atomic": &vfsgen۰DirInfo{ - name: "atomic", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223002354, time.UTC), - }, - "/src/sync/atomic/atomic.go": &vfsgen۰CompressedFileInfo{ - name: "atomic.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 222676490, time.UTC), - uncompressedSize: 3060, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x56\xcf\x6f\x9b\x3e\x14\x3f\xe3\xbf\xe2\x7d\x39\x54\xd0\x7e\x45\xa4\xad\xea\xa1\x52\x0e\xd5\x0e\x53\xa5\x49\x9b\x54\x75\x77\x07\x4c\xea\xcc\xb1\x91\xb1\x69\xa2\x28\xff\xfb\x64\x03\xc1\x80\x61\x5d\xb2\xf6\x84\x8b\xfc\xf9\xc1\x7b\x9f\xf7\x9a\xc5\x02\x6e\x56\x9a\xb2\x0c\x36\x25\x42\x05\x4e\x7f\xe1\x35\x01\xac\xc4\x96\xa6\x08\xd1\x6d\x21\xa4\x82\x08\x05\xa1\xe6\x25\xce\x49\x88\x50\x10\xae\xa9\x7a\xd1\xab\x24\x15\xdb\xc5\x5a\x14\x2f\x44\x6e\xca\xee\xb0\x29\x43\x14\x23\x94\x6b\x9e\xc2\xd3\x2b\x2e\x1e\xb9\xfa\xfc\x29\xc2\x59\x26\xe1\x9a\x9a\xf3\xff\xc0\xc9\x2b\xd8\x63\x5c\x3f\xe0\x80\x02\xc1\x32\xb8\x5f\xc2\xb5\xb9\x88\x02\xfb\x80\xa5\xb9\x89\x02\x49\x94\x96\x1c\x04\xcb\xd0\xb1\x4f\x7c\x77\xdb\x11\xdf\xdd\x9e\x88\xef\x6e\xe3\xfa\x71\x1e\xf1\x33\x75\x2c\x6b\xc7\xb3\x6e\x4c\xeb\x0b\x5c\x3f\x53\xc7\xb6\x76\x7c\xeb\xc6\xb8\xbe\xd0\x79\xa1\xa4\xc3\x5e\x28\xd9\xd1\x17\x4a\xc6\xed\xe1\x3c\x81\x1f\x82\x72\x45\x4e\x02\x36\x12\x49\xf3\xb2\xd1\xe9\xbd\x8b\x07\x7f\xff\xbd\xea\x17\xb1\x2d\xb0\x24\x0f\x3c\x9b\x08\x93\x60\x59\x2f\x51\x2b\x21\x98\x91\xa1\x39\x34\xdc\x4b\x73\xc7\xbc\xea\x8b\xb5\x6a\x4a\x6a\x82\x82\xe3\x49\x3d\xc7\xac\x24\xd3\xfa\xc3\xcc\xb9\xfa\xa6\x7f\xef\xaa\xef\x8d\xe6\xc9\x81\xfe\x88\x12\x78\x03\xdc\xb3\xf0\x21\x55\xf0\xc4\xbc\x67\xc2\x66\xfd\x5d\x5d\xcc\xcf\x42\x67\x66\x30\x10\xff\xd8\xd3\x43\x96\x79\x86\x22\x23\x4c\xe1\xd1\x8e\x35\x76\xda\xc9\x83\x9b\xfa\x92\x7f\x02\xcd\xd9\x51\xf0\xc6\xae\xd6\x18\xef\xc4\xb3\x55\x3c\xc3\x75\xfa\x8e\xde\x4a\xbf\xe8\x3b\x46\xd9\xed\xbe\xa3\xbf\x7e\x2f\x52\xf1\xc4\xb3\xd3\x19\xee\xe1\xf3\x94\xbe\x09\x3c\x6e\xbd\xd3\xed\x06\x52\xef\xd9\x01\xa8\x5f\x67\xa7\xb4\x93\x20\x4f\x04\xdc\xa6\xcf\xe2\x06\x25\x77\x8b\x3c\x8b\x1b\x15\xb1\x57\xb5\x49\xe8\xdc\x60\xfa\xfe\x21\x79\x89\x9e\x94\x90\xc4\x33\x59\x15\x66\xed\x5c\x1d\xba\x1e\x55\x98\x8d\x90\xc3\x2c\x37\x48\xf3\xfd\x73\x48\xef\xac\x19\xac\x7e\x83\xac\x37\xe0\x2d\xf8\x2d\xca\x9e\xdc\xb6\x70\x5b\xff\x39\xfc\xfc\x42\xb4\x34\x83\x5e\x4c\xb0\x45\x15\x5c\xff\xc4\x4c\x93\xd8\xf6\x33\x8a\x21\xda\x81\x85\xe4\x38\x25\x87\x63\xec\x74\xad\x4a\x2a\x1f\xce\x1a\xf2\
xa0\x68\x0e\x3b\xb3\x71\x39\xb5\x4b\x38\x28\x30\xa7\x69\x14\x96\x7b\x9e\x2e\xea\x1f\xbd\xf7\x50\x1a\x2c\x88\xdc\x5e\xaa\x0c\x9f\xa1\x11\x60\xa9\xc3\xd8\xee\x62\x9a\x1b\x65\xf8\xaf\x66\xba\xba\x82\x4d\x99\x3c\x1a\x2d\x8e\xd9\xf7\xd5\x86\xa4\x2a\xda\xc5\xc9\x57\xa2\xa2\x30\x15\xbc\x54\x52\xa7\x4a\xc8\x30\x36\x88\xf1\xd5\x2a\xa9\xbc\x97\xff\xe8\x90\x72\x03\xa0\xa5\x22\x5c\xb1\x3d\xa8\x7d\x41\xb2\x29\xcb\xc6\xef\x12\x76\xe8\x88\x7e\x07\x00\x00\xff\xff\x2a\xf7\xf1\xfd\xf4\x0b\x00\x00"), - }, - "/src/sync/atomic/atomic_test.go": &vfsgen۰FileInfo{ - name: "atomic_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223062879, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x61\x74\x6f\x6d\x69\x63\x5f\x74\x65\x73\x74\x0a\x0a\x69\x6d\x70\x6f\x72\x74\x20\x22\x74\x65\x73\x74\x69\x6e\x67\x22\x0a\x0a\x66\x75\x6e\x63\x20\x54\x65\x73\x74\x48\x61\x6d\x6d\x65\x72\x53\x74\x6f\x72\x65\x4c\x6f\x61\x64\x28\x74\x20\x2a\x74\x65\x73\x74\x69\x6e\x67\x2e\x54\x29\x20\x7b\x0a\x09\x74\x2e\x53\x6b\x69\x70\x28\x22\x75\x73\x65\x20\x6f\x66\x20\x75\x6e\x73\x61\x66\x65\x22\x29\x0a\x7d\x0a"), - }, - "/src/sync/cond.go": &vfsgen۰CompressedFileInfo{ - name: "cond.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223211458, time.UTC), - uncompressedSize: 511, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x8f\x31\x73\xab\x30\x10\x84\x6b\xdd\xaf\xd8\x12\x1e\x83\x71\xfd\x6c\x9a\xe7\x96\xee\x4d\x26\xb5\x2c\x84\x7d\x41\x3e\x31\x20\x92\x61\x32\xfc\xf7\x8c\x90\x93\x14\xb6\x9a\x93\x76\x75\xfb\xcd\x56\x15\x8a\xf3\xcc\xae\xc5\xdb\x44\x34\x68\xd3\xeb\x8b\xc5\xb4\x88\x21\x0a\xcb\x60\x71\xf2\xd2\x62\x0a\xe3\x6c\x02\x3e\x49\x55\x15\x3a\xb6\xae\x9d\x30\x4f\xb6\xc5\x79\xc1\xbb\x16\x76\x4e\x83\x6f\x83\xb3\x37\x2b\x41\x07\xf6\x42\x4a\xfc\xc9\x0f\x0b\x90\x26\xa9\x06\xe9\x34\xde\xf4\x76\x8c\x7e\xe0\x6e\xf3\xe3\x6c\x78\x0a\xa4\xcc\xd5\x46\x13\xc6\x0f\xcb\x29\xdd\xe9\x19\x53\xec\xc7\x23\x0f\x60\xd9\x32\x60\xae\x5a\x70\xf6\xde\xd1\x4a\xd4\xcd\x62\x90\x19\xfc\x89\x4d\x72\xbc\x6a\x0e\x59\x1e\xab\x98\x9d\x14\x05\x29\xee\x60\x76\xe6\x8a\xba\x86\xb0\x8b\x86\x4a\x6f\xdc\x74\x6f\xb3\x9f\xac\x9c\xd4\x1a\x97\x9a\xdd\x8b\x38\x6f\xfa\x2c\x27\x75\x2c\xe3\xd7\xa4\x36\x49\x7b\x24\xfe\xe7\x8b\x68\x97\x98\x1b\x4c\x22\x6b\xbf\x91\x46\x1b\xe6\x51\xee\xc9\x52\x96\x94\xd8\xc7\x12\x61\x9c\xed\x93\xb0\x7f\xa3\xd7\xad\xd1\xd3\xbd\x83\xe0\x6f\x1d\x13\xb7\x75\xd4\xd8\x93\xea\xfc\x08\x8e\xf2\xfe\x00\xc6\x11\x72\x00\x17\xc5\x6f\xaf\xef\x6c\xb5\xd2\x4a\x5f\x01\x00\x00\xff\xff\x2c\xcb\x53\xaf\xff\x01\x00\x00"), - }, - "/src/sync/export_test.go": &vfsgen۰CompressedFileInfo{ - name: "export_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223318328, time.UTC), - uncompressedSize: 168, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\xca\x4d\x0a\xc2\x30\x10\x05\xe0\x7d\x4e\xf1\x96\x8a\x3f\xf1\x02\xde\x41\x0a\xae\x25\x4d\x5f\x35\xda\x4c\x42\x32\x29\x94\xd2\xbb\xbb\x15\xdc\x7f\xd6\xe2\xd0\xb7\x30\x0d\x78\x57\x63\xb2\xf3\x1f\xf7\x24\xea\x22\xde\x18\x6b\xd1\x71\x64\xa1\x78\x0e\xe8\x17\x28\xab\xd6\x23\x84\x1c\xa0\x09\x2f\x37\x13\x92\x4e\x29\x23\xc4\x3c\x31\x52\xd4\x69\x48\x52\xcf\x66\x76\x05\x5d\x13\x0d\x91\x8f\x5c\x92\xbf\x05\xc1\x15\x63\x13\xbf\xdb\x23\x88\x62\x45\xa1\xb6\x22\xb8\x60\xfb\xd3\x77\xc9\xbf\x7e\xdd\xcc\x37\x00\x00\xff\xff\x78\xcd\x49\xae\xa8\x00\x00\x00"), - }, - "/src/sync/pool.go": &vfsgen۰CompressedFileInfo{ - name: "pool.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223439701, time.UTC), - uncompressedSize: 505, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x90\xcf\x4e\xf3\x30\x10\xc4\xcf\xde\xa7\x98\xaf\xa7\xe4\x03\x5a\xb8\x56\xca\x89\x03\x37\x54\x89\x63\x55\x21\xe3\x6e\x2a\x83\xeb\x58\xce\x5a\xa4\x54\x79\x77\xe4\x24\xfd\x83\x20\x97\x68\x77\x46\xbf\x99\xf5\x62\x81\x9b\xb7\x64\xdd\x16\xef\x2d\x51\xd0\xe6\x43\xef\x18\xed\xc1\x1b\x22\xbb\x0f\x4d\x14\xcc\x92\x6f\x75\xcd\x33\x22\x39\x04\xc6\xaa\x69\x1c\x5a\x89\xc9\x08\x8e\xa4\x5c\x63\xb4\x43\xfe\x46\xdb\x7c\xd5\x58\x2f\x1c\x27\xe5\xc5\x7e\x31\x92\xf5\x12\x24\x12\xa9\x56\x9a\xc8\x58\x6f\x06\x4b\xad\x0d\x1f\x7b\x52\xcf\xfc\x09\xa0\x4e\xde\x14\x25\xae\x95\x9e\x28\x6f\x51\x04\xfc\xcf\xb1\x25\x9e\x58\x7e\x7a\x72\x05\x5b\xc3\xb1\x2f\xc2\x7c\xa0\x97\xa8\x2a\xdc\xe7\x7d\x16\xc2\x3c\xd3\xff\x55\xf0\xd6\x0d\x3b\x15\x59\x52\xf4\xa3\x50\x94\xa4\x54\x4f\xe7\xa5\xb7\x8e\xf2\xdc\x61\x59\x61\xe2\xad\xaf\xd9\x77\x0f\x1b\x52\xd3\x80\x8b\x65\xf9\xcb\x33\x01\xbb\x3f\x6e\x58\x25\x29\xba\xeb\x1b\xca\xe9\x88\x2e\x37\x3f\xf5\x1c\x01\x43\x9b\x4b\x9e\x0e\x81\xfd\xf6\x94\x74\x8b\xae\x3c\xf3\x63\xf2\x62\xf7\xfc\x1a\x79\x67\x5b\xe1\x98\xb3\x1e\x1d\x6b\x9f\x42\x61\xc6\xff\xf4\xc4\x39\xae\xa7\xef\x00\x00\x00\xff\xff\xd6\xf1\x0f\x08\xf9\x01\x00\x00"), - }, - "/src/sync/sync.go": &vfsgen۰CompressedFileInfo{ - name: "sync.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223579740, time.UTC), - uncompressedSize: 2015, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x55\xdb\x6e\xe3\x36\x10\x7d\x36\xbf\x62\x60\x14\xa8\x94\xd8\x52\xd2\x2d\xb6\x40\xb0\x7e\x28\xb2\xc5\x22\x40\xbb\x0b\x34\x29\xfa\x10\x18\x0d\x25\x8d\x4c\xc6\x14\xa9\x72\x28\xab\x6e\x90\x7f\x5f\x0c\xa5\xf8\x92\x9b\x5f\x4c\x90\x33\x67\xce\x9c\xb9\x28\xcf\xe1\xb4\xe8\xb4\xa9\xe0\x9e\x84\x68\x65\xb9\x96\x2b\x04\xda\xda\x52\x08\xdd\xb4\xce\x07\x98\xae\x74\x50\x5d\x91\x95\xae\xc9\x57\xae\x55\xe8\xef\x69\x7f\xb8\xa7\xa9\x10\x1b\xe9\x81\xb0\xf9\x5b\xea\x80\x9e\x60\x01\x8d\x5c\x63\xd2\xc8\xf6\xf6\xa4\xd3\x36\x7c\xf8\x69\x79\xbb\x2c\x95\xb4\x50\x38\x67\x52\x21\xf2\x9c\xcd\x7f\xed\xdd\x1a\x2d\x04\x2f\xcb\x35\x41\x50\x08\xb6\x6b\x0a\xf4\xe0\x6a\xe8\x47\x28\x39\xd8\x14\x5b\xf0\x9d\x0d\xba\xc1\x7f\xae\xb1\xf1\x68\x50\x12\x42\x72\x57\x2a\xf8\x34\x87\xe0\x3b\xbc\x4b\x19\x35\x28\x19\x40\xc9\x0d\x82\x75\x01\xb6\x18\x40\x96\xff\x76\xda\x63\x15\xf1\x09\x1b\xd9\x2a\xe7\xd9\xf5\xd3\xbc\x54\x77\xa0\xed\x21\xf0\x68\xfc\x47\x17\xf0\xbf\x34\x13\x79\xce\x98\x37\x4a\x13\xb4\x1e\x37\x68\x03\x81\x04\x8b\x3d\x94\xd2\x18\x08\xee\x2d\x5f\x7e\xea\xbd\xb3\x2b\xb3\x7d\x22\x70\x1c\x9f\x71\xb5\x85\x02\x43\x8f\x68\x21\x29\xb0\x94\x1d\xe1\x6b\x49\x2a\x49\x20\x8d\x47\x59\x6d\x41\xdb\xd2\x63\x83\x36\xbc\xc8\xa7\x57\xda\x44\xd4\x48\x4c\x21\xb4\x68\x2b\x6d\x57\x91\x29\xbd\x47\xf5\x48\x2d\x8f\x25\xea\x0d\x56\x50\x7b\xd7\x44\x1c\x2e\x9b\x45\x13\xa1\x2d\x47\xed\x08\x2a\x7c\x83\xc6\x4e\xb3\x6b\x44\x50\x21\xb4\x74\x91\xe7\xef\xb6\x8f\x26\xea\x90\xf2\x5f\x3e\x7c\xcc\x9e\xba\x68\x6c\x8b\x57\x9a\x68\xf8\x4b\x85\xa8\x3b\x5b\xbe\x92\x50\x42\x30\x9a\xa6\xf0\x20\x26\x6f\x64\x9c\xd0\x0c\x6a\x69\x08\x53\xf1\x28\x06\xb2\xc7\x8a\x68\x02\xa3\xd7\x78\x70\x3f\x83\xa2\x0b\x50\x3b\x0f\xad\x77\xb5\x36\x51\x58\x67\x03\xda\x0a\x2b\x88\x5e\x48\x9c\xfb\x70\x3e\xb0\xd2\x14\xb5\xa5\xae\xe5\x59\xc2\x6a\x06\xe4\xe0\xbe\xa3\x00\x5c\xee\x28\x9e\x6c\x10\x74\xd3\x9a\xa8\xa8\x0c\xda\x59\x90\xf4\x4a\x76\x11\xff\xe6\xdb\xe7\x6f\x17\x70\x65\x37\x48\x41\xaf\x64\x60\x0c\x4d\x19\x5c\xd5\xa0\xc3\x8f\x04\xad\x23\xd2\x85\x41\xae\xf8\x0e\x74\xc6\x64\x49\x57\xe8\xa1\x72\xcc\x8a\xdc\x0c\x5c\x50\xe8\x7b\xcd\x4d\x87\x8d\xdb\x0c\x40\x50\xba\x86\x3d\xb2\xb7\x24\x1e\x15\x7c\xd2\x79\x06\
x46\xd7\x6e\x18\x6b\x96\x5c\xd7\x90\x9c\x10\xcc\xf7\x75\xbc\xa5\x65\x0a\x8b\x05\x9c\xf1\xf3\xa4\x54\x70\x31\x16\xf6\x60\x1f\x4c\xd8\x2f\x02\xb1\xcd\x64\xbf\x49\x6e\x69\x09\x0b\x90\x2d\x37\x73\x72\xb0\x42\x1e\x4a\xf5\x38\x83\x23\xbb\x2c\xcb\x18\xe8\x11\xd0\x10\xbe\x8b\x73\x74\x3d\x83\x52\x45\x3f\x31\x99\xf0\x46\x10\xd1\x6d\x47\x1d\xe6\x0b\x38\x1f\xf8\x1d\x5d\xef\x12\x9a\x54\x68\x30\x60\xb2\x7b\x9d\x01\x8d\x78\x8f\x62\x72\x42\xf3\x39\x37\xd9\x73\x31\xc7\xd9\x3e\xd4\x51\x49\x5b\xb9\xba\xde\x4b\xb9\x2b\xf6\x5f\x71\x09\x0c\xaf\xba\x06\x8b\x58\x61\x95\x3f\x15\x3a\xe3\x28\xa7\xa7\x42\x4c\x7a\x96\xf6\x28\xb9\x58\x0f\x83\x36\xe9\x0f\x4a\xe0\x31\x74\xde\x32\x3d\x31\x96\xa3\xbf\x3d\x5b\xb2\x3b\x9f\xce\x2f\x96\xe2\x85\x70\xfd\xab\x40\xfb\xcc\x47\xe3\x21\x75\xc6\x3d\xd2\xea\x94\x25\x8c\xb1\xc6\x55\xfd\x42\x11\xeb\x82\xae\xb7\xbf\x6b\x0a\x97\x0a\xcb\x75\x42\xfa\x7f\x04\x16\xa6\x0d\x3e\x85\x87\xe7\xe6\xa5\xb4\xd7\xad\xb6\x89\x06\x6d\x43\x1a\x15\x8b\xe3\x1e\x13\x1b\x46\x7b\x9c\xec\x4b\xd7\x6e\xf9\x6b\xc2\x6e\xd9\xe8\xfe\x55\x5a\xf7\xac\xbd\xad\x64\x06\x0d\x26\x29\x23\x7e\xfc\x99\xd1\x78\x62\x02\x34\xda\x18\x4d\x58\x3a\x5b\xc1\x02\xce\xcf\xe2\x6f\x17\xea\x9e\xb2\x2f\xc6\x15\xd2\x64\x5f\x30\x24\xd3\xcf\x32\xe0\x34\xcd\xbe\x62\x9f\xa4\xd9\xa5\x34\x26\x99\xae\x30\xdc\xe8\x86\x6f\xaf\x18\x38\x49\xe1\xe4\x10\x73\xa4\x79\xf5\x34\xa8\x58\x1d\x7c\x90\x46\x92\x41\x79\xd7\x27\x04\x14\xbc\xb6\xab\xd8\x1a\xfb\xb8\x43\x94\x1f\xa2\xcd\x9f\x83\xdb\x6f\xde\x3b\x3f\x8d\xb5\x78\x14\xdf\x03\x00\x00\xff\xff\xaa\x5d\x20\xc4\xdf\x07\x00\x00"), - }, - "/src/sync/sync_test.go": &vfsgen۰CompressedFileInfo{ - name: "sync_test.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223698630, time.UTC), - uncompressedSize: 240, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd7\x57\xd0\x4e\x2a\xcd\xcc\x49\x51\xc8\x2a\xe6\xe2\x2a\x48\x4c\xce\x4e\x4c\x4f\x55\x28\xae\xcc\x4b\x8e\x2f\x49\x2d\x2e\xe1\xe2\xca\xcc\x2d\xc8\x2f\x2a\x51\xd0\xe0\xe2\x54\x02\x09\x64\xe6\xa5\x2b\x71\x69\x72\x71\xa5\x95\xe6\x25\x2b\x84\xa4\x16\x97\x04\xe4\xe7\xe7\x68\x94\x28\x68\x41\x25\xf5\x42\x34\x15\xaa\xb9\x38\x4b\xf4\x82\xb3\x33\x0b\x34\x34\xb9\x6a\xd1\x94\xba\x3b\x93\xa0\x38\x28\x35\x27\x35\xb1\x38\x95\x48\x1d\xce\xf9\x79\x29\xce\xf9\x05\x95\x78\x95\x03\x02\x00\x00\xff\xff\x93\xcf\x90\x60\xf0\x00\x00\x00"), - }, - "/src/sync/waitgroup.go": &vfsgen۰CompressedFileInfo{ - name: "waitgroup.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 223838274, time.UTC), - uncompressedSize: 446, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xd0\x4d\x4e\xc3\x30\x10\x05\xe0\xb5\xe7\x14\x8f\x2e\x2a\x87\x0a\x5a\xe8\x0e\x35\x48\xac\x38\x02\x0b\xc4\xc2\x38\x6e\x62\x1a\x26\x51\x32\xa6\xaa\xaa\xdc\x1d\xd9\x04\x08\x3f\xcd\x2a\x7a\x1e\x7d\x7e\x9e\xe5\x12\x8b\xe7\xe0\xeb\x02\x2f\x3d\x51\x6b\xec\xce\x94\x0e\xfd\x81\x2d\x91\x1c\x5a\x87\x07\xe3\xe5\xbe\x6b\x42\x8b\x5e\xba\x60\x05\x47\x52\xb6\x09\x2c\xae\x83\x67\x21\x65\x2b\xa4\xcf\x56\x86\xc7\x99\xe3\x40\xa4\x7a\x31\xe2\xae\xf0\xb8\x7e\x0a\x9e\x65\x7d\x4d\x03\xd1\x36\xb0\x85\xde\x97\x38\xff\x62\x33\xdc\x15\x85\x2e\x5c\x2d\x26\x7a\x59\xf4\xf7\xe5\xe5\xe7\x15\x8b\x1c\xe9\x8c\x94\xdf\x62\x92\x6f\xb0\x8a\x93\xaa\x35\xec\xad\x9e\xc5\xc2\x37\x60\x57\x1a\xf1\x6f\xd3\xd2\xe3\xfc\x2c\x23\x35\xfc\x36\x6e\xb1\xc2\x7c\x9e\x92\x0a\x79\x0e\xf6\x75\x32\xc7\x00\xaf\x66\xe7\xf4\x8f\x67\xfd\xa7\xe4\xf9\x94\x39\xfb\x66\x6c\xdd\xf4\x4e\xa7\x38\x9b\xa8\xec\xeb\xa8\x9c\xda\x46\xfc\xd5\x69\x0b\x7f\xcb\x46\x75\x73\x91\xa0\x0f\xe2\x3d\x00\x00\xff\xff\x08\x4a\xda\xa3\xbe\x01\x00\x00"), - }, - "/src/syscall": 
&vfsgen۰DirInfo{ - name: "syscall", - modTime: time.Date(2019, 4, 10, 21, 27, 44, 476613264, time.UTC), - }, - "/src/syscall/syscall.go": &vfsgen۰CompressedFileInfo{ - name: "syscall.go", - modTime: time.Date(2019, 3, 5, 14, 33, 26, 82012629, time.UTC), - uncompressedSize: 1346, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x94\x41\x6f\xf3\x36\x0c\x86\xcf\xd6\xaf\x60\x8d\x01\xb1\xf1\xb9\x76\x7b\x0d\x90\x4b\x8b\xa1\xe8\x69\x05\xda\x61\x87\xae\x07\xd9\xa6\x1d\xa6\x0a\x65\x48\x74\x96\x6e\xc8\x7f\x1f\x64\x39\x6d\x92\x02\x1b\xf0\xdd\x0c\x8b\xa4\xf8\xf2\x79\xc5\xaa\x82\x1f\xf5\x48\xa6\x85\x8d\x57\x6a\xd0\xcd\xbb\xee\x11\xfc\x87\x6f\xb4\x31\x4a\xd1\x76\xb0\x4e\x20\x53\x49\x3a\xb2\xd7\x1d\xa6\x4a\x25\x69\x4f\xb2\x1e\xeb\xb2\xb1\xdb\xaa\xb7\xc3\x1a\xdd\xc6\x7f\x7d\x6c\x7c\xaa\x72\xa5\x76\xda\xc1\x5f\xda\x31\x71\xff\xe4\x88\x05\x5b\x58\x41\xa7\x8d\xc7\xe9\xc8\x10\xe3\xdd\xd8\x75\xe8\xe0\xf5\xad\xfe\x10\x54\xaa\x1b\xb9\x01\x62\x92\x2c\x87\x7f\x54\xb2\xf1\xe5\x83\xb1\xb5\x36\xe5\x33\x4a\x96\xfe\xd2\x99\xd1\xaf\xef\x2d\x7b\x6b\x30\x2d\x60\xe3\xcb\x47\x16\x74\xac\xcd\x6f\xf5\x06\x1b\xc9\x42\x7e\x4c\x4d\xa8\x03\x83\x9c\x7d\x5d\x92\xc3\xd5\x0a\x6e\xa6\xb3\x93\xc2\x0f\xa1\x70\x33\x97\xcc\xcb\x7b\x6d\x4c\x96\x1a\xdb\xa7\x05\x78\x71\xc4\xfd\x69\x85\x3c\xe4\x9e\xb4\xbd\x02\x26\xa3\x92\xe4\xa0\x92\x43\x9e\xab\xc3\x2c\x60\x08\x62\xff\x88\xc2\x63\x37\xd4\xc1\xd5\xc5\x24\x42\x1f\xff\xd3\x06\x3a\x67\x5d\x5a\x40\x3a\xa7\x2e\x03\x14\xc1\x2d\x04\x30\x1e\xd8\x0a\xe8\x9d\x26\xa3\x6b\x83\x05\x78\x44\x58\x8b\x0c\x7e\x59\x55\xff\x49\xa7\x36\xb6\xae\xb6\xda\x0b\xba\xaa\xb5\x4d\x35\x93\xf6\xe5\xb6\x4d\x73\x15\xc4\x7c\x83\x26\x6e\xc4\x73\x79\x2f\x76\xe6\x90\xd5\x33\xbd\x49\x68\x6f\x9f\xce\x4e\x61\xb9\x82\x0b\x95\x97\x21\xe1\x4e\xea\xe0\x5b\xe6\xd5\x94\xf9\x3b\xb7\xd8\x11\xcf\x03\xbb\x0c\x2a\x1f\x79\x67\xdf\x31\xfb\xee\x84\x7a\x82\xe5\x50\x46\xc7\x41\x93\x3a\xe7\xa6\x87\x01\xb9\x3d\x61\x5b\x40\x5d\x96\x65\xae\x92\xce\xba\xe8\x9f\xd0\x3a\x71\x8b\xfb\xbb\x0f\xc1\xb3\xc8\xc5\x9f\xbc\xc8\xa3\xc5\x08\x56\x2b\xb8\xbe\x8d\xae\xaa\x1d\xea\xf7\x68\x87\x9f\x74\xd8\xeb\x92\xde\xf2\x1c\xaa\x0a\x5a\xcb\x0b\x81\xd1\x63\x1c\xb7\xe1\x02\x3c\x71\x83\x40\x02\xad\xc5\x48\x1f\xf7\x51\x33\xfd\x8d\xb0\x1d\x8d\x50\xe0\x00\xcd\x5a\x3b\xdd\x08\x3a\xaf\x2e\xdc\x7a\x72\x11\xfd\xb8\x5d\xbe\x85\xc1\x1c\xa9\x8e\x1e\xb3\x01\xe2\x0b\x2f\x9f\x6c\x20\xef\x26\xa4\x55\x05\x6c\xaf\xed\xf0\x19\xf9\xeb\x9e\x24\x6b\x6c\x8b\x40\x2c\x53\xc8\x73\x74\x50\x86\x7b\x92\x17\xa7\x87\x02\x46\x62\x19\xc4\x4d\x61\x79\x01\x37\x05\xdc\x4c\xef\xa3\xaa\xbe\x66\x0a\xe4\xa1\xb1\x03\x61\x0b\x9d\xb3\x5b\x08\xcd\x7b\x38\xee\x1f\xb1\xa0\x77\x96\x5a\x88\xfb\x87\xb8\x0f\xd2\xb3\x38\x04\x59\x23\x38\xd4\xe6\xb8\xa5\x3e\xb3\xc2\x68\x78\x21\x79\x79\x5c\x25\x47\x7e\x7e\x76\x69\x01\x0d\x44\xb7\x12\x4b\xe8\x3d\xf0\xa6\x02\xea\x80\xdb\x69\x0e\x9b\xef\xb8\x3f\xea\x00\xb7\x89\x6c\xa3\x93\x80\xe6\xd7\xae\x8e\x3f\xae\x6f\xd5\x41\xfd\x1b\x00\x00\xff\xff\xa9\xfc\xcd\x86\x42\x05\x00\x00"), - }, - "/src/syscall/syscall_darwin.go": &vfsgen۰CompressedFileInfo{ - name: "syscall_darwin.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 533942322, time.UTC), - uncompressedSize: 844, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x90\x41\x6b\x32\x31\x10\x86\xcf\x9b\x5f\x31\xec\x29\xe1\x0b\x8a\x7e\xad\x37\x4f\xc5\x83\x27\x8b\x16\xda\x9e\x64\x36\x66\x35\xdb\x6c\x36\x4c\x66\x91\x52\xfc\xef\x45\x71\xeb\x41\x2d\x6d\xf1\xb2\x0c\x2f\x9b\xe7\x79\x67\xfa\x7d\xf8\x57\xb4\xce\xaf\xa0\x4a\x42\x44\x34\x6f\xb8\xb6\x90\xde\x93\x41\xef\x85\x70\x75\x6c\x88\x41\x8a\x2c\x5f\x3b\xde\xb4\x45\xcf\x34\x75\x7f\xdd\xc4\x8d\xa5\x2a\x9d\x86\x2a\xe5\x42\x09\x51\xb6\xc1\xc0\xfe\xf3\xf8\x20\xcb\xc3\x20\x95\x82\xd6\x05\x8e\x4c\xf0\x21\x32\x57\x42\x95\x7a\xd3\xc0\x96\x02\xfa\x59\x51\x59\xc3\xb2\x54\x30\x1e\x5f\xc8\xbd\x2b\xcc\x72\x4b\x8e\xed\x92\x09\xeb\xd8\x78\x17\xac\xda\x63\x32\xb2\xdc\x52\x80\xc5\xeb\x62\xf9\x3c\x9f\x3e\x4d\x44\xb6\x13\x5d\x78\xd4\xc9\xda\x85\x36\xcd\x82\x55\x62\x77\x6c\x76\x5c\x4b\x32\x61\xd4\x80\x03\x0d\x38\xd4\x80\xff\xbb\x27\x0a\x24\x0d\x34\xd0\xb0\x0b\x34\x58\x22\x98\x10\x85\xe6\xe0\xed\xb4\x57\x38\x67\xa6\xd1\xd9\x2f\x1a\xf0\x4e\x03\xde\x6b\xc0\xd1\x1f\xb5\xdf\x43\xcf\x3b\xbc\xdc\xa6\x44\xc4\xe0\x8c\xcc\xbf\xa8\xe0\x12\x84\x86\xc1\xd5\xd1\xdb\xda\x06\xb6\xab\xfc\x24\x27\xdc\x5e\xbb\xd2\x6f\xd7\x9e\x5f\x47\x5d\xf2\xdd\xf6\xe6\xf3\x1f\x72\xf7\x4d\x3e\x03\x00\x00\xff\xff\xb8\x46\xa3\x7f\x4c\x03\x00\x00"), - }, - "/src/syscall/syscall_linux.go": &vfsgen۰FileInfo{ - name: "syscall_linux.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 224187091, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x73\x79\x73\x63\x61\x6c\x6c\x0a\x0a\x63\x6f\x6e\x73\x74\x20\x65\x78\x69\x74\x54\x72\x61\x70\x20\x3d\x20\x53\x59\x53\x5f\x45\x58\x49\x54\x5f\x47\x52\x4f\x55\x50\x0a"), - }, - "/src/syscall/syscall_nonlinux.go": &vfsgen۰FileInfo{ - name: "syscall_nonlinux.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 224294407, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x2c\x21\x6c\x69\x6e\x75\x78\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x73\x79\x73\x63\x61\x6c\x6c\x0a\x0a\x63\x6f\x6e\x73\x74\x20\x65\x78\x69\x74\x54\x72\x61\x70\x20\x3d\x20\x53\x59\x53\x5f\x45\x58\x49\x54\x0a"), - }, - "/src/syscall/syscall_unix.go": &vfsgen۰CompressedFileInfo{ - name: "syscall_unix.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 534522478, time.UTC), - uncompressedSize: 3370, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x57\x4d\x6f\x1b\x37\x10\x3d\xef\xfe\x8a\x09\x51\x04\xdc\x88\x5d\x7d\xb4\x0d\x8a\xba\x3a\x38\xae\x6a\x08\x70\xed\x20\xb2\x9b\x16\x41\x60\x50\xda\x59\x99\xd2\x8a\x54\x49\xae\x1c\x21\xd1\x7f\x2f\xf8\xb1\x96\x2c\xe9\x50\x23\x41\x50\x20\x37\x61\xe7\xcd\xcc\xe3\x9b\x21\x67\xd4\x6e\x43\x6b\x5c\x8b\xaa\x80\x99\x61\xcf\xee\x85\x2c\xd4\xbd\x49\xd3\x25\x9f\xcc\xf9\x14\xc1\xac\xcd\x84\x57\x55\x9a\x8a\xc5\x52\x69\x0b\x34\x4d\x88\xae\xa5\x15\x0b\x24\x69\x42\x6a\x69\x78\x89\x24\x4d\x13\x32\x15\xf6\xae\x1e\xe7\x13\xb5\x68\x4f\xd5\xf2\x0e\xf5\xcc\x6c\x7f\xcc\x0c\x49\xb3\x34\x2d\x6b\x39\x81\xe8\x7e\x8b\x72\x65\x68\x06\xef\xde\x1b\xab\x85\x9c\xc2\xc7\x34\x59\x6a\x35\x41\x63\xe0\x97\x3e\xcc\x4c\x7e\x5e\xa9\x31\xaf\xf2\x73\xb4\x94\x44\x0b\xc9\xd2\x44\x94\xd0\xe0\xfa\x1e\x77\x23\x0b\x2c\x85\xc4\xc2\x85\x48\x34\xda\x5a\x4b\x90\xa2\x4a\x93\x4d\x9a\xcc\xcc\x40\xae\x5c\xc0\xe8\x13\xc2\xa1\x5c\xb9\x50\x28\x57\x73\x5c\x1f\xcb\x77\x35\x9e\xe1\xc4\x92\x2c\x3f\xe3\x55\x45\x89\x43\x11\x06\x3e\x58\xf0\xf3\x4e\x0b\x3e\x47\xda\x1c\x80\x41\x0c\x97\x5f\xa0\x9c\xda\x3b\x9a\x65\x69\x52\x2a\x0d\xc2\x41\x3b\x27\x20\xe0\xd7\x03\xc8\x09\x88\x56\xcb\xf3\x9e\xe3\xda\xe1\x1a\xc0\x50\x16\xf8\x81\x8a\x2c\x1f\xf9\xe0\x34\x4b\x13\x9f\xf6\x9d\x78\x0f\x7d\x70\xe0\x16\x90\x3e\x81\x56\x20\xe5\x59\xcf\x71\xbd\x8b\xdf\xa4\x8d\x18\xce\x31\xdd\x44\xfd\x0d\x5a\x94\xab\xdb\x09\x9d\x33\x58\x41\xe0\x9e\x7d\x59\xf5\x7d\xee\x43\xc1\xf3\x91\x23\xc9\x60\x95\x3d\x90\xa9\xe5\x96\xce\xd7\xe5\xf2\x1b\x56\x68\x91\xce\x3d\x97\x15\xd7\x4d\xab\xff\xa1\x8a\xba\x42\x78\x31\x33\x79\x68\x02\x6f\xe4\x95\x46\x5e\xac\xaf\xb5\xc0\xe2\x5a\x5d\x28\x5e\x40\x1f\x4a\x5e\x19\xf4\xe6\x85\x90\xb5\xb9\x92\x08\x7d\xf8\xbe\xdb\xe8\x1c\xe2\xbd\x5a\x5f\xf2\x05\x52\xc9\x17\xf8\x70\xc0\x6d\x70\x47\xb4\xc0\x12\x35\x38\x1f\x9a\x45\xe2\x13\xb5\x42\xed\x6b\xde\x6e\xc3\xb6\xa3\x41\x94\x10\x8d\x58\xa4\xc9\x86\x06\x11\x1e\x33\xef\xf7\x3d\xd4\x05\x12\xe5\x31\xe2\xce\xf2\xe8\x9a\x38\x85\x92\xa3\x27\xb4\xba\x46\x4f\xe8\x9f\x5a\x68\x3c\x52\x8d\x68\x71\xd5\x48\x3c\xb9\x00\x3c\x56\x8e\x64\xc9\xa5\x98\x50\xe2\xb1\x2e\xe3\x1e\xed\xc6\x39\x1f\xca\x95\x9a\x23\x25\xd1\x4e\x1e\xb5\xf2\x23\x27\xcf\xc1\x29\xbb\x6d\xa8\x51\xb0\x53\xab\xf9\x92\x01\xef\x32\xe0\x3d\x06\xfc\x07\xa8\x85\xb4\x4b\xab\x33\xa0\xba\xcb\x40\xf7\x9a\x0f\x0c\x50\x6b\x18\x68\x2d\x95\x57\x5f\x94\x50\xba\x83\x3e\x2e\x1f\x19\x35\x64\x4e\xa0\x84\x67\x5b\x89\xb5\xc3\x96\x0d\xe7\xfd\xac\xd9\xf6\x41\x8a\xe9\xa8\x8e\x57\xbb\x93\xe5\x43\x69\x69\x96\xb1\x03\x53\x77\x6b\xf2\xbc\x1e\x0c\xbd\xc6\xe0\x15\x11\x25\xb8\x7c\x4e\xec\xd1\xdf\xa3\xdb\xb7\x6f\x86\xd7\x03\x78\xfe\x1c\x28\xef\xba\x6f\x5d\xf8\xf4\x09\xc2\xcf\x5e\xe8\x2b\xae\x35\x5f\xc7\x22\x0e\xa5\x45\x2d\x79\x15\xda\x90\xf2\x9e\xa3\x6a\x2a\x31\xc1\x9d\x87\x6d\xbc\xb6\xc8\xc0\xbb\xed\x3e\x6a\xc9\xa1\xbf\xf7\x0c\x17\x9c\x7c\xe7\x1d\x48\x74\x74\xf8\xa5\x16\xd2\x5e\xab\x33\x25\x8d\xaa\x30\x82\x0f\xa5\xd9\x4b\xc4\xa0\xc3\xa0\xb3\x7f\x54\xfc\x20\xec\xb5\xfb\xed\xd5\x0f\xb3\x24\x3f\x57\xee\x73\x7c\xf4\x7c\xb6\xb7\x5c\xcb\xf8\x0e\xee\x65\x69\xee\x6a\x88\x3f\x38\x3d\x3b\x1b\x8c\xf6\xdb\xe7\xe5\x41\x25\x19\xf0\x1f\x19\xf0\x9f\x18\xf0\x97\x5f\xa8\x97\x5e\x3e\xb1\x99\x76\x29\x7c\x95\xc6\x7a\xd6\x87\x5e\xa7\x07\x1f\xa1\xdd\x86\x39\x6a\x99\x2b\xa3\xb1\x42\x6e\x10\x94\x84\xab\x11\xfc\xc5\xe0\x8e\x2f\x97\x28\x0d\x08\x09\x42\x0a\x0b\xaa\x04\xa2\x0c\x81\xb8\x40\x34\xc5\xdf\x29\xc7\xe6\x69\x15\x79\xc3\xef\xbf\xa1\x3b\xfd\x39\xbd\xab\x1f\x94\xba\x54\x03\xad\x95\xfe\xef\x82\xfd\xef\x54\x7a\xaa\x18\x47\xda\xe5\xdb\xbe\xc3\x9f\xd3\x48\xaf\xd6\x16\x5f\x5b\xfd\xbb\x5
6\x8b\xb8\x4d\x9a\x87\xd5\x85\xbe\x08\x43\x01\x5d\x83\x79\x81\x76\xa7\xca\xee\x6a\x70\x23\xa4\xfd\xf9\xd4\x8f\x82\x2c\xbf\xc4\x7b\x5a\xa1\xa4\x26\x83\x16\x74\x9b\xc5\x98\xc1\xd8\x39\x6a\x2e\xa7\x08\x61\xdc\x38\x44\x5c\x5d\xc6\xee\xb9\xef\xec\xaf\x2b\x0c\x06\xc3\xcb\x3f\x4f\x2f\x9a\xb5\xc5\xcf\x8c\x11\xda\xb8\x30\x33\x18\x07\x01\xf6\x0c\x21\x39\x83\xce\x56\x8b\x70\x94\x8c\x86\x3f\x31\xf9\x6b\x25\xdc\x4c\x8b\x53\xe8\xc6\x7f\xa4\x99\xd3\xd9\x2d\x49\x9b\xf4\xdf\x00\x00\x00\xff\xff\x77\x4c\x60\x3e\x2a\x0d\x00\x00"), - }, - "/src/syscall/syscall_windows.go": &vfsgen۰CompressedFileInfo{ - name: "syscall_windows.go", - modTime: time.Date(2019, 3, 30, 12, 48, 4, 535202773, time.UTC), - uncompressedSize: 2566, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x96\xdf\x6f\xdb\x36\x10\xc7\x9f\xad\xbf\xe2\xe0\x87\x81\xf4\xb8\x38\x72\x97\xcc\x29\xe0\x87\x20\x76\xd3\x01\xee\x52\x24\x2e\x0a\xac\x28\x0a\x4a\x3c\xc9\x6c\x29\x52\x20\x29\xa3\x4e\x9a\xff\x7d\xa0\x7e\xd8\xca\x92\x02\x1b\x1a\x18\x7d\x91\x45\xde\x97\x77\x9f\xfb\x21\xc2\xe3\x31\xfc\x9a\x54\x52\x09\xf8\xec\xa2\xa8\xe4\xe9\x17\x9e\x23\xb8\xad\x4b\xb9\x52\x51\x24\x8b\xd2\x58\x0f\x43\x5b\x69\x2f\x0b\x1c\x46\xd1\x86\x5b\x28\xa4\xae\xdc\x95\x46\x98\xc1\x6f\x71\x14\x65\x95\x4e\xe1\xa6\x39\x42\xbc\xe5\x25\x03\xcd\x6d\xee\x18\xf0\x98\x01\x9f\x30\xe0\x2f\xa0\x92\xda\x97\xde\x52\x20\x36\x66\x60\x27\xdd\x06\x03\xb4\x16\x16\xd6\x6a\x43\xe1\x2e\x1a\x94\x56\x6a\xff\x9e\x5b\x2d\x75\x4e\x68\x34\xb0\xe8\x2b\xab\x3b\x35\xe9\x42\x53\x06\xc7\x0c\x16\xe7\x17\x17\x8b\x9b\xe8\xfe\x21\xc3\xe9\xf7\x20\x18\xf0\xdf\x19\xf0\x13\x06\xfc\xf4\x90\x40\x67\xff\x05\x88\x01\xff\x83\x01\x9f\x32\xe0\x67\x87\x84\x8b\x27\xff\x97\x2e\x68\x8e\xc3\x23\x28\xe3\xc9\x41\x61\x4f\x7e\x10\x36\x3c\x82\x36\x0e\xe2\xf8\xe4\xa0\xec\xd3\xe7\x65\x0f\x8f\x20\x8f\x83\x3e\x9e\x1e\x24\x15\x65\xb8\x50\x32\xb1\xdc\x6e\x49\x26\x15\x6a\x5e\x20\x8c\xc2\xd1\xf8\x94\x02\x59\x73\x2d\x14\xfe\x78\xe0\x7f\x45\xcd\xd1\x97\xd6\xa4\x5c\x08\x8b\xce\x3d\x8a\x12\x6c\x7b\x90\x29\x05\x12\x76\x9e\x9d\x82\x08\x18\x2d\xf9\xed\x76\xbe\x5c\x52\x58\x1a\x2e\x08\x0d\xae\x8d\x0d\x5e\x5b\x2f\xbf\xcc\x97\xcb\x45\xd8\xbb\x7b\xe3\xf2\x97\x30\x74\x5b\xe7\xb1\x80\xd0\x7e\x07\xda\x78\xe0\x1b\x2e\x15\x4f\x14\x32\x70\x88\xb0\xf6\xbe\x74\x2f\xc7\xe3\x5c\xfa\x75\x95\x1c\xa5\xa6\x18\xe7\xa6\x5c\xa3\xfd\xec\xf6\x2f\x89\x32\xc9\xb8\xe0\xce\xa3\x1d\x0b\x93\x8e\xdb\xdb\xd9\x1d\x15\x62\x78\xbf\xc7\x2b\x1b\xbc\xb7\xd6\xa4\x14\x5e\x49\xfd\x93\xf1\xe5\xe8\x6f\xbc\x78\x5d\xf7\x8e\xac\x41\x6a\x4f\x81\x64\x02\x9a\x9d\xba\x35\x32\x83\x35\xcc\x66\x70\xb3\x9a\x7f\xba\x7a\xb7\x7a\xfb\x6e\xf5\xe9\xf5\xf9\x5f\xf3\xe5\x22\x18\xbb\x14\xe2\x68\x70\xff\x50\xba\xb8\xbe\xbe\xba\x7e\x42\x39\xa9\x95\xed\xe2\x78\x07\x72\x89\xfe\xc2\x68\x67\x14\xbe\x31\x02\x49\xda\xbc\xb7\x1c\x0c\x0a\x23\xda\x49\x7a\x31\xa1\x40\xc2\xf0\xd4\x55\xa4\xbd\x32\xce\xab\xa2\xd8\x36\x75\xdc\x27\xf8\xde\x4a\x8f\xaf\x64\xc8\xae\x19\xd0\xce\x63\x52\x65\xf0\xe1\x63\xb2\xf5\xc8\x40\x18\xbd\xf3\xce\xc0\x6c\xd0\x2a\x5e\x96\x28\x60\x74\xb5\x7b\x7f\x14\x35\x24\xdb\xb8\x9c\xcd\x20\x86\x6f\xdf\x7a\xcb\x49\x9d\x71\x3d\xd4\x2b\xd3\xe6\x45\x92\x2a\xa3\xd1\x60\x30\xaa\xa3\xcd\xa0\x09\x47\x14\xea\xda\x42\xf7\x25\xd2\x52\xd5\x45\xfa\xce\x47\x11\xcc\x5d\x7a\x8b\xaf\xd2\x87\xd9\x0a\x5f\x20\x7e\x95\x3e\x0d\x75\xea\xca\x14\x4a\xd3\xfc\x45\x38\xba\x34\xc1\x4a\xe8\xc3\x7a\x17\x05\xd7\x62\x29\x35\x12\x0a\x24\x2d\xc4\xfe\xd2\xd8\x55\x75\x77\xa0\xa7\x5e\x99\x73\x9b\x6f\xfa\x07\x18\x70\x9b\xa7\x30\xea\xfa\xc3\x6d\xbe\x81\xd1\x87\x69\x7c\x36\xf9\xd8\xfe\x74\xc2\x27\x5b\xa7\xa5\x62\x4f\xf7\xef\x12\x
3d\xea\x0d\xf9\x82\x5b\x70\xde\x4a\x9d\x53\x20\x1b\xae\x2a\x6c\x97\x0c\x32\x53\x69\x01\x89\x31\xaa\xef\x71\x38\x64\x90\x71\xe5\xb0\xef\x69\x25\x0b\xfc\xdb\x68\xfc\x53\x67\xc6\x16\xdc\x4b\xa3\x89\xbf\x95\x30\x0a\x86\x5b\xa3\x51\xee\x0d\xe1\xc6\x4e\xa1\x9b\x89\x27\xa9\x8f\x1f\x33\xfb\x6d\x89\xbd\xcd\x00\x59\xa5\xfe\x6e\x77\x1d\xf4\x8d\x14\xea\x1f\x42\xdb\x54\x1e\xd0\x47\xf7\xd1\x3f\x01\x00\x00\xff\xff\x47\x14\x60\x60\x06\x0a\x00\x00"), - }, - "/src/testing": &vfsgen۰DirInfo{ - name: "testing", - modTime: time.Date(2019, 3, 27, 13, 36, 51, 785397840, time.UTC), - }, - "/src/testing/example.go": &vfsgen۰CompressedFileInfo{ - name: "example.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 224716063, time.UTC), - uncompressedSize: 1424, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x54\x5d\x6b\xf3\x36\x14\xbe\x96\x7e\xc5\xa9\x20\x45\x5a\x5d\x85\xdd\x06\x7c\x51\xb6\x06\x0a\xa5\x2b\xcd\x7a\x57\x18\xaa\x73\xec\x6a\xb5\x25\x23\xc9\x49\xc7\x9a\xff\x3e\x74\xec\x7c\x8e\xf7\xe6\xbd\x09\x91\x2c\x3d\xe7\xf9\x38\x47\xf3\x39\xdc\xbc\x0f\xb6\x5d\xc3\xdf\x91\xf3\xde\x54\x9f\xa6\x41\x48\x18\x93\x75\x0d\xe7\xb6\xeb\x7d\x48\x20\x39\x13\x75\x97\x04\x67\xc2\xc7\xfc\x1b\x53\xb0\xae\xa1\xbf\xc9\x76\x28\xb8\xe2\xbc\x1e\x5c\x05\x61\x70\xf7\x5f\xa6\xeb\x5b\x94\xd8\xc0\x83\x4b\x18\x9c\x69\xa7\x2d\x05\xd2\x7f\xc2\xbb\xf7\xad\x82\x7f\x39\xb3\x35\xfc\x52\x7d\x98\x94\xfe\xc9\x2b\x56\x77\x49\x3f\x07\xeb\x52\x2d\x45\x59\x96\xf0\xf2\xfa\x04\x00\xb3\xf8\xe6\x44\x01\xd8\xe8\x27\xd3\xa1\xe2\x6c\xc7\x39\x9b\xcf\xe1\x37\xd3\xa7\x21\x20\xc4\xb4\xf6\x43\xd2\x9c\x8d\x7f\x60\x51\x82\x8f\x7a\x45\x0b\xce\xb6\x05\x60\x08\x79\x33\x61\xd7\x2f\x6d\x8b\x52\x68\x01\x37\x7b\x3c\xb8\x01\xa1\x27\x08\xa1\x88\x52\x3e\x7f\x55\x82\xb3\xed\x81\xd5\xb2\xcf\xb4\x5a\x27\x47\x64\x0c\x81\x60\x15\x67\xcc\x47\x7d\xff\x65\x93\xfc\x95\x98\xb1\x43\x69\x28\x61\xcb\x33\x29\x13\x88\x53\x76\x49\x3f\xf9\xad\x54\x9c\xf9\x4f\x28\x21\x85\x01\x27\x25\x2d\x1a\x07\x43\x0f\xd6\x81\x81\x35\xd6\x18\x02\xae\xa1\x32\x6d\x0b\xd1\xc3\x16\xa1\x32\x0e\x02\x56\x7e\x83\x01\x6c\x0d\xe9\x03\x01\x47\x47\xa1\x37\xce\x56\x51\x73\x46\xf7\x20\x67\x20\xc9\x5c\xb6\x8e\x89\x84\xd7\x5d\xfa\x7d\x08\x26\x59\xef\xe4\x91\x85\x5e\x0d\xef\x92\xd8\x29\xc5\x39\x1b\x79\xf8\x88\x50\xdb\x16\x0b\x08\x18\x93\x3f\xb8\x5b\x40\x83\x09\xfc\x90\x7a\x72\x9a\x6d\x35\x9d\x95\x93\x01\x07\xc5\x71\x72\x9d\xd1\x9d\x80\x66\x9d\x1d\xbf\x1f\x03\xd8\x2f\xe5\x96\x9c\x97\x2a\xdf\xfe\x0b\x28\xae\x17\xec\xfc\xe6\xfc\x8b\xad\xcf\x00\x4e\x12\x39\x89\xa4\x3e\x4d\x44\x4c\x5d\xbb\xa0\x8b\xd6\x35\x13\x1f\x92\xb4\x80\xd9\x86\x1a\xe9\x04\x34\x97\x39\x0b\x90\x7a\x8b\x6d\x4c\x80\xda\xd8\x16\xc6\x26\xe7\x8c\xe1\x5e\x01\x45\x40\xb2\x1b\x4f\xb1\x4e\x73\xa0\xff\x0c\xb6\x5b\xf5\xa6\x42\xe9\x87\x94\xbf\x6f\x8d\xfb\xc1\x01\x6c\xf4\x1f\xe4\xe4\xa4\x12\x1b\xfd\xea\x7c\x58\x63\x0e\x9d\xf4\xd9\x1a\xa2\x0f\xe9\xd1\x3a\x8c\xb2\xf1\x49\x65\xf5\xc7\x9d\x0c\xad\xe0\xfa\x9a\x3a\xb5\x3c\xf1\x85\x11\x6b\x4a\x5c\xaf\x26\x7f\x44\xe3\xd3\xe2\xcd\xe5\x29\x22\x4a\x72\xd8\xd7\x52\xd3\xb6\x28\x80\xe2\x3a\xe3\x95\x7b\x99\xed\x00\xdb\x88\x07\x4e\x59\xf2\x55\x09\x04\xf3\x73\xd5\x8f\x15\x1b\x9f\x0a\x42\x3a\x16\x1b\xdd\x20\x90\xab\x12\x84\x80\xef\xef\xcb\x59\x3c\x7b\x22\x6e\x6f\x6f\x61\x79\xf7\xf0\xb8\x80\x59\x04\x39\x8b\x2a\x83\x1f\x5f\x8a\x02\xf2\x00\x14\x04\x38\x06\x9d\xa7\xae\x36\x6d\xc4\xa3\xb4\x8b\x17\xe8\x7f\xf8\xcf\x77\xab\xd5\x09\xfe\x25\xba\x3a\xf2\xbe\x64\x4a\x73\x29\xa7\x47\x62\xc7\xd9\x4e\xaa\x71\xda\x5f\x06\xb7\x1f\x5e\xcd\x19\x36\x7a\x99\xfb\x29\x60\x1a\x82\xe3\x3b\xfe\x5f\x00\x00\x00\xff\xff\x6d\xa8\x39\x72\x90\x05\x00\x0
0"), - }, - "/src/testing/ioutil.go": &vfsgen۰CompressedFileInfo{ - name: "ioutil.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 224830486, time.UTC), - uncompressedSize: 1163, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x53\x61\x6f\x23\x35\x10\xfd\x6c\xff\x8a\x21\x12\xc8\xbe\x8d\x36\xbb\x69\x2f\x52\x7b\x04\xe9\xc8\x05\x74\x52\x29\x28\x6d\x05\x12\x42\x95\xb3\x3b\x2e\x43\x37\xf6\xca\xf6\x96\x44\xd0\xff\x8e\x6c\x6f\xb7\x94\x4f\x7c\x48\x76\x3c\x9e\x7d\xf3\xe6\xcd\xdb\xc5\x02\x8a\xfd\x40\x5d\x0b\x7f\x78\xce\x7b\xd5\x3c\xaa\x07\x84\x80\x3e\x90\x79\xe0\x9c\x0e\xbd\x75\x01\x04\x67\xb3\xfd\x29\xa0\x9f\x71\x36\x23\x1b\xff\x6d\x8a\x7d\x70\x8d\x35\x4f\x29\x3c\x99\x26\x3e\x03\x1d\x70\xc6\x25\xe7\x4f\xca\x81\x53\xa6\x85\x81\x4c\x38\x5b\x4e\xe7\xc3\x00\xb1\xb6\xfc\x61\x08\x78\xe4\x5c\x0f\xa6\x01\x87\x1e\xb1\x15\x72\xac\x85\xbf\x38\x73\x18\x06\x67\xc6\x84\x88\xa8\xe5\xb5\xfd\x53\xc8\xf2\xce\xd0\xf1\x5a\x19\x2b\x24\x14\x40\x26\xac\xce\x85\xf5\xe5\xf7\x18\x7a\x6a\x85\x94\x92\x3f\x8f\xa0\x06\x8f\xe1\x66\xd0\x9a\x8e\x42\x82\x0f\x8e\xcc\x43\x02\x4e\x1c\xca\x2b\xdb\x3c\x0a\xc9\x99\x83\xcb\x75\xe2\xc5\x19\x69\x70\xb0\x5e\x43\x15\xcb\x98\x83\xf5\xc4\x8b\xb3\x67\x9e\x13\xef\xea\xd5\xea\xfc\xfd\xf2\x3d\x14\x50\x57\xf5\xd9\x45\x75\xbe\x5c\x9e\xc1\x62\x01\x8d\x35\x3e\x28\x13\x3c\x68\x67\x0f\x70\x3d\x1c\xd0\x51\xa3\x3a\xd8\x61\x43\x3d\xfa\xdc\x38\x42\x4c\x14\xee\x4c\xf7\x42\x22\x0f\x3b\xca\x59\x7e\x0e\x56\x09\x32\x41\xd4\x78\x01\x05\xb8\x2f\x6b\xbc\x90\xf2\xd7\xfa\xf2\xb7\x38\xdc\x62\x01\x1f\x21\x4e\x18\xc8\x1a\xd5\x41\x63\xfb\x13\x58\x0d\x64\x87\x40\x5d\x79\x8b\x87\xfe\x3b\xea\x70\x0e\xc1\x82\x7a\xb2\xd4\x02\x1e\x83\x53\x90\x97\xe9\xcb\xac\x4e\x18\xcb\x44\xef\x50\xd3\x71\x14\x48\x82\xd0\xf0\xce\xfa\x32\x23\xa0\x73\xf1\x67\x9d\x8c\x92\xb4\x94\xc4\xb2\x3e\xf5\xf8\x44\x4e\x48\xce\x99\x69\xac\xd1\x1d\x35\x21\xde\x55\x9c\x69\xeb\x80\x52\xfc\x01\x08\xbe\x86\xba\xaa\xaa\x18\x16\x45\x92\xd5\xa8\x03\xc6\xdb\x08\x56\x8c\x5d\xe3\x02\x7f\x52\xe1\xf7\x1b\xec\x95\x53\x21\xb6\x2b\x60\xe4\x55\xbc\xd9\x23\x67\x4c\x67\x5a\x89\xc7\x8f\x3d\x9a\x34\x44\x44\x9d\xa7\xcc\xfd\xee\xd3\xcf\xbb\xbf\x53\xb4\xd9\x6d\x3f\xde\x6e\x73\xbc\xfd\x65\x73\x35\x87\x6a\x55\x55\x11\x83\x74\xac\xfd\xec\xb7\x47\xf2\x41\xa0\xcb\xf3\xa5\xfc\x34\x4e\x51\x7c\x78\x3d\xc0\x37\x50\x67\x5b\xb0\xff\x1a\x68\xcc\xbc\x71\xcb\x6b\xd5\xeb\x8e\x59\xf4\x10\x63\x8d\x35\x81\xcc\x80\x3c\x9f\xf7\x0e\xd5\x63\xb6\x57\xf2\xc0\xe4\x5e\x87\xaa\x4d\xa3\x69\xea\x30\x89\x36\x6d\x28\x07\xf3\x7f\x6d\x66\xd4\xe4\x72\x12\x65\x7a\x4b\x26\x5b\xc7\xcb\x2f\xd6\x60\xa8\xcb\xd6\xce\x76\x9b\xcd\xd2\x6b\xa9\x7b\x8b\x1a\x1d\xe8\x72\xd3\x59\x8f\x91\x6e\xfc\x5c\xf7\x83\x86\xf4\xdd\x97\xdf\x0e\x5a\xa3\xe3\xec\xfe\x45\x7c\xb2\xe5\xc6\xf6\x27\xf1\xd5\x7e\xd0\x73\xd0\xff\xb7\xcd\x98\xda\x0f\xba\xbc\xc9\xab\x97\xf3\x58\xcf\x9f\xf9\x3f\x01\x00\x00\xff\xff\x2f\x92\x73\x9b\x8b\x04\x00\x00"), - }, - "/src/testing/testing.go": &vfsgen۰CompressedFileInfo{ - name: "testing.go", - modTime: time.Date(2019, 3, 5, 14, 5, 37, 324073976, time.UTC), - uncompressedSize: 642, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x91\x4f\x6f\xd3\x40\x10\xc5\xcf\xde\x4f\xf1\xc8\x85\x16\x2c\xfb\x1e\x01\x17\x50\xf9\x23\x44\x0f\x6d\xcf\x68\x63\x8f\xe3\xc1\xeb\x59\xb3\x33\x4b\x04\x55\xbe\x3b\xda\xa4\x11\x21\xea\x79\xe7\xf7\x7b\x6f\x66\xdb\x16\xaf\x37\x99\x43\x8f\x1f\xea\xdc\xe2\xbb\xc9\x6f\x09\x46\x6a\x2c\x5b\xe7\x78\x5e\x62\x32\xac\x52\x16\xe3\x99\x56\xce\xb5\x2d\xee\x47\x42\x5e\xd4\x12\xf9\x19\x9d\x0f\x81\xd2\x37\x3f\x13\xbc\xf4\x18\x92\x9f\xe9\x6e\xe2\x05\x89\xc2\x6f\x44\xc1\x13\xda\xbc\x3f\x0c\x6a\x5d\x0c\x65\x72\xf1\xc2\x1d\x78\x80\x8d\x94\x08\x3e\x11\xfe\x50\x8a\x4f\x42\xc5\x10\xb3\xf4\x0d\x3e\xc5\x1d\xfd\xa2\x54\x5f\x7a\x8a\x86\x15\x12\x0d\x3c\x2f\x81\x66\x12\xa3\x1e\x43\x4c\xf8\x18\x97\x91\xd2\x97\x3b\x78\x83\x8d\xac\x28\x5c\x0d\x8d\xd8\x11\x3a\x2f\x2f\x0d\x59\xa9\x08\x6c\xf4\x67\xb8\x37\x8e\xd2\xe0\x41\xa9\x74\x52\x82\x5a\xde\x28\x58\xd4\xc8\xf7\x8d\x1b\xb2\x74\x67\xfb\x5e\x69\x59\x93\xc5\xae\xa1\x96\x58\xb6\x78\x74\x55\xdb\xe2\xe1\x99\xd3\x24\xfa\x99\x39\x91\xc2\xa3\x58\x4a\x90\x0f\x97\x2b\x35\x07\xfc\xfe\xf6\xc3\xed\x1a\x9f\x4f\xa5\xca\x85\x96\xa8\xca\x9b\x40\x8d\xab\x12\x59\x4e\x82\xd5\x9b\x2c\x93\xc4\x9d\xbc\x5b\xb9\xbd\x3b\x36\xbb\x7a\xd5\xc5\x79\x8e\x72\xfd\xef\x13\xce\x2a\x9e\xb2\x6e\xca\x5b\x69\xfa\xbd\xc6\xc0\x81\x6a\x04\x16\xaa\x11\x27\xac\xdf\x5e\x34\x3a\xe0\xd7\xae\xe2\x01\x2f\xe2\x54\xa0\x53\xfe\x7f\xb6\xc7\xbd\xab\xf6\xee\xf9\x27\x57\x55\x37\x1c\x68\x7d\xcc\x72\x55\xf5\x95\x85\xd6\xc7\xcc\x42\xed\xdd\xdf\x00\x00\x00\xff\xff\x1b\x9f\xb2\xfc\x82\x02\x00\x00"), - }, - "/src/text": &vfsgen۰DirInfo{ - name: "text", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225056982, time.UTC), - }, - "/src/text/template": &vfsgen۰DirInfo{ - name: "template", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225121786, time.UTC), - }, - "/src/text/template/template.go": &vfsgen۰FileInfo{ - name: "template.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225166851, time.UTC), - content: []byte("\x2f\x2f\x20\x2b\x62\x75\x69\x6c\x64\x20\x6a\x73\x0a\x0a\x70\x61\x63\x6b\x61\x67\x65\x20\x74\x65\x6d\x70\x6c\x61\x74\x65\x0a\x0a\x63\x6f\x6e\x73\x74\x20\x6d\x61\x78\x45\x78\x65\x63\x44\x65\x70\x74\x68\x20\x3d\x20\x33\x30\x30\x30\x0a"), - }, - "/src/time": &vfsgen۰DirInfo{ - name: "time", - modTime: time.Date(2019, 4, 10, 21, 38, 35, 793313431, time.UTC), - }, - "/src/time/time.go": &vfsgen۰CompressedFileInfo{ - name: "time.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225365536, time.UTC), - uncompressedSize: 2155, - - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x55\xdf\x6f\xdb\x36\x10\x7e\x26\xff\x8a\x9b\xb0\x21\x64\xa3\x48\xf9\x51\x64\x58\x10\x0f\xd8\x92\x35\x08\xd0\xd4\xc0\x92\xbe\xac\x28\x06\x9a\x3a\xd9\x74\x64\x52\x20\xa9\x38\x8e\xeb\xff\x7d\x20\x29\x2b\x76\xbb\x15\x98\x9e\xc4\xe3\xf1\xee\xfb\x3e\x1e\xef\xca\x12\x0e\x27\x9d\x6a\x2a\x98\x3b\x4a\x5b\x21\x1f\xc5\x14\xc1\xab\x05\x52\xaa\x16\xad\xb1\x1e\x18\x25\x99\xed\x74\xb0\x65\x94\x92\x6c\xaa\xfc\xac\x9b\x14\xd2\x2c\xca\xa9\x69\x67\x68\xe7\xee\xf5\x67\xee\x32\xca\x29\x2d\x4b\xb8\x13\x8f\x08\xae\xb3\x29\x5a\xf1\x51\xab\x67\xa8\x3b\x2d\x41\xe8\x2a\x99\x1e\xd4\x02\xc1\x79\xdb\x49\x0f\xca\x83\x45\xdf\x59\xed\x40\x58\x04\xd1\x2c\xc5\xca\x81\xd2\xb2\xe9\x2a\xac\x60\xa9\xfc\x0c\xfc\x4c\x39\xd8\x42\x64\x15\xba\x56\x79\x84\xeb\xab\x3f\x78\x1e\x12\x4e\x50\x8a\xce\x21\xf8\x19\xae\x0e\x2c\x82\x46\x0c\x47\x6b\x63\x41\x69\x8f\x56\x8b\x46\xbd\x08\xaf\x8c\x2e\xf1\x79\x6f\x0d\xa6\x7e\x45\x54\x5e\x0b\x8f\x05\xdc\x23\x82\x72\xae\x43\x98\x79\xdf\xba\x8b\xb2\xfc\x2e\xef\xe8\xea\xca\xd3\x9f\x7f\x29\x68\x64\xa9\xb4\xf2\x8c\xc3\x9a\x92\xb2\x04\xf1\x64\x54\x05\x15\x8a\x0a\xa4\xa9\x10\xb0\x51\x0b\xa5\x63\x6e\x4a\x9e\x84\x85\xbf\x21\x8a\x31\x82\x20\x13\x3b\xce\xe1\x98\xd3\x0d\xa5\x7e\xd5\x22\xf4\xda\x07\x07\xbb\x95\x6b\x4d\x89\x82\xf4\x29\xed\xcf\x4e\x29\x59\xce\x50\xf7\xcb\xf3\xb7\x94\xb4\x68\x95\xa9\x86\x65\xdd\x3b\x07\x68\x2c\xaa\x51\x0b\x89\xeb\x4d\x0e\x9d\xd2\xbe\xf5\x96\x53\x22\xec\x74\x1b\x70\xbb\x4d\x49\xc8\x6c\x3a\x0f\x6f\xe6\xae\x18\x4f\xe6\x28\x3d\x25\x42\x7a\xf5\x84\x00\x13\x63\x9a\x80\x72\xe0\xfb\xde\x48\xd1\x24\xd2\x15\x5c\x8c\x60\xee\x8a\x9b\xc6\x4c\x44\x53\xdc\xa0\x67\x59\x10\x36\xe3\xc5\x07\x5c\x32\x4e\x89\x0b\x1e\x55\x71\xef\xad\xd2\xd3\x60\x50\xc1\xa0\x74\x85\xcf\xbf\xaf\x3c\x32\x97\xc3\x01\x3b\xe0\x94\xcc\xbf\xb5\xf3\x60\x57\x35\x28\x18\x8d\xe0\xe8\x04\xbe\x7c\x81\x79\xff\xbb\xa6\x84\x34\x01\xc7\x7b\x23\x0b\x2d\xa2\xa8\xd9\xc7\x87\xab\x8c\x12\x92\x2a\x8c\x92\x0d\xfd\xc6\xc5\x7d\x52\x87\x27\x70\x01\xf3\xcf\x3b\x7b\x2f\x46\x87\xbd\x4f\x9f\xc3\xcf\x7a\xbd\x77\x26\x87\xaa\xb8\x12\x4d\xc3\xb2\x29\xfa\x70\x37\xc1\x67\x5c\xd7\x0e\x7d\xc6\x8b\x5b\x1d\x2e\xff\x0d\x1c\x9d\x1f\xe7\x50\x8b\xc6\xe1\x66\x33\x48\xd5\x5f\xe8\x07\xa1\x0d\xe3\xe9\x86\x02\xec\x84\xee\x7b\xa2\xed\x27\x4c\x69\xce\xdf\xc6\x44\x31\x0a\xbb\x53\x4d\xa3\x1c\x4a\xa3\x2b\x3e\xa4\xd3\x66\xc9\x38\x30\x87\x32\x79\xe5\xa0\xfb\xff\xb3\xd3\x1c\x16\x46\x9b\x64\x8f\xf7\xa6\x83\xd8\x7b\x00\x07\x60\x1a\xca\x3e\xcd\x7d\xca\x90\xa7\x18\x4c\xc3\x4f\xfb\x1b\x3c\x07\x3d\xa4\xbf\x6f\x10\x5b\x56\xc1\x75\x67\x63\xc1\xc7\x34\x32\xa4\x59\x88\x47\x64\x72\x26\x74\x5f\xd5\xeb\x4d\xb8\xed\x81\x7e\x22\xfb\xa3\x4b\x6c\x4d\xe7\xb3\x3c\x88\x73\xdb\xbf\xe5\x54\x8d\x2c\x56\x34\x87\x35\xc8\xc6\x38\x64\x92\xc3\x26\x01\x63\x55\xb9\x2b\x07\xa7\xe4\xf2\x48\x0e\xa8\x9c\x17\x36\xc6\xb5\xcc\xc3\x9b\xdd\x27\x16\xf1\xf9\xa2\x2f\xf2\x11\x78\xdb\x21\x25\x95\xaa\xeb\x80\x99\xf9\x22\xbe\xb4\xa3\x7d\x91\xf8\xa0\xcd\xde\x15\x84\x1a\x8d\x27\x7f\x85\x93\xcb\xcb\xb3\x93\x50\x9f\x50\x96\xb0\x10\x7e\x56\xdc\x89\xe7\xdb\xf4\x76\x77\x0b\x73\x7b\xe2\x12\x8e\x63\x2d\xc7\xc5\x08\x8e\xe3\xa6\x2f\xb6\xef\x71\xf7\x71\xfd\x3f\xa1\x28\xd9\x65\x17\x6b\x93\x92\x90\xd6\x17\x7d\xd3\xf8\x61\xd4\xe7\x26\x3d\xd9\xc3\xd1\xb0\x19\xac\xbb\xda\x71\x4a\x02\x30\x32\x35\xe0\x8b\x9a\xf9\x42\xd8\x69\xec\x5e\x24\x5c\x43\x00\x7f\x78\xc2\x77\x54\x37\xed\x7f\x88\x1e\x9a\x49\x48\xfa\x35\x2d\xd9\xa0\xb0\xaf\xbc\x06\x05\x38\x25\x4b\xe1\x7e\x4b\x3c\x2e\x02\xc0\xc4\x89\xfe\x0b\xbb\xbe\x80\x07\xff\x01\x4f\x6d\xac\xc4\xbf\x54\xfb\x4e\x35\xf8\xce\xd8\x07\x74\x3e\x34\xa3\x17\xd5\x8
e\x75\xb3\x8a\x98\x82\x62\x1b\x4a\x43\x93\x0e\x2f\xfc\xde\x74\x56\xa2\x8b\x5d\xc1\xc5\xd6\x15\x5e\x6e\x62\x52\xdc\x8c\xff\x1c\x8f\x1f\x18\x87\x43\xc8\xca\x46\x4d\xca\x60\x2d\xc3\x31\xa5\x6b\x53\xbc\xa8\x36\xcb\x43\xb0\xb2\x7c\xed\x67\xa0\x1c\x48\xd3\xaa\x30\xa9\xac\x59\x40\x0a\xfa\x3a\xe7\xbc\xe9\xa7\x47\x9a\xc6\x4a\x4f\xc3\xac\x64\x4e\x69\x19\x47\x1d\x58\x14\x4d\x9c\x5e\xc3\x91\xca\xa0\xd3\x07\x9e\x0f\x93\x68\x68\x9d\x7d\xf4\x1c\x24\x4c\x56\x1e\x63\xf3\xd9\x6f\x3d\x5f\x15\x8d\xdb\xf6\x9c\x18\x64\x5c\xa7\xca\xda\xed\x4f\xa9\x7f\x67\x5b\xbf\xc0\xe1\x6a\x26\xec\x95\xa9\x30\xcb\x41\xf2\xbe\x17\xd2\x0d\xfd\x27\x00\x00\xff\xff\xbc\xb4\x65\x1c\x6b\x08\x00\x00"), - }, - "/src/time/time_test.go": &vfsgen۰CompressedFileInfo{ - name: "time_test.go", - modTime: time.Date(2019, 4, 10, 21, 38, 13, 208335917, time.UTC), - uncompressedSize: 147, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\x8c\xc1\x0e\x82\x30\x10\x05\xcf\xec\x57\xbc\xf4\xd4\x6a\x02\x7f\xe2\x05\xee\xa6\xd6\x05\x56\xa0\x6d\xe8\x36\x1e\x8c\xff\x6e\x9a\x78\x9d\xc9\xcc\x30\xe0\xfa\xa8\xb2\x3f\xf1\x2a\x44\xd9\x87\xcd\x2f\x0c\x95\x83\xef\xca\x45\x89\xe4\xc8\xe9\x54\x58\xea\x4c\x03\x12\x17\x43\x8e\x68\xae\x31\x60\xe2\xa2\xe3\xce\x9c\xad\xe2\xf2\xb7\xfd\xe4\xf0\xa1\x4e\xfb\x71\x93\x6c\x4d\x3b\xf5\xb7\xf4\xb6\x0e\x52\x10\x93\xc2\x87\x50\x4f\xaf\x0c\x8e\xa9\x2e\x2b\xe6\x74\x42\x57\x46\xeb\x8d\xa3\x2f\xfd\x02\x00\x00\xff\xff\x49\x24\xa9\x3b\x93\x00\x00\x00"), - }, - "/src/unicode": &vfsgen۰DirInfo{ - name: "unicode", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225492852, time.UTC), - }, - "/src/unicode/unicode.go": &vfsgen۰CompressedFileInfo{ - name: "unicode.go", - modTime: time.Date(2019, 1, 3, 14, 55, 7, 225540323, time.UTC), - uncompressedSize: 658, - - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\x91\x41\x8f\xd3\x30\x10\x85\xcf\xf6\xaf\x78\xa7\x28\x51\xba\x64\xcb\x71\xd5\x72\x29\x12\x08\xb1\x97\x72\xac\x0a\xf2\x3a\x93\xc6\xe0\xd8\xd6\xc4\x91\x40\xdb\xfe\x77\x64\x27\x0d\xcb\xcd\x9e\x79\xf3\x66\xe6\x9b\xa6\x41\xfd\x32\x19\xdb\xe2\xe7\x28\x65\x50\xfa\x97\xba\x10\x26\x67\xb4\x6f\x49\xca\x6e\x72\x1a\xd1\x97\x3f\xb4\x1a\x09\xc6\xc5\x0d\x18\x3c\x39\xda\x20\x45\x8e\xca\x5d\x08\xa7\xf3\xe1\xfe\xae\x50\x0e\x2a\x04\x6a\x8f\x93\xa3\x45\xd8\xf9\xc9\xb5\xcf\x2a\x04\xe3\x2e\x78\xf1\xde\x56\x78\x95\xc2\x74\x98\x4d\x77\x78\xc4\xf5\x8a\x67\xf5\xfb\x90\xbf\xfb\x25\xfe\x2a\x85\x60\x8a\x13\x3b\x1c\x29\x58\xa5\x69\x20\x17\x0f\xbd\xe2\x0d\x3a\x65\x47\x92\xe2\x26\x85\xf5\x78\xda\xe3\x51\x8a\xde\xa4\x87\x25\x57\xae\x83\x55\x52\x74\x9e\x61\x3d\x76\xe8\x4d\x36\x1c\xb2\xc8\xa3\x46\xd9\x9b\x07\xeb\xab\xe6\xbd\x14\x42\x73\x0a\x17\x6b\xe1\x69\x38\xa3\x69\x10\x88\x3b\xcf\x83\x72\x9a\xa0\xd9\x44\xa3\x95\x45\x72\xfc\xe4\x43\x4f\xfc\xe5\xdb\x13\x2e\x14\xa1\xda\x96\x69\x1c\xd1\x13\x27\x44\x63\x24\xd5\xc2\x77\xd0\x3e\xfc\x49\x2b\xc7\x9e\xb0\x02\x92\x22\x6d\x9e\xc0\x94\x9a\xdf\x7d\xf5\x55\x5a\x98\x51\x14\xe0\xfc\x5a\x12\x9f\x4d\x86\x24\x44\x4b\x36\xaa\x34\xdd\x3d\xf3\x31\x05\x4e\x19\xd1\xb9\x4a\x0a\xd3\x61\x16\x7d\x48\x0c\x33\xf7\x5c\x79\x87\xf7\xb6\x57\x8d\xb2\xe4\x87\x37\x91\xaa\xf8\xbe\xc5\x75\xd6\x64\xcf\x62\x5b\x55\x1b\x44\x9e\xd2\xa4\x09\xf0\x3f\x1f\xd4\x73\xa3\x35\x7d\x5b\x96\xc1\xee\xbf\x26\xb9\x7b\x6f\xb0\xc7\x90\x44\x20\xbb\x5c\x33\x1d\x6b\x8f\x01\x35\xb6\x73\xf5\x4d\xae\xe6\xf7\x9b\xde\xe4\xdf\x00\x00\x00\xff\xff\x20\xe3\x22\xd1\x92\x02\x00\x00"), - }, - } - fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src"].(os.FileInfo), - } - fs["/src"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - 
fs["/src/bytes"].(os.FileInfo), - fs["/src/crypto"].(os.FileInfo), - fs["/src/database"].(os.FileInfo), - fs["/src/debug"].(os.FileInfo), - fs["/src/encoding"].(os.FileInfo), - fs["/src/fmt"].(os.FileInfo), - fs["/src/go"].(os.FileInfo), - fs["/src/internal"].(os.FileInfo), - fs["/src/io"].(os.FileInfo), - fs["/src/math"].(os.FileInfo), - fs["/src/net"].(os.FileInfo), - fs["/src/os"].(os.FileInfo), - fs["/src/reflect"].(os.FileInfo), - fs["/src/regexp"].(os.FileInfo), - fs["/src/runtime"].(os.FileInfo), - fs["/src/strings"].(os.FileInfo), - fs["/src/sync"].(os.FileInfo), - fs["/src/syscall"].(os.FileInfo), - fs["/src/testing"].(os.FileInfo), - fs["/src/text"].(os.FileInfo), - fs["/src/time"].(os.FileInfo), - fs["/src/unicode"].(os.FileInfo), - } - fs["/src/bytes"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/bytes/bytes.go"].(os.FileInfo), - fs["/src/bytes/bytes_test.go"].(os.FileInfo), - } - fs["/src/crypto"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/crypto/internal"].(os.FileInfo), - fs["/src/crypto/rand"].(os.FileInfo), - fs["/src/crypto/x509"].(os.FileInfo), - } - fs["/src/crypto/internal"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/crypto/internal/subtle"].(os.FileInfo), - } - fs["/src/crypto/internal/subtle"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/crypto/internal/subtle/aliasing.go"].(os.FileInfo), - } - fs["/src/crypto/rand"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/crypto/rand/rand.go"].(os.FileInfo), - } - fs["/src/crypto/x509"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/crypto/x509/x509.go"].(os.FileInfo), - fs["/src/crypto/x509/x509_test.go"].(os.FileInfo), - } - fs["/src/database"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/database/sql"].(os.FileInfo), - } - fs["/src/database/sql"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/database/sql/driver"].(os.FileInfo), - } - fs["/src/database/sql/driver"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/database/sql/driver/driver_test.go"].(os.FileInfo), - } - fs["/src/debug"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/debug/elf"].(os.FileInfo), - } - fs["/src/debug/elf"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/debug/elf/elf_test.go"].(os.FileInfo), - } - fs["/src/encoding"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/encoding/gob"].(os.FileInfo), - fs["/src/encoding/json"].(os.FileInfo), - } - fs["/src/encoding/gob"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/encoding/gob/gob_test.go"].(os.FileInfo), - } - fs["/src/encoding/json"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/encoding/json/stream_test.go"].(os.FileInfo), - } - fs["/src/fmt"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/fmt/fmt_test.go"].(os.FileInfo), - } - fs["/src/go"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/go/token"].(os.FileInfo), - } - fs["/src/go/token"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/go/token/token_test.go"].(os.FileInfo), - } - fs["/src/internal"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/bytealg"].(os.FileInfo), - fs["/src/internal/cpu"].(os.FileInfo), - fs["/src/internal/fmtsort"].(os.FileInfo), - fs["/src/internal/poll"].(os.FileInfo), - fs["/src/internal/syscall"].(os.FileInfo), - fs["/src/internal/testenv"].(os.FileInfo), - } - fs["/src/internal/bytealg"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/bytealg/bytealg.go"].(os.FileInfo), - } - fs["/src/internal/cpu"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - 
fs["/src/internal/cpu/cpu.go"].(os.FileInfo), - } - fs["/src/internal/fmtsort"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/fmtsort/fmtsort_test.go"].(os.FileInfo), - } - fs["/src/internal/poll"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/poll/fd_poll.go"].(os.FileInfo), - } - fs["/src/internal/syscall"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/syscall/unix"].(os.FileInfo), - } - fs["/src/internal/syscall/unix"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/syscall/unix/unix.go"].(os.FileInfo), - } - fs["/src/internal/testenv"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/internal/testenv/testenv.go"].(os.FileInfo), - } - fs["/src/io"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/io/io_test.go"].(os.FileInfo), - } - fs["/src/math"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/math/big"].(os.FileInfo), - fs["/src/math/bits"].(os.FileInfo), - fs["/src/math/math.go"].(os.FileInfo), - fs["/src/math/math_test.go"].(os.FileInfo), - fs["/src/math/rand"].(os.FileInfo), - } - fs["/src/math/big"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/math/big/big.go"].(os.FileInfo), - fs["/src/math/big/big_test.go"].(os.FileInfo), - } - fs["/src/math/bits"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/math/bits/bits.go"].(os.FileInfo), - } - fs["/src/math/rand"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/math/rand/rand_test.go"].(os.FileInfo), - } - fs["/src/net"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/net/http"].(os.FileInfo), - fs["/src/net/net.go"].(os.FileInfo), - } - fs["/src/net/http"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/net/http/cookiejar"].(os.FileInfo), - fs["/src/net/http/fetch.go"].(os.FileInfo), - fs["/src/net/http/http.go"].(os.FileInfo), - } - fs["/src/net/http/cookiejar"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/net/http/cookiejar/example_test.go"].(os.FileInfo), - } - fs["/src/os"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/os/os.go"].(os.FileInfo), - fs["/src/os/signal"].(os.FileInfo), - } - fs["/src/os/signal"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/os/signal/signal.go"].(os.FileInfo), - } - fs["/src/reflect"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/reflect/example_test.go"].(os.FileInfo), - fs["/src/reflect/reflect.go"].(os.FileInfo), - fs["/src/reflect/reflect_go111.go"].(os.FileInfo), - fs["/src/reflect/reflect_go1111.go"].(os.FileInfo), - fs["/src/reflect/reflect_test.go"].(os.FileInfo), - fs["/src/reflect/swapper.go"].(os.FileInfo), - } - fs["/src/regexp"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/regexp/regexp_test.go"].(os.FileInfo), - } - fs["/src/runtime"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/runtime/debug"].(os.FileInfo), - fs["/src/runtime/pprof"].(os.FileInfo), - fs["/src/runtime/runtime.go"].(os.FileInfo), - } - fs["/src/runtime/debug"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/runtime/debug/debug.go"].(os.FileInfo), - } - fs["/src/runtime/pprof"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/runtime/pprof/pprof.go"].(os.FileInfo), - } - fs["/src/strings"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/strings/strings.go"].(os.FileInfo), - fs["/src/strings/strings_test.go"].(os.FileInfo), - } - fs["/src/sync"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/sync/atomic"].(os.FileInfo), - fs["/src/sync/cond.go"].(os.FileInfo), - fs["/src/sync/export_test.go"].(os.FileInfo), - 
fs["/src/sync/pool.go"].(os.FileInfo), - fs["/src/sync/sync.go"].(os.FileInfo), - fs["/src/sync/sync_test.go"].(os.FileInfo), - fs["/src/sync/waitgroup.go"].(os.FileInfo), - } - fs["/src/sync/atomic"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/sync/atomic/atomic.go"].(os.FileInfo), - fs["/src/sync/atomic/atomic_test.go"].(os.FileInfo), - } - fs["/src/syscall"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/syscall/syscall.go"].(os.FileInfo), - fs["/src/syscall/syscall_darwin.go"].(os.FileInfo), - fs["/src/syscall/syscall_linux.go"].(os.FileInfo), - fs["/src/syscall/syscall_nonlinux.go"].(os.FileInfo), - fs["/src/syscall/syscall_unix.go"].(os.FileInfo), - fs["/src/syscall/syscall_windows.go"].(os.FileInfo), - } - fs["/src/testing"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/testing/example.go"].(os.FileInfo), - fs["/src/testing/ioutil.go"].(os.FileInfo), - fs["/src/testing/testing.go"].(os.FileInfo), - } - fs["/src/text"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/text/template"].(os.FileInfo), - } - fs["/src/text/template"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/text/template/template.go"].(os.FileInfo), - } - fs["/src/time"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/time/time.go"].(os.FileInfo), - fs["/src/time/time_test.go"].(os.FileInfo), - } - fs["/src/unicode"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ - fs["/src/unicode/unicode.go"].(os.FileInfo), - } - - return fs -}() - -type vfsgen۰FS map[string]interface{} - -func (fs vfsgen۰FS) Open(path string) (http.File, error) { - path = pathpkg.Clean("/" + path) - f, ok := fs[path] - if !ok { - return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} - } - - switch f := f.(type) { - case *vfsgen۰CompressedFileInfo: - gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) - if err != nil { - // This should never happen because we generate the gzip bytes such that they are always valid. - panic("unexpected error reading own gzip compressed bytes: " + err.Error()) - } - return &vfsgen۰CompressedFile{ - vfsgen۰CompressedFileInfo: f, - gr: gr, - }, nil - case *vfsgen۰FileInfo: - return &vfsgen۰File{ - vfsgen۰FileInfo: f, - Reader: bytes.NewReader(f.content), - }, nil - case *vfsgen۰DirInfo: - return &vfsgen۰Dir{ - vfsgen۰DirInfo: f, - }, nil - default: - // This should never happen because we generate only the above types. - panic(fmt.Sprintf("unexpected type %T", f)) - } -} - -// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. -type vfsgen۰CompressedFileInfo struct { - name string - modTime time.Time - compressedContent []byte - uncompressedSize int64 -} - -func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { - return nil, fmt.Errorf("cannot Readdir from file %s", f.name) -} -func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } - -func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { - return f.compressedContent -} - -func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } -func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } -func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } -func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } -func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } -func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } - -// vfsgen۰CompressedFile is an opened compressedFile instance. 
-// vfsgen۰CompressedFile is an opened compressedFile instance.
-type vfsgen۰CompressedFile struct {
-	*vfsgen۰CompressedFileInfo
-	gr      *gzip.Reader
-	grPos   int64 // Actual gr uncompressed position.
-	seekPos int64 // Seek uncompressed position.
-}
-
-func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
-	if f.grPos > f.seekPos {
-		// Rewind to beginning.
-		err = f.gr.Reset(bytes.NewReader(f.compressedContent))
-		if err != nil {
-			return 0, err
-		}
-		f.grPos = 0
-	}
-	if f.grPos < f.seekPos {
-		// Fast-forward.
-		_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
-		if err != nil {
-			return 0, err
-		}
-		f.grPos = f.seekPos
-	}
-	n, err = f.gr.Read(p)
-	f.grPos += int64(n)
-	f.seekPos = f.grPos
-	return n, err
-}
-func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
-	switch whence {
-	case io.SeekStart:
-		f.seekPos = 0 + offset
-	case io.SeekCurrent:
-		f.seekPos += offset
-	case io.SeekEnd:
-		f.seekPos = f.uncompressedSize + offset
-	default:
-		panic(fmt.Errorf("invalid whence value: %v", whence))
-	}
-	return f.seekPos, nil
-}
-func (f *vfsgen۰CompressedFile) Close() error {
-	return f.gr.Close()
-}
-
-// vfsgen۰FileInfo is a static definition of an uncompressed file (because it's not worth gzip compressing).
-type vfsgen۰FileInfo struct {
-	name    string
-	modTime time.Time
-	content []byte
-}
-
-func (f *vfsgen۰FileInfo) Readdir(count int) ([]os.FileInfo, error) {
-	return nil, fmt.Errorf("cannot Readdir from file %s", f.name)
-}
-func (f *vfsgen۰FileInfo) Stat() (os.FileInfo, error) { return f, nil }
-
-func (f *vfsgen۰FileInfo) NotWorthGzipCompressing() {}
-
-func (f *vfsgen۰FileInfo) Name() string       { return f.name }
-func (f *vfsgen۰FileInfo) Size() int64        { return int64(len(f.content)) }
-func (f *vfsgen۰FileInfo) Mode() os.FileMode  { return 0444 }
-func (f *vfsgen۰FileInfo) ModTime() time.Time { return f.modTime }
-func (f *vfsgen۰FileInfo) IsDir() bool        { return false }
-func (f *vfsgen۰FileInfo) Sys() interface{}   { return nil }
-
-// vfsgen۰File is an opened file instance.
-type vfsgen۰File struct {
-	*vfsgen۰FileInfo
-	*bytes.Reader
-}
-
-func (f *vfsgen۰File) Close() error {
-	return nil
-}
-
-// vfsgen۰DirInfo is a static definition of a directory.
-type vfsgen۰DirInfo struct {
-	name    string
-	modTime time.Time
-	entries []os.FileInfo
-}
-
-func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
-	return 0, fmt.Errorf("cannot Read from directory %s", d.name)
-}
-func (d *vfsgen۰DirInfo) Close() error               { return nil }
-func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }
-
-func (d *vfsgen۰DirInfo) Name() string       { return d.name }
-func (d *vfsgen۰DirInfo) Size() int64        { return 0 }
-func (d *vfsgen۰DirInfo) Mode() os.FileMode  { return 0755 | os.ModeDir }
-func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
-func (d *vfsgen۰DirInfo) IsDir() bool        { return true }
-func (d *vfsgen۰DirInfo) Sys() interface{}   { return nil }
-
-// vfsgen۰Dir is an opened dir instance.
-type vfsgen۰Dir struct {
-	*vfsgen۰DirInfo
-	pos int // Position within entries for Seek and Readdir.
-}
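The Read and Seek methods of vfsgen۰CompressedFile above emulate random access over a forward-only gzip stream: a backward seek is satisfied by resetting the reader to the start of the compressed bytes, a forward seek by decompressing and discarding up to the target offset. A self-contained sketch of the same technique, with illustrative names of my own:

package gzipseek

import (
	"bytes"
	"compress/gzip"
	"io"
	"io/ioutil"
)

// readAt decompresses just enough of a gzip blob to return n bytes starting
// at offset, using the same reset-then-discard strategy as the file above.
func readAt(compressed []byte, offset int64, n int) ([]byte, error) {
	gr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer gr.Close()
	// "Fast-forward": gzip cannot seek, so discard everything before offset.
	if _, err := io.CopyN(ioutil.Discard, gr, offset); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(gr, buf); err != nil {
		return nil, err
	}
	return buf, nil
}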
-
-func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
-	if offset == 0 && whence == io.SeekStart {
-		d.pos = 0
-		return 0, nil
-	}
-	return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
-}
-
-func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
-	if d.pos >= len(d.entries) && count > 0 {
-		return nil, io.EOF
-	}
-	if count <= 0 || count > len(d.entries)-d.pos {
-		count = len(d.entries) - d.pos
-	}
-	e := d.entries[d.pos : d.pos+count]
-	d.pos += count
-	return e, nil
-}
diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/package.go b/vendor/github.com/gopherjs/gopherjs/compiler/package.go
deleted file mode 100644
index f841b145b..000000000
--- a/vendor/github.com/gopherjs/gopherjs/compiler/package.go
+++ /dev/null
@@ -1,809 +0,0 @@
-package compiler
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"go/ast"
-	"go/constant"
-	"go/token"
-	"go/types"
-	"sort"
-	"strings"
-
-	"github.com/gopherjs/gopherjs/compiler/analysis"
-	"github.com/neelance/astrewrite"
-	"golang.org/x/tools/go/gcexportdata"
-	"golang.org/x/tools/go/types/typeutil"
-)
-
-type pkgContext struct {
-	*analysis.Info
-	additionalSelections map[*ast.SelectorExpr]selection
-
-	typeNames    []*types.TypeName
-	pkgVars      map[string]string
-	objectNames  map[types.Object]string
-	varPtrNames  map[*types.Var]string
-	anonTypes    []*types.TypeName
-	anonTypeMap  typeutil.Map
-	escapingVars map[*types.Var]bool
-	indentation  int
-	dependencies map[types.Object]bool
-	minify       bool
-	fileSet      *token.FileSet
-	errList      ErrorList
-}
-
-func (p *pkgContext) SelectionOf(e *ast.SelectorExpr) (selection, bool) {
-	if sel, ok := p.Selections[e]; ok {
-		return sel, true
-	}
-	if sel, ok := p.additionalSelections[e]; ok {
-		return sel, true
-	}
-	return nil, false
-}
-
-type selection interface {
-	Kind() types.SelectionKind
-	Recv() types.Type
-	Index() []int
-	Obj() types.Object
-	Type() types.Type
-}
-
-type fakeSelection struct {
-	kind  types.SelectionKind
-	recv  types.Type
-	index []int
-	obj   types.Object
-	typ   types.Type
-}
-
-func (sel *fakeSelection) Kind() types.SelectionKind { return sel.kind }
-func (sel *fakeSelection) Recv() types.Type          { return sel.recv }
-func (sel *fakeSelection) Index() []int              { return sel.index }
-func (sel *fakeSelection) Obj() types.Object         { return sel.obj }
-func (sel *fakeSelection) Type() types.Type          { return sel.typ }
-
-type funcContext struct {
-	*analysis.FuncInfo
-	p             *pkgContext
-	parent        *funcContext
-	sig           *types.Signature
-	allVars       map[string]int
-	localVars     []string
-	resultNames   []ast.Expr
-	flowDatas     map[*types.Label]*flowData
-	caseCounter   int
-	labelCases    map[*types.Label]int
-	output        []byte
-	delayedOutput []byte
-	posAvailable  bool
-	pos           token.Pos
-}
-
-type flowData struct {
-	postStmt  func()
-	beginCase int
-	endCase   int
-}
-
-type ImportContext struct {
-	Packages map[string]*types.Package
-	Import   func(string) (*Archive, error)
-}
-
-// packageImporter implements go/types.Importer interface.
-type packageImporter struct {
-	importContext *ImportContext
-	importError   *error // A pointer to importError in Compile.
-}
-
-func (pi packageImporter) Import(path string) (*types.Package, error) {
-	if path == "unsafe" {
-		return types.Unsafe, nil
-	}
-
-	a, err := pi.importContext.Import(path)
-	if err != nil {
-		if *pi.importError == nil {
-			// If import failed, show first error of import only (https://github.com/gopherjs/gopherjs/issues/119).
-			*pi.importError = err
-		}
-		return nil, err
-	}
-
-	return pi.importContext.Packages[a.ImportPath], nil
-}
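For context, packageImporter exists to be plugged into go/types: during type checking, the checker calls Importer.Import for every dependency path it encounters. A stripped-down sketch of that wiring, standard library only, with illustrative names (check is not part of the deleted file):

package typecheck

import (
	"go/ast"
	"go/token"
	"go/types"
)

// check type-checks files with a caller-supplied Importer, the same way
// Compile below hands packageImporter to types.Config.
func check(path string, fset *token.FileSet, files []*ast.File, imp types.Importer) (*types.Package, error) {
	conf := &types.Config{Importer: imp}
	return conf.Check(path, fset, files, nil)
}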
- *pi.importError = err - } - return nil, err - } - - return pi.importContext.Packages[a.ImportPath], nil -} - -func Compile(importPath string, files []*ast.File, fileSet *token.FileSet, importContext *ImportContext, minify bool) (*Archive, error) { - typesInfo := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - Scopes: make(map[ast.Node]*types.Scope), - } - - var importError error - var errList ErrorList - var previousErr error - config := &types.Config{ - Importer: packageImporter{ - importContext: importContext, - importError: &importError, - }, - Sizes: sizes32, - Error: func(err error) { - if previousErr != nil && previousErr.Error() == err.Error() { - return - } - errList = append(errList, err) - previousErr = err - }, - } - typesPkg, err := config.Check(importPath, fileSet, files, typesInfo) - if importError != nil { - return nil, importError - } - if errList != nil { - if len(errList) > 10 { - pos := token.NoPos - if last, ok := errList[9].(types.Error); ok { - pos = last.Pos - } - errList = append(errList[:10], types.Error{Fset: fileSet, Pos: pos, Msg: "too many errors"}) - } - return nil, errList - } - if err != nil { - return nil, err - } - importContext.Packages[importPath] = typesPkg - - exportData := new(bytes.Buffer) - if err := gcexportdata.Write(exportData, nil, typesPkg); err != nil { - return nil, fmt.Errorf("failed to write export data: %v", err) - } - encodedFileSet := new(bytes.Buffer) - if err := fileSet.Write(json.NewEncoder(encodedFileSet).Encode); err != nil { - return nil, err - } - - simplifiedFiles := make([]*ast.File, len(files)) - for i, file := range files { - simplifiedFiles[i] = astrewrite.Simplify(file, typesInfo, false) - } - - isBlocking := func(f *types.Func) bool { - archive, err := importContext.Import(f.Pkg().Path()) - if err != nil { - panic(err) - } - fullName := f.FullName() - for _, d := range archive.Declarations { - if string(d.FullName) == fullName { - return d.Blocking - } - } - panic(fullName) - } - pkgInfo := analysis.AnalyzePkg(simplifiedFiles, fileSet, typesInfo, typesPkg, isBlocking) - c := &funcContext{ - FuncInfo: pkgInfo.InitFuncInfo, - p: &pkgContext{ - Info: pkgInfo, - additionalSelections: make(map[*ast.SelectorExpr]selection), - - pkgVars: make(map[string]string), - objectNames: make(map[types.Object]string), - varPtrNames: make(map[*types.Var]string), - escapingVars: make(map[*types.Var]bool), - indentation: 1, - dependencies: make(map[types.Object]bool), - minify: minify, - fileSet: fileSet, - }, - allVars: make(map[string]int), - flowDatas: map[*types.Label]*flowData{nil: {}}, - caseCounter: 1, - labelCases: make(map[*types.Label]int), - } - for name := range reservedKeywords { - c.allVars[name] = 1 - } - - // imports - var importDecls []*Decl - var importedPaths []string - for _, importedPkg := range typesPkg.Imports() { - if importedPkg == types.Unsafe { - // Prior to Go 1.9, unsafe import was excluded by Imports() method, - // but now we do it here to maintain previous behavior. 
- continue - } - c.p.pkgVars[importedPkg.Path()] = c.newVariableWithLevel(importedPkg.Name(), true) - importedPaths = append(importedPaths, importedPkg.Path()) - } - sort.Strings(importedPaths) - for _, impPath := range importedPaths { - id := c.newIdent(fmt.Sprintf(`%s.$init`, c.p.pkgVars[impPath]), types.NewSignature(nil, nil, nil, false)) - call := &ast.CallExpr{Fun: id} - c.Blocking[call] = true - c.Flattened[call] = true - importDecls = append(importDecls, &Decl{ - Vars: []string{c.p.pkgVars[impPath]}, - DeclCode: []byte(fmt.Sprintf("\t%s = $packages[\"%s\"];\n", c.p.pkgVars[impPath], impPath)), - InitCode: c.CatchOutput(1, func() { c.translateStmt(&ast.ExprStmt{X: call}, nil) }), - }) - } - - var functions []*ast.FuncDecl - var vars []*types.Var - for _, file := range simplifiedFiles { - for _, decl := range file.Decls { - switch d := decl.(type) { - case *ast.FuncDecl: - sig := c.p.Defs[d.Name].(*types.Func).Type().(*types.Signature) - var recvType types.Type - if sig.Recv() != nil { - recvType = sig.Recv().Type() - if ptr, isPtr := recvType.(*types.Pointer); isPtr { - recvType = ptr.Elem() - } - } - if sig.Recv() == nil { - c.objectName(c.p.Defs[d.Name].(*types.Func)) // register toplevel name - } - if !isBlank(d.Name) { - functions = append(functions, d) - } - case *ast.GenDecl: - switch d.Tok { - case token.TYPE: - for _, spec := range d.Specs { - o := c.p.Defs[spec.(*ast.TypeSpec).Name].(*types.TypeName) - c.p.typeNames = append(c.p.typeNames, o) - c.objectName(o) // register toplevel name - } - case token.VAR: - for _, spec := range d.Specs { - for _, name := range spec.(*ast.ValueSpec).Names { - if !isBlank(name) { - o := c.p.Defs[name].(*types.Var) - vars = append(vars, o) - c.objectName(o) // register toplevel name - } - } - } - case token.CONST: - // skip, constants are inlined - } - } - } - } - - collectDependencies := func(f func()) []string { - c.p.dependencies = make(map[types.Object]bool) - f() - var deps []string - for o := range c.p.dependencies { - qualifiedName := o.Pkg().Path() + "." + o.Name() - if f, ok := o.(*types.Func); ok && f.Type().(*types.Signature).Recv() != nil { - deps = append(deps, qualifiedName+"~") - continue - } - deps = append(deps, qualifiedName) - } - sort.Strings(deps) - return deps - } - - // variables - var varDecls []*Decl - varsWithInit := make(map[*types.Var]bool) - for _, init := range c.p.InitOrder { - for _, o := range init.Lhs { - varsWithInit[o] = true - } - } - for _, o := range vars { - var d Decl - if !o.Exported() { - d.Vars = []string{c.objectName(o)} - } - if c.p.HasPointer[o] && !o.Exported() { - d.Vars = append(d.Vars, c.varPtrName(o)) - } - if _, ok := varsWithInit[o]; !ok { - d.DceDeps = collectDependencies(func() { - d.InitCode = []byte(fmt.Sprintf("\t\t%s = %s;\n", c.objectName(o), c.translateExpr(c.zeroValue(o.Type())).String())) - }) - } - d.DceObjectFilter = o.Name() - varDecls = append(varDecls, &d) - } - for _, init := range c.p.InitOrder { - lhs := make([]ast.Expr, len(init.Lhs)) - for i, o := range init.Lhs { - ident := ast.NewIdent(o.Name()) - c.p.Defs[ident] = o - lhs[i] = c.setType(ident, o.Type()) - varsWithInit[o] = true - } - var d Decl - d.DceDeps = collectDependencies(func() { - c.localVars = nil - d.InitCode = c.CatchOutput(1, func() { - c.translateStmt(&ast.AssignStmt{ - Lhs: lhs, - Tok: token.DEFINE, - Rhs: []ast.Expr{init.Rhs}, - }, nil) - }) - d.Vars = append(d.Vars, c.localVars...) 
- }) - if len(init.Lhs) == 1 { - if !analysis.HasSideEffect(init.Rhs, c.p.Info.Info) { - d.DceObjectFilter = init.Lhs[0].Name() - } - } - varDecls = append(varDecls, &d) - } - - // functions - var funcDecls []*Decl - var mainFunc *types.Func - for _, fun := range functions { - o := c.p.Defs[fun.Name].(*types.Func) - funcInfo := c.p.FuncDeclInfos[o] - d := Decl{ - FullName: o.FullName(), - Blocking: len(funcInfo.Blocking) != 0, - } - if fun.Recv == nil { - d.Vars = []string{c.objectName(o)} - d.DceObjectFilter = o.Name() - switch o.Name() { - case "main": - mainFunc = o - d.DceObjectFilter = "" - case "init": - d.InitCode = c.CatchOutput(1, func() { - id := c.newIdent("", types.NewSignature(nil, nil, nil, false)) - c.p.Uses[id] = o - call := &ast.CallExpr{Fun: id} - if len(c.p.FuncDeclInfos[o].Blocking) != 0 { - c.Blocking[call] = true - } - c.translateStmt(&ast.ExprStmt{X: call}, nil) - }) - d.DceObjectFilter = "" - } - } - if fun.Recv != nil { - recvType := o.Type().(*types.Signature).Recv().Type() - ptr, isPointer := recvType.(*types.Pointer) - namedRecvType, _ := recvType.(*types.Named) - if isPointer { - namedRecvType = ptr.Elem().(*types.Named) - } - d.DceObjectFilter = namedRecvType.Obj().Name() - if !fun.Name.IsExported() { - d.DceMethodFilter = o.Name() + "~" - } - } - - d.DceDeps = collectDependencies(func() { - d.DeclCode = c.translateToplevelFunction(fun, funcInfo) - }) - funcDecls = append(funcDecls, &d) - } - if typesPkg.Name() == "main" { - if mainFunc == nil { - return nil, fmt.Errorf("missing main function") - } - id := c.newIdent("", types.NewSignature(nil, nil, nil, false)) - c.p.Uses[id] = mainFunc - call := &ast.CallExpr{Fun: id} - ifStmt := &ast.IfStmt{ - Cond: c.newIdent("$pkg === $mainPkg", types.Typ[types.Bool]), - Body: &ast.BlockStmt{ - List: []ast.Stmt{ - &ast.ExprStmt{X: call}, - &ast.AssignStmt{ - Lhs: []ast.Expr{c.newIdent("$mainFinished", types.Typ[types.Bool])}, - Tok: token.ASSIGN, - Rhs: []ast.Expr{c.newConst(types.Typ[types.Bool], constant.MakeBool(true))}, - }, - }, - }, - } - if len(c.p.FuncDeclInfos[mainFunc].Blocking) != 0 { - c.Blocking[call] = true - c.Flattened[ifStmt] = true - } - funcDecls = append(funcDecls, &Decl{ - InitCode: c.CatchOutput(1, func() { - c.translateStmt(ifStmt, nil) - }), - }) - } - - // named types - var typeDecls []*Decl - for _, o := range c.p.typeNames { - if o.IsAlias() { - continue - } - typeName := c.objectName(o) - d := Decl{ - Vars: []string{typeName}, - DceObjectFilter: o.Name(), - } - d.DceDeps = collectDependencies(func() { - d.DeclCode = c.CatchOutput(0, func() { - typeName := c.objectName(o) - lhs := typeName - if isPkgLevel(o) { - lhs += " = $pkg." 
+ encodeIdent(o.Name()) - } - size := int64(0) - constructor := "null" - switch t := o.Type().Underlying().(type) { - case *types.Struct: - params := make([]string, t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - params[i] = fieldName(t, i) + "_" - } - constructor = fmt.Sprintf("function(%s) {\n\t\tthis.$val = this;\n\t\tif (arguments.length === 0) {\n", strings.Join(params, ", ")) - for i := 0; i < t.NumFields(); i++ { - constructor += fmt.Sprintf("\t\t\tthis.%s = %s;\n", fieldName(t, i), c.translateExpr(c.zeroValue(t.Field(i).Type())).String()) - } - constructor += "\t\t\treturn;\n\t\t}\n" - for i := 0; i < t.NumFields(); i++ { - constructor += fmt.Sprintf("\t\tthis.%[1]s = %[1]s_;\n", fieldName(t, i)) - } - constructor += "\t}" - case *types.Basic, *types.Array, *types.Slice, *types.Chan, *types.Signature, *types.Interface, *types.Pointer, *types.Map: - size = sizes32.Sizeof(t) - } - c.Printf(`%s = $newType(%d, %s, "%s.%s", %t, "%s", %t, %s);`, lhs, size, typeKind(o.Type()), o.Pkg().Name(), o.Name(), o.Name() != "", o.Pkg().Path(), o.Exported(), constructor) - }) - d.MethodListCode = c.CatchOutput(0, func() { - named := o.Type().(*types.Named) - if _, ok := named.Underlying().(*types.Interface); ok { - return - } - var methods []string - var ptrMethods []string - for i := 0; i < named.NumMethods(); i++ { - method := named.Method(i) - name := method.Name() - if reservedKeywords[name] { - name += "$" - } - pkgPath := "" - if !method.Exported() { - pkgPath = method.Pkg().Path() - } - t := method.Type().(*types.Signature) - entry := fmt.Sprintf(`{prop: "%s", name: %s, pkg: "%s", typ: $funcType(%s)}`, name, encodeString(method.Name()), pkgPath, c.initArgs(t)) - if _, isPtr := t.Recv().Type().(*types.Pointer); isPtr { - ptrMethods = append(ptrMethods, entry) - continue - } - methods = append(methods, entry) - } - if len(methods) > 0 { - c.Printf("%s.methods = [%s];", c.typeName(named), strings.Join(methods, ", ")) - } - if len(ptrMethods) > 0 { - c.Printf("%s.methods = [%s];", c.typeName(types.NewPointer(named)), strings.Join(ptrMethods, ", ")) - } - }) - switch t := o.Type().Underlying().(type) { - case *types.Array, *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Slice, *types.Signature, *types.Struct: - d.TypeInitCode = c.CatchOutput(0, func() { - c.Printf("%s.init(%s);", c.objectName(o), c.initArgs(t)) - }) - } - }) - typeDecls = append(typeDecls, &d) - } - - // anonymous types - for _, t := range c.p.anonTypes { - d := Decl{ - Vars: []string{t.Name()}, - DceObjectFilter: t.Name(), - } - d.DceDeps = collectDependencies(func() { - d.DeclCode = []byte(fmt.Sprintf("\t%s = $%sType(%s);\n", t.Name(), strings.ToLower(typeKind(t.Type())[5:]), c.initArgs(t.Type()))) - }) - typeDecls = append(typeDecls, &d) - } - - var allDecls []*Decl - for _, d := range append(append(append(importDecls, typeDecls...), varDecls...), funcDecls...) 
{ - d.DeclCode = removeWhitespace(d.DeclCode, minify) - d.MethodListCode = removeWhitespace(d.MethodListCode, minify) - d.TypeInitCode = removeWhitespace(d.TypeInitCode, minify) - d.InitCode = removeWhitespace(d.InitCode, minify) - allDecls = append(allDecls, d) - } - - if len(c.p.errList) != 0 { - return nil, c.p.errList - } - - return &Archive{ - ImportPath: importPath, - Name: typesPkg.Name(), - Imports: importedPaths, - ExportData: exportData.Bytes(), - Declarations: allDecls, - FileSet: encodedFileSet.Bytes(), - Minified: minify, - }, nil -} - -func (c *funcContext) initArgs(ty types.Type) string { - switch t := ty.(type) { - case *types.Array: - return fmt.Sprintf("%s, %d", c.typeName(t.Elem()), t.Len()) - case *types.Chan: - return fmt.Sprintf("%s, %t, %t", c.typeName(t.Elem()), t.Dir()&types.SendOnly != 0, t.Dir()&types.RecvOnly != 0) - case *types.Interface: - methods := make([]string, t.NumMethods()) - for i := range methods { - method := t.Method(i) - pkgPath := "" - if !method.Exported() { - pkgPath = method.Pkg().Path() - } - methods[i] = fmt.Sprintf(`{prop: "%s", name: "%s", pkg: "%s", typ: $funcType(%s)}`, method.Name(), method.Name(), pkgPath, c.initArgs(method.Type())) - } - return fmt.Sprintf("[%s]", strings.Join(methods, ", ")) - case *types.Map: - return fmt.Sprintf("%s, %s", c.typeName(t.Key()), c.typeName(t.Elem())) - case *types.Pointer: - return fmt.Sprintf("%s", c.typeName(t.Elem())) - case *types.Slice: - return fmt.Sprintf("%s", c.typeName(t.Elem())) - case *types.Signature: - params := make([]string, t.Params().Len()) - for i := range params { - params[i] = c.typeName(t.Params().At(i).Type()) - } - results := make([]string, t.Results().Len()) - for i := range results { - results[i] = c.typeName(t.Results().At(i).Type()) - } - return fmt.Sprintf("[%s], [%s], %t", strings.Join(params, ", "), strings.Join(results, ", "), t.Variadic()) - case *types.Struct: - pkgPath := "" - fields := make([]string, t.NumFields()) - for i := range fields { - field := t.Field(i) - if !field.Exported() { - pkgPath = field.Pkg().Path() - } - fields[i] = fmt.Sprintf(`{prop: "%s", name: %s, embedded: %t, exported: %t, typ: %s, tag: %s}`, fieldName(t, i), encodeString(field.Name()), field.Anonymous(), field.Exported(), c.typeName(field.Type()), encodeString(t.Tag(i))) - } - return fmt.Sprintf(`"%s", [%s]`, pkgPath, strings.Join(fields, ", ")) - default: - panic("invalid type") - } -} - -func (c *funcContext) translateToplevelFunction(fun *ast.FuncDecl, info *analysis.FuncInfo) []byte { - o := c.p.Defs[fun.Name].(*types.Func) - sig := o.Type().(*types.Signature) - var recv *ast.Ident - if fun.Recv != nil && fun.Recv.List[0].Names != nil { - recv = fun.Recv.List[0].Names[0] - } - - var joinedParams string - primaryFunction := func(funcRef string) []byte { - if fun.Body == nil { - return []byte(fmt.Sprintf("\t%s = function() {\n\t\t$throwRuntimeError(\"native function not implemented: %s\");\n\t};\n", funcRef, o.FullName())) - } - - params, fun := translateFunction(fun.Type, recv, fun.Body, c, sig, info, funcRef) - joinedParams = strings.Join(params, ", ") - return []byte(fmt.Sprintf("\t%s = %s;\n", funcRef, fun)) - } - - code := bytes.NewBuffer(nil) - - if fun.Recv == nil { - funcRef := c.objectName(o) - code.Write(primaryFunction(funcRef)) - if fun.Name.IsExported() { - fmt.Fprintf(code, "\t$pkg.%s = %s;\n", encodeIdent(fun.Name.Name), funcRef) - } - return code.Bytes() - } - - recvType := sig.Recv().Type() - ptr, isPointer := recvType.(*types.Pointer) - namedRecvType, _ := 
recvType.(*types.Named) - if isPointer { - namedRecvType = ptr.Elem().(*types.Named) - } - typeName := c.objectName(namedRecvType.Obj()) - funName := fun.Name.Name - if reservedKeywords[funName] { - funName += "$" - } - - if _, isStruct := namedRecvType.Underlying().(*types.Struct); isStruct { - code.Write(primaryFunction(typeName + ".ptr.prototype." + funName)) - fmt.Fprintf(code, "\t%s.prototype.%s = function(%s) { return this.$val.%s(%s); };\n", typeName, funName, joinedParams, funName, joinedParams) - return code.Bytes() - } - - if isPointer { - if _, isArray := ptr.Elem().Underlying().(*types.Array); isArray { - code.Write(primaryFunction(typeName + ".prototype." + funName)) - fmt.Fprintf(code, "\t$ptrType(%s).prototype.%s = function(%s) { return (new %s(this.$get())).%s(%s); };\n", typeName, funName, joinedParams, typeName, funName, joinedParams) - return code.Bytes() - } - return primaryFunction(fmt.Sprintf("$ptrType(%s).prototype.%s", typeName, funName)) - } - - value := "this.$get()" - if isWrapped(recvType) { - value = fmt.Sprintf("new %s(%s)", typeName, value) - } - code.Write(primaryFunction(typeName + ".prototype." + funName)) - fmt.Fprintf(code, "\t$ptrType(%s).prototype.%s = function(%s) { return %s.%s(%s); };\n", typeName, funName, joinedParams, value, funName, joinedParams) - return code.Bytes() -} - -func translateFunction(typ *ast.FuncType, recv *ast.Ident, body *ast.BlockStmt, outerContext *funcContext, sig *types.Signature, info *analysis.FuncInfo, funcRef string) ([]string, string) { - if info == nil { - panic("nil info") - } - - c := &funcContext{ - FuncInfo: info, - p: outerContext.p, - parent: outerContext, - sig: sig, - allVars: make(map[string]int, len(outerContext.allVars)), - localVars: []string{}, - flowDatas: map[*types.Label]*flowData{nil: {}}, - caseCounter: 1, - labelCases: make(map[*types.Label]int), - } - for k, v := range outerContext.allVars { - c.allVars[k] = v - } - prevEV := c.p.escapingVars - - var params []string - for _, param := range typ.Params.List { - if len(param.Names) == 0 { - params = append(params, c.newVariable("param")) - continue - } - for _, ident := range param.Names { - if isBlank(ident) { - params = append(params, c.newVariable("param")) - continue - } - params = append(params, c.objectName(c.p.Defs[ident])) - } - } - - bodyOutput := string(c.CatchOutput(1, func() { - if len(c.Blocking) != 0 { - c.p.Scopes[body] = c.p.Scopes[typ] - c.handleEscapingVars(body) - } - - if c.sig != nil && c.sig.Results().Len() != 0 && c.sig.Results().At(0).Name() != "" { - c.resultNames = make([]ast.Expr, c.sig.Results().Len()) - for i := 0; i < c.sig.Results().Len(); i++ { - result := c.sig.Results().At(i) - c.Printf("%s = %s;", c.objectName(result), c.translateExpr(c.zeroValue(result.Type())).String()) - id := ast.NewIdent("") - c.p.Uses[id] = result - c.resultNames[i] = c.setType(id, result.Type()) - } - } - - if recv != nil && !isBlank(recv) { - this := "this" - if isWrapped(c.p.TypeOf(recv)) { - this = "this.$val" - } - c.Printf("%s = %s;", c.translateExpr(recv), this) - } - - c.translateStmtList(body.List) - if len(c.Flattened) != 0 && !endsWithReturn(body.List) { - c.translateStmt(&ast.ReturnStmt{}, nil) - } - })) - - sort.Strings(c.localVars) - - var prefix, suffix, functionName string - - if len(c.Flattened) != 0 { - c.localVars = append(c.localVars, "$s") - prefix = prefix + " $s = 0;" - } - - if c.HasDefer { - c.localVars = append(c.localVars, "$deferred") - suffix = " }" + suffix - if len(c.Blocking) != 0 { - suffix = " }" + suffix - } - } 
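Aside, not part of the patch: translateFunction above assembles its JavaScript output by appending each wrapper's opening text to prefix and prepending the matching closing text to suffix, so the wrapper added last ends up outermost around the function body. A minimal self-contained Go sketch of that composition pattern, using placeholder wrapper strings rather than the compiler's real output:

package main

import "fmt"

func main() {
	prefix, suffix := "", ""

	// The wrapper added first ends up innermost, next to the body:
	prefix = prefix + " s: while (true) { switch ($s) { case 0:"
	suffix = " } return; }" + suffix

	// A wrapper added later ends up outermost:
	prefix = " try {" + prefix
	suffix = " } finally { callDeferred(); }" + suffix

	body := " doWork();"
	fmt.Println("function() {" + prefix + body + suffix + " }")
	// Prints: function() { try { s: while (true) { switch ($s)
	// { case 0: doWork(); } return; } } finally { callDeferred(); } }
}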
- - if len(c.Blocking) != 0 { - c.localVars = append(c.localVars, "$r") - if funcRef == "" { - funcRef = "$b" - functionName = " $b" - } - var stores, loads string - for _, v := range c.localVars { - loads += fmt.Sprintf("%s = $f.%s; ", v, v) - stores += fmt.Sprintf("$f.%s = %s; ", v, v) - } - prefix = prefix + " var $f, $c = false; if (this !== undefined && this.$blk !== undefined) { $f = this; $c = true; " + loads + "}" - suffix = " if ($f === undefined) { $f = { $blk: " + funcRef + " }; } " + stores + "return $f;" + suffix - } - - if c.HasDefer { - prefix = prefix + " var $err = null; try {" - deferSuffix := " } catch(err) { $err = err;" - if len(c.Blocking) != 0 { - deferSuffix += " $s = -1;" - } - if c.resultNames == nil && c.sig.Results().Len() > 0 { - deferSuffix += fmt.Sprintf(" return%s;", c.translateResults(nil)) - } - deferSuffix += " } finally { $callDeferred($deferred, $err);" - if c.resultNames != nil { - deferSuffix += fmt.Sprintf(" if (!$curGoroutine.asleep) { return %s; }", c.translateResults(c.resultNames)) - } - if len(c.Blocking) != 0 { - deferSuffix += " if($curGoroutine.asleep) {" - } - suffix = deferSuffix + suffix - } - - if len(c.Flattened) != 0 { - prefix = prefix + " s: while (true) { switch ($s) { case 0:" - suffix = " } return; }" + suffix - } - - if c.HasDefer { - prefix = prefix + " $deferred = []; $deferred.index = $curGoroutine.deferStack.length; $curGoroutine.deferStack.push($deferred);" - } - - if prefix != "" { - bodyOutput = strings.Repeat("\t", c.p.indentation+1) + "/* */" + prefix + "\n" + bodyOutput - } - if suffix != "" { - bodyOutput = bodyOutput + strings.Repeat("\t", c.p.indentation+1) + "/* */" + suffix + "\n" - } - if len(c.localVars) != 0 { - bodyOutput = fmt.Sprintf("%svar %s;\n", strings.Repeat("\t", c.p.indentation+1), strings.Join(c.localVars, ", ")) + bodyOutput - } - - c.p.escapingVars = prevEV - - return params, fmt.Sprintf("function%s(%s) {\n%s%s}", functionName, strings.Join(params, ", "), bodyOutput, strings.Repeat("\t", c.p.indentation)) -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/genmin.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/genmin.go deleted file mode 100644 index 739dbf216..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/genmin.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "fmt" - "go/build" - "io/ioutil" - "log" - "os/exec" - "path/filepath" - "strings" - - "github.com/gopherjs/gopherjs/compiler/prelude" -) - -func main() { - if err := run(); err != nil { - log.Fatalln(err) - } -} - -func run() error { - bpkg, err := build.Import("github.com/gopherjs/gopherjs", "", build.FindOnly) - if err != nil { - return fmt.Errorf("failed to locate path for github.com/gopherjs/gopherjs: %v", err) - } - - preludeDir := filepath.Join(bpkg.Dir, "compiler", "prelude") - - args := []string{ - filepath.Join(bpkg.Dir, "node_modules", ".bin", "uglifyjs"), - "--config-file", - filepath.Join(preludeDir, "uglifyjs_options.json"), - } - - stderr := new(bytes.Buffer) - cmd := exec.Command(args[0], args[1:]...) - cmd.Stdin = strings.NewReader(prelude.Prelude) - cmd.Stderr = stderr - - out, err := cmd.Output() - if err != nil { - return fmt.Errorf("failed to run %v: %v\n%s", strings.Join(args, " "), err, stderr.String()) - } - - fn := "prelude_min.go" - - outStr := fmt.Sprintf(`// Code generated by genmin; DO NOT EDIT. - -package prelude - -// Minified is an uglifyjs-minified version of Prelude. 
-const Minified = %q -`, out) - - if err := ioutil.WriteFile(fn, []byte(outStr), 0644); err != nil { - return fmt.Errorf("failed to write to %v: %v", fn, err) - } - - return nil -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/goroutines.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/goroutines.go deleted file mode 100644 index d9780b65d..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/goroutines.go +++ /dev/null @@ -1,358 +0,0 @@ -package prelude - -const goroutines = ` -var $stackDepthOffset = 0; -var $getStackDepth = function() { - var err = new Error(); - if (err.stack === undefined) { - return undefined; - } - return $stackDepthOffset + err.stack.split("\n").length; -}; - -var $panicStackDepth = null, $panicValue; -var $callDeferred = function(deferred, jsErr, fromPanic) { - if (!fromPanic && deferred !== null && deferred.index >= $curGoroutine.deferStack.length) { - throw jsErr; - } - if (jsErr !== null) { - var newErr = null; - try { - $curGoroutine.deferStack.push(deferred); - $panic(new $jsErrorPtr(jsErr)); - } catch (err) { - newErr = err; - } - $curGoroutine.deferStack.pop(); - $callDeferred(deferred, newErr); - return; - } - if ($curGoroutine.asleep) { - return; - } - - $stackDepthOffset--; - var outerPanicStackDepth = $panicStackDepth; - var outerPanicValue = $panicValue; - - var localPanicValue = $curGoroutine.panicStack.pop(); - if (localPanicValue !== undefined) { - $panicStackDepth = $getStackDepth(); - $panicValue = localPanicValue; - } - - try { - while (true) { - if (deferred === null) { - deferred = $curGoroutine.deferStack[$curGoroutine.deferStack.length - 1]; - if (deferred === undefined) { - /* The panic reached the top of the stack. Clear it and throw it as a JavaScript error. 
*/ - $panicStackDepth = null; - if (localPanicValue.Object instanceof Error) { - throw localPanicValue.Object; - } - var msg; - if (localPanicValue.constructor === $String) { - msg = localPanicValue.$val; - } else if (localPanicValue.Error !== undefined) { - msg = localPanicValue.Error(); - } else if (localPanicValue.String !== undefined) { - msg = localPanicValue.String(); - } else { - msg = localPanicValue; - } - throw new Error(msg); - } - } - var call = deferred.pop(); - if (call === undefined) { - $curGoroutine.deferStack.pop(); - if (localPanicValue !== undefined) { - deferred = null; - continue; - } - return; - } - var r = call[0].apply(call[2], call[1]); - if (r && r.$blk !== undefined) { - deferred.push([r.$blk, [], r]); - if (fromPanic) { - throw null; - } - return; - } - - if (localPanicValue !== undefined && $panicStackDepth === null) { - throw null; /* error was recovered */ - } - } - } finally { - if (localPanicValue !== undefined) { - if ($panicStackDepth !== null) { - $curGoroutine.panicStack.push(localPanicValue); - } - $panicStackDepth = outerPanicStackDepth; - $panicValue = outerPanicValue; - } - $stackDepthOffset++; - } -}; - -var $panic = function(value) { - $curGoroutine.panicStack.push(value); - $callDeferred(null, null, true); -}; -var $recover = function() { - if ($panicStackDepth === null || ($panicStackDepth !== undefined && $panicStackDepth !== $getStackDepth() - 2)) { - return $ifaceNil; - } - $panicStackDepth = null; - return $panicValue; -}; -var $throw = function(err) { throw err; }; - -var $noGoroutine = { asleep: false, exit: false, deferStack: [], panicStack: [] }; -var $curGoroutine = $noGoroutine, $totalGoroutines = 0, $awakeGoroutines = 0, $checkForDeadlock = true; -var $mainFinished = false; -var $go = function(fun, args) { - $totalGoroutines++; - $awakeGoroutines++; - var $goroutine = function() { - try { - $curGoroutine = $goroutine; - var r = fun.apply(undefined, args); - if (r && r.$blk !== undefined) { - fun = function() { return r.$blk(); }; - args = []; - return; - } - $goroutine.exit = true; - } catch (err) { - if (!$goroutine.exit) { - throw err; - } - } finally { - $curGoroutine = $noGoroutine; - if ($goroutine.exit) { /* also set by runtime.Goexit() */ - $totalGoroutines--; - $goroutine.asleep = true; - } - if ($goroutine.asleep) { - $awakeGoroutines--; - if (!$mainFinished && $awakeGoroutines === 0 && $checkForDeadlock) { - console.error("fatal error: all goroutines are asleep - deadlock!"); - if ($global.process !== undefined) { - $global.process.exit(2); - } - } - } - } - }; - $goroutine.asleep = false; - $goroutine.exit = false; - $goroutine.deferStack = []; - $goroutine.panicStack = []; - $schedule($goroutine); -}; - -var $scheduled = []; -var $runScheduled = function() { - try { - var r; - while ((r = $scheduled.shift()) !== undefined) { - r(); - } - } finally { - if ($scheduled.length > 0) { - setTimeout($runScheduled, 0); - } - } -}; - -var $schedule = function(goroutine) { - if (goroutine.asleep) { - goroutine.asleep = false; - $awakeGoroutines++; - } - $scheduled.push(goroutine); - if ($curGoroutine === $noGoroutine) { - $runScheduled(); - } -}; - -var $setTimeout = function(f, t) { - $awakeGoroutines++; - return setTimeout(function() { - $awakeGoroutines--; - f(); - }, t); -}; - -var $block = function() { - if ($curGoroutine === $noGoroutine) { - $throwRuntimeError("cannot block in JavaScript callback, fix by wrapping code in goroutine"); - } - $curGoroutine.asleep = true; -}; - -var $send = function(chan, value) { - if 
(chan.$closed) { - $throwRuntimeError("send on closed channel"); - } - var queuedRecv = chan.$recvQueue.shift(); - if (queuedRecv !== undefined) { - queuedRecv([value, true]); - return; - } - if (chan.$buffer.length < chan.$capacity) { - chan.$buffer.push(value); - return; - } - - var thisGoroutine = $curGoroutine; - var closedDuringSend; - chan.$sendQueue.push(function(closed) { - closedDuringSend = closed; - $schedule(thisGoroutine); - return value; - }); - $block(); - return { - $blk: function() { - if (closedDuringSend) { - $throwRuntimeError("send on closed channel"); - } - } - }; -}; -var $recv = function(chan) { - var queuedSend = chan.$sendQueue.shift(); - if (queuedSend !== undefined) { - chan.$buffer.push(queuedSend(false)); - } - var bufferedValue = chan.$buffer.shift(); - if (bufferedValue !== undefined) { - return [bufferedValue, true]; - } - if (chan.$closed) { - return [chan.$elem.zero(), false]; - } - - var thisGoroutine = $curGoroutine; - var f = { $blk: function() { return this.value; } }; - var queueEntry = function(v) { - f.value = v; - $schedule(thisGoroutine); - }; - chan.$recvQueue.push(queueEntry); - $block(); - return f; -}; -var $close = function(chan) { - if (chan.$closed) { - $throwRuntimeError("close of closed channel"); - } - chan.$closed = true; - while (true) { - var queuedSend = chan.$sendQueue.shift(); - if (queuedSend === undefined) { - break; - } - queuedSend(true); /* will panic */ - } - while (true) { - var queuedRecv = chan.$recvQueue.shift(); - if (queuedRecv === undefined) { - break; - } - queuedRecv([chan.$elem.zero(), false]); - } -}; -var $select = function(comms) { - var ready = []; - var selection = -1; - for (var i = 0; i < comms.length; i++) { - var comm = comms[i]; - var chan = comm[0]; - switch (comm.length) { - case 0: /* default */ - selection = i; - break; - case 1: /* recv */ - if (chan.$sendQueue.length !== 0 || chan.$buffer.length !== 0 || chan.$closed) { - ready.push(i); - } - break; - case 2: /* send */ - if (chan.$closed) { - $throwRuntimeError("send on closed channel"); - } - if (chan.$recvQueue.length !== 0 || chan.$buffer.length < chan.$capacity) { - ready.push(i); - } - break; - } - } - - if (ready.length !== 0) { - selection = ready[Math.floor(Math.random() * ready.length)]; - } - if (selection !== -1) { - var comm = comms[selection]; - switch (comm.length) { - case 0: /* default */ - return [selection]; - case 1: /* recv */ - return [selection, $recv(comm[0])]; - case 2: /* send */ - $send(comm[0], comm[1]); - return [selection]; - } - } - - var entries = []; - var thisGoroutine = $curGoroutine; - var f = { $blk: function() { return this.selection; } }; - var removeFromQueues = function() { - for (var i = 0; i < entries.length; i++) { - var entry = entries[i]; - var queue = entry[0]; - var index = queue.indexOf(entry[1]); - if (index !== -1) { - queue.splice(index, 1); - } - } - }; - for (var i = 0; i < comms.length; i++) { - (function(i) { - var comm = comms[i]; - switch (comm.length) { - case 1: /* recv */ - var queueEntry = function(value) { - f.selection = [i, value]; - removeFromQueues(); - $schedule(thisGoroutine); - }; - entries.push([comm[0].$recvQueue, queueEntry]); - comm[0].$recvQueue.push(queueEntry); - break; - case 2: /* send */ - var queueEntry = function() { - if (comm[0].$closed) { - $throwRuntimeError("send on closed channel"); - } - f.selection = [i]; - removeFromQueues(); - $schedule(thisGoroutine); - return comm[1]; - }; - entries.push([comm[0].$sendQueue, queueEntry]); - 
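Aside, not part of the patch: the $select shim above follows Go's select semantics: scan every case, collect the ones that can proceed (a buffered value, a waiting counterpart, or a closed channel), pick uniformly at random when several are ready, and only when none is ready queue an entry on each involved channel and block. The random pick mirrors what Go itself guarantees, which a toy program shows:

package main

import "fmt"

func main() {
	a := make(chan int, 1)
	b := make(chan int, 1)
	a <- 1
	b <- 2

	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		// Both cases are ready on every iteration, so the runtime,
		// like the shim's ready[Math.floor(Math.random() * ready.length)]
		// pick, chooses between them pseudo-randomly.
		select {
		case v := <-a:
			counts["a"]++
			a <- v // refill so this case stays ready
		case v := <-b:
			counts["b"]++
			b <- v
		}
	}
	fmt.Println(counts) // roughly even split between "a" and "b"
}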
comm[0].$sendQueue.push(queueEntry); - break; - } - })(i); - } - $block(); - return f; -}; -` diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/jsmapping.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/jsmapping.go deleted file mode 100644 index dc29cba6b..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/jsmapping.go +++ /dev/null @@ -1,379 +0,0 @@ -package prelude - -const jsmapping = ` -var $jsObjectPtr, $jsErrorPtr; - -var $needsExternalization = function(t) { - switch (t.kind) { - case $kindBool: - case $kindInt: - case $kindInt8: - case $kindInt16: - case $kindInt32: - case $kindUint: - case $kindUint8: - case $kindUint16: - case $kindUint32: - case $kindUintptr: - case $kindFloat32: - case $kindFloat64: - return false; - default: - return t !== $jsObjectPtr; - } -}; - -var $externalize = function(v, t) { - if (t === $jsObjectPtr) { - return v; - } - switch (t.kind) { - case $kindBool: - case $kindInt: - case $kindInt8: - case $kindInt16: - case $kindInt32: - case $kindUint: - case $kindUint8: - case $kindUint16: - case $kindUint32: - case $kindUintptr: - case $kindFloat32: - case $kindFloat64: - return v; - case $kindInt64: - case $kindUint64: - return $flatten64(v); - case $kindArray: - if ($needsExternalization(t.elem)) { - return $mapArray(v, function(e) { return $externalize(e, t.elem); }); - } - return v; - case $kindFunc: - return $externalizeFunction(v, t, false); - case $kindInterface: - if (v === $ifaceNil) { - return null; - } - if (v.constructor === $jsObjectPtr) { - return v.$val.object; - } - return $externalize(v.$val, v.constructor); - case $kindMap: - var m = {}; - var keys = $keys(v); - for (var i = 0; i < keys.length; i++) { - var entry = v[keys[i]]; - m[$externalize(entry.k, t.key)] = $externalize(entry.v, t.elem); - } - return m; - case $kindPtr: - if (v === t.nil) { - return null; - } - return $externalize(v.$get(), t.elem); - case $kindSlice: - if ($needsExternalization(t.elem)) { - return $mapArray($sliceToArray(v), function(e) { return $externalize(e, t.elem); }); - } - return $sliceToArray(v); - case $kindString: - if ($isASCII(v)) { - return v; - } - var s = "", r; - for (var i = 0; i < v.length; i += r[1]) { - r = $decodeRune(v, i); - var c = r[0]; - if (c > 0xFFFF) { - var h = Math.floor((c - 0x10000) / 0x400) + 0xD800; - var l = (c - 0x10000) % 0x400 + 0xDC00; - s += String.fromCharCode(h, l); - continue; - } - s += String.fromCharCode(c); - } - return s; - case $kindStruct: - var timePkg = $packages["time"]; - if (timePkg !== undefined && v.constructor === timePkg.Time.ptr) { - var milli = $div64(v.UnixNano(), new $Int64(0, 1000000)); - return new Date($flatten64(milli)); - } - - var noJsObject = {}; - var searchJsObject = function(v, t) { - if (t === $jsObjectPtr) { - return v; - } - switch (t.kind) { - case $kindPtr: - if (v === t.nil) { - return noJsObject; - } - return searchJsObject(v.$get(), t.elem); - case $kindStruct: - var f = t.fields[0]; - return searchJsObject(v[f.prop], f.typ); - case $kindInterface: - return searchJsObject(v.$val, v.constructor); - default: - return noJsObject; - } - }; - var o = searchJsObject(v, t); - if (o !== noJsObject) { - return o; - } - - o = {}; - for (var i = 0; i < t.fields.length; i++) { - var f = t.fields[i]; - if (!f.exported) { - continue; - } - o[f.name] = $externalize(v[f.prop], f.typ); - } - return o; - } - $throwRuntimeError("cannot externalize " + t.string); -}; - -var $externalizeFunction = function(v, t, passThis) { - if (v === $throwNilPointerError) { - 
return null; - } - if (v.$externalizeWrapper === undefined) { - $checkForDeadlock = false; - v.$externalizeWrapper = function() { - var args = []; - for (var i = 0; i < t.params.length; i++) { - if (t.variadic && i === t.params.length - 1) { - var vt = t.params[i].elem, varargs = []; - for (var j = i; j < arguments.length; j++) { - varargs.push($internalize(arguments[j], vt)); - } - args.push(new (t.params[i])(varargs)); - break; - } - args.push($internalize(arguments[i], t.params[i])); - } - var result = v.apply(passThis ? this : undefined, args); - switch (t.results.length) { - case 0: - return; - case 1: - return $externalize(result, t.results[0]); - default: - for (var i = 0; i < t.results.length; i++) { - result[i] = $externalize(result[i], t.results[i]); - } - return result; - } - }; - } - return v.$externalizeWrapper; -}; - -var $internalize = function(v, t, recv) { - if (t === $jsObjectPtr) { - return v; - } - if (t === $jsObjectPtr.elem) { - $throwRuntimeError("cannot internalize js.Object, use *js.Object instead"); - } - if (v && v.__internal_object__ !== undefined) { - return $assertType(v.__internal_object__, t, false); - } - var timePkg = $packages["time"]; - if (timePkg !== undefined && t === timePkg.Time) { - if (!(v !== null && v !== undefined && v.constructor === Date)) { - $throwRuntimeError("cannot internalize time.Time from " + typeof v + ", must be Date"); - } - return timePkg.Unix(new $Int64(0, 0), new $Int64(0, v.getTime() * 1000000)); - } - switch (t.kind) { - case $kindBool: - return !!v; - case $kindInt: - return parseInt(v); - case $kindInt8: - return parseInt(v) << 24 >> 24; - case $kindInt16: - return parseInt(v) << 16 >> 16; - case $kindInt32: - return parseInt(v) >> 0; - case $kindUint: - return parseInt(v); - case $kindUint8: - return parseInt(v) << 24 >>> 24; - case $kindUint16: - return parseInt(v) << 16 >>> 16; - case $kindUint32: - case $kindUintptr: - return parseInt(v) >>> 0; - case $kindInt64: - case $kindUint64: - return new t(0, v); - case $kindFloat32: - case $kindFloat64: - return parseFloat(v); - case $kindArray: - if (v.length !== t.len) { - $throwRuntimeError("got array with wrong size from JavaScript native"); - } - return $mapArray(v, function(e) { return $internalize(e, t.elem); }); - case $kindFunc: - return function() { - var args = []; - for (var i = 0; i < t.params.length; i++) { - if (t.variadic && i === t.params.length - 1) { - var vt = t.params[i].elem, varargs = arguments[i]; - for (var j = 0; j < varargs.$length; j++) { - args.push($externalize(varargs.$array[varargs.$offset + j], vt)); - } - break; - } - args.push($externalize(arguments[i], t.params[i])); - } - var result = v.apply(recv, args); - switch (t.results.length) { - case 0: - return; - case 1: - return $internalize(result, t.results[0]); - default: - for (var i = 0; i < t.results.length; i++) { - result[i] = $internalize(result[i], t.results[i]); - } - return result; - } - }; - case $kindInterface: - if (t.methods.length !== 0) { - $throwRuntimeError("cannot internalize " + t.string); - } - if (v === null) { - return $ifaceNil; - } - if (v === undefined) { - return new $jsObjectPtr(undefined); - } - switch (v.constructor) { - case Int8Array: - return new ($sliceType($Int8))(v); - case Int16Array: - return new ($sliceType($Int16))(v); - case Int32Array: - return new ($sliceType($Int))(v); - case Uint8Array: - return new ($sliceType($Uint8))(v); - case Uint16Array: - return new ($sliceType($Uint16))(v); - case Uint32Array: - return new ($sliceType($Uint))(v); - case 
Float32Array: - return new ($sliceType($Float32))(v); - case Float64Array: - return new ($sliceType($Float64))(v); - case Array: - return $internalize(v, $sliceType($emptyInterface)); - case Boolean: - return new $Bool(!!v); - case Date: - if (timePkg === undefined) { - /* time package is not present, internalize as &js.Object{Date} so it can be externalized into original Date. */ - return new $jsObjectPtr(v); - } - return new timePkg.Time($internalize(v, timePkg.Time)); - case Function: - var funcType = $funcType([$sliceType($emptyInterface)], [$jsObjectPtr], true); - return new funcType($internalize(v, funcType)); - case Number: - return new $Float64(parseFloat(v)); - case String: - return new $String($internalize(v, $String)); - default: - if ($global.Node && v instanceof $global.Node) { - return new $jsObjectPtr(v); - } - var mapType = $mapType($String, $emptyInterface); - return new mapType($internalize(v, mapType)); - } - case $kindMap: - var m = {}; - var keys = $keys(v); - for (var i = 0; i < keys.length; i++) { - var k = $internalize(keys[i], t.key); - m[t.key.keyFor(k)] = { k: k, v: $internalize(v[keys[i]], t.elem) }; - } - return m; - case $kindPtr: - if (t.elem.kind === $kindStruct) { - return $internalize(v, t.elem); - } - case $kindSlice: - return new t($mapArray(v, function(e) { return $internalize(e, t.elem); })); - case $kindString: - v = String(v); - if ($isASCII(v)) { - return v; - } - var s = ""; - var i = 0; - while (i < v.length) { - var h = v.charCodeAt(i); - if (0xD800 <= h && h <= 0xDBFF) { - var l = v.charCodeAt(i + 1); - var c = (h - 0xD800) * 0x400 + l - 0xDC00 + 0x10000; - s += $encodeRune(c); - i += 2; - continue; - } - s += $encodeRune(h); - i++; - } - return s; - case $kindStruct: - var noJsObject = {}; - var searchJsObject = function(t) { - if (t === $jsObjectPtr) { - return v; - } - if (t === $jsObjectPtr.elem) { - $throwRuntimeError("cannot internalize js.Object, use *js.Object instead"); - } - switch (t.kind) { - case $kindPtr: - return searchJsObject(t.elem); - case $kindStruct: - var f = t.fields[0]; - var o = searchJsObject(f.typ); - if (o !== noJsObject) { - var n = new t.ptr(); - n[f.prop] = o; - return n; - } - return noJsObject; - default: - return noJsObject; - } - }; - var o = searchJsObject(t); - if (o !== noJsObject) { - return o; - } - } - $throwRuntimeError("cannot internalize " + t.string); -}; - -/* $isASCII reports whether string s contains only ASCII characters. 
*/ -var $isASCII = function(s) { - for (var i = 0; i < s.length; i++) { - if (s.charCodeAt(i) >= 128) { - return false; - } - } - return true; -}; -` diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/numeric.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/numeric.go deleted file mode 100644 index 063d09f46..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/numeric.go +++ /dev/null @@ -1,196 +0,0 @@ -package prelude - -const numeric = ` -var $min = Math.min; -var $mod = function(x, y) { return x % y; }; -var $parseInt = parseInt; -var $parseFloat = function(f) { - if (f !== undefined && f !== null && f.constructor === Number) { - return f; - } - return parseFloat(f); -}; - -var $froundBuf = new Float32Array(1); -var $fround = Math.fround || function(f) { - $froundBuf[0] = f; - return $froundBuf[0]; -}; - -var $imul = Math.imul || function(a, b) { - var ah = (a >>> 16) & 0xffff; - var al = a & 0xffff; - var bh = (b >>> 16) & 0xffff; - var bl = b & 0xffff; - return ((al * bl) + (((ah * bl + al * bh) << 16) >>> 0) >> 0); -}; - -var $floatKey = function(f) { - if (f !== f) { - $idCounter++; - return "NaN$" + $idCounter; - } - return String(f); -}; - -var $flatten64 = function(x) { - return x.$high * 4294967296 + x.$low; -}; - -var $shiftLeft64 = function(x, y) { - if (y === 0) { - return x; - } - if (y < 32) { - return new x.constructor(x.$high << y | x.$low >>> (32 - y), (x.$low << y) >>> 0); - } - if (y < 64) { - return new x.constructor(x.$low << (y - 32), 0); - } - return new x.constructor(0, 0); -}; - -var $shiftRightInt64 = function(x, y) { - if (y === 0) { - return x; - } - if (y < 32) { - return new x.constructor(x.$high >> y, (x.$low >>> y | x.$high << (32 - y)) >>> 0); - } - if (y < 64) { - return new x.constructor(x.$high >> 31, (x.$high >> (y - 32)) >>> 0); - } - if (x.$high < 0) { - return new x.constructor(-1, 4294967295); - } - return new x.constructor(0, 0); -}; - -var $shiftRightUint64 = function(x, y) { - if (y === 0) { - return x; - } - if (y < 32) { - return new x.constructor(x.$high >>> y, (x.$low >>> y | x.$high << (32 - y)) >>> 0); - } - if (y < 64) { - return new x.constructor(0, x.$high >>> (y - 32)); - } - return new x.constructor(0, 0); -}; - -var $mul64 = function(x, y) { - var high = 0, low = 0; - if ((y.$low & 1) !== 0) { - high = x.$high; - low = x.$low; - } - for (var i = 1; i < 32; i++) { - if ((y.$low & 1<<i) !== 0) { - high += x.$high << i | x.$low >>> (32 - i); - low += (x.$low << i) >>> 0; - } - } - for (var i = 0; i < 32; i++) { - if ((y.$high & 1<<i) !== 0) { - high += x.$low << i; - } - } - return new x.constructor(high, low); -}; - -var $div64 = function(x, y, returnRemainder) { - if (y.$high === 0 && y.$low === 0) { - $throwRuntimeError("integer divide by zero"); - } - - var s = 1; - var rs = 1; - - var xHigh = x.$high; - var xLow = x.$low; - if (xHigh < 0) { - s = -1; - rs = -1; - xHigh = -xHigh; - if (xLow !== 0) { - xHigh--; - xLow = 4294967296 - xLow; - } - } - - var yHigh = y.$high; - var yLow = y.$low; - if (y.$high < 0) { - s *= -1; - yHigh = -yHigh; - if (yLow !== 0) { - yHigh--; - yLow = 4294967296 - yLow; - } - } - - var high = 0, low = 0, n = 0; - while (yHigh < 2147483648 && ((xHigh > yHigh) || (xHigh === yHigh && xLow > yLow))) { - yHigh = (yHigh << 1 | yLow >>> 31) >>> 0; - yLow = (yLow << 1) >>> 0; - n++; - } - for (var i = 0; i <= n; i++) { - high = high << 1 | low >>> 31; - low = (low << 1) >>> 0; - if ((xHigh > yHigh) || (xHigh === yHigh && xLow >= yLow)) { - xHigh = xHigh - yHigh; - xLow = xLow - yLow; - if (xLow < 0) { - xHigh--; - xLow += 4294967296; - } - low++; - if (low === 4294967296) { - high++; - low = 0; - } - } - yLow = (yLow >>> 1 | yHigh << (32 - 1)) >>> 0; - yHigh = yHigh >>> 1; - } - - if (returnRemainder) { - return new x.constructor(xHigh * rs, xLow * rs); - } - return new x.constructor(high * s, low * s); -}; - -var $divComplex = function(n, d) { - var ninf = n.$real === Infinity || n.$real === -Infinity || n.$imag === Infinity || n.$imag === -Infinity; - var dinf = d.$real === Infinity || d.$real === -Infinity || d.$imag === Infinity || d.$imag === -Infinity; - var nnan = !ninf && (n.$real !== n.$real || n.$imag !== n.$imag); - var dnan = !dinf && (d.$real !== d.$real
|| d.$imag !== d.$imag); - if(nnan || dnan) { - return new n.constructor(NaN, NaN); - } - if (ninf && !dinf) { - return new n.constructor(Infinity, Infinity); - } - if (!ninf && dinf) { - return new n.constructor(0, 0); - } - if (d.$real === 0 && d.$imag === 0) { - if (n.$real === 0 && n.$imag === 0) { - return new n.constructor(NaN, NaN); - } - return new n.constructor(Infinity, Infinity); - } - var a = Math.abs(d.$real); - var b = Math.abs(d.$imag); - if (a <= b) { - var ratio = d.$real / d.$imag; - var denom = d.$real * ratio + d.$imag; - return new n.constructor((n.$real * ratio + n.$imag) / denom, (n.$imag * ratio - n.$real) / denom); - } - var ratio = d.$imag / d.$real; - var denom = d.$imag * ratio + d.$real; - return new n.constructor((n.$imag * ratio + n.$real) / denom, (n.$imag - n.$real * ratio) / denom); -}; -` diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/prelude.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/prelude.go deleted file mode 100644 index c27601e88..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/prelude.go +++ /dev/null @@ -1,425 +0,0 @@ -package prelude - -//go:generate go run genmin.go - -// Prelude is the GopherJS JavaScript interop layer. -const Prelude = prelude + numeric + types + goroutines + jsmapping - -const prelude = `Error.stackTraceLimit = Infinity; - -var $global, $module; -if (typeof window !== "undefined") { /* web page */ - $global = window; -} else if (typeof self !== "undefined") { /* web worker */ - $global = self; -} else if (typeof global !== "undefined") { /* Node.js */ - $global = global; - $global.require = require; -} else { /* others (e.g. Nashorn) */ - $global = this; -} - -if ($global === undefined || $global.Array === undefined) { - throw new Error("no global object found"); -} -if (typeof module !== "undefined") { - $module = module; -} - -var $packages = {}, $idCounter = 0; -var $keys = function(m) { return m ? 
Object.keys(m) : []; }; -var $flushConsole = function() {}; -var $throwRuntimeError; /* set by package "runtime" */ -var $throwNilPointerError = function() { $throwRuntimeError("invalid memory address or nil pointer dereference"); }; -var $call = function(fn, rcvr, args) { return fn.apply(rcvr, args); }; -var $makeFunc = function(fn) { return function() { return $externalize(fn(this, new ($sliceType($jsObjectPtr))($global.Array.prototype.slice.call(arguments, []))), $emptyInterface); }; }; -var $unused = function(v) {}; - -var $mapArray = function(array, f) { - var newArray = new array.constructor(array.length); - for (var i = 0; i < array.length; i++) { - newArray[i] = f(array[i]); - } - return newArray; -}; - -var $methodVal = function(recv, name) { - var vals = recv.$methodVals || {}; - recv.$methodVals = vals; /* noop for primitives */ - var f = vals[name]; - if (f !== undefined) { - return f; - } - var method = recv[name]; - f = function() { - $stackDepthOffset--; - try { - return method.apply(recv, arguments); - } finally { - $stackDepthOffset++; - } - }; - vals[name] = f; - return f; -}; - -var $methodExpr = function(typ, name) { - var method = typ.prototype[name]; - if (method.$expr === undefined) { - method.$expr = function() { - $stackDepthOffset--; - try { - if (typ.wrapped) { - arguments[0] = new typ(arguments[0]); - } - return Function.call.apply(method, arguments); - } finally { - $stackDepthOffset++; - } - }; - } - return method.$expr; -}; - -var $ifaceMethodExprs = {}; -var $ifaceMethodExpr = function(name) { - var expr = $ifaceMethodExprs["$" + name]; - if (expr === undefined) { - expr = $ifaceMethodExprs["$" + name] = function() { - $stackDepthOffset--; - try { - return Function.call.apply(arguments[0][name], arguments); - } finally { - $stackDepthOffset++; - } - }; - } - return expr; -}; - -var $subslice = function(slice, low, high, max) { - if (high === undefined) { - high = slice.$length; - } - if (max === undefined) { - max = slice.$capacity; - } - if (low < 0 || high < low || max < high || high > slice.$capacity || max > slice.$capacity) { - $throwRuntimeError("slice bounds out of range"); - } - if (slice === slice.constructor.nil) { - return slice; - } - var s = new slice.constructor(slice.$array); - s.$offset = slice.$offset + low; - s.$length = high - low; - s.$capacity = max - low; - return s; -}; - -var $substring = function(str, low, high) { - if (low < 0 || high < low || high > str.length) { - $throwRuntimeError("slice bounds out of range"); - } - return str.substring(low, high); -}; - -var $sliceToArray = function(slice) { - if (slice.$array.constructor !== Array) { - return slice.$array.subarray(slice.$offset, slice.$offset + slice.$length); - } - return slice.$array.slice(slice.$offset, slice.$offset + slice.$length); -}; - -var $decodeRune = function(str, pos) { - var c0 = str.charCodeAt(pos); - - if (c0 < 0x80) { - return [c0, 1]; - } - - if (c0 !== c0 || c0 < 0xC0) { - return [0xFFFD, 1]; - } - - var c1 = str.charCodeAt(pos + 1); - if (c1 !== c1 || c1 < 0x80 || 0xC0 <= c1) { - return [0xFFFD, 1]; - } - - if (c0 < 0xE0) { - var r = (c0 & 0x1F) << 6 | (c1 & 0x3F); - if (r <= 0x7F) { - return [0xFFFD, 1]; - } - return [r, 2]; - } - - var c2 = str.charCodeAt(pos + 2); - if (c2 !== c2 || c2 < 0x80 || 0xC0 <= c2) { - return [0xFFFD, 1]; - } - - if (c0 < 0xF0) { - var r = (c0 & 0x0F) << 12 | (c1 & 0x3F) << 6 | (c2 & 0x3F); - if (r <= 0x7FF) { - return [0xFFFD, 1]; - } - if (0xD800 <= r && r <= 0xDFFF) { - return [0xFFFD, 1]; - } - return [r, 3]; - } - - var c3 
= str.charCodeAt(pos + 3); - if (c3 !== c3 || c3 < 0x80 || 0xC0 <= c3) { - return [0xFFFD, 1]; - } - - if (c0 < 0xF8) { - var r = (c0 & 0x07) << 18 | (c1 & 0x3F) << 12 | (c2 & 0x3F) << 6 | (c3 & 0x3F); - if (r <= 0xFFFF || 0x10FFFF < r) { - return [0xFFFD, 1]; - } - return [r, 4]; - } - - return [0xFFFD, 1]; -}; - -var $encodeRune = function(r) { - if (r < 0 || r > 0x10FFFF || (0xD800 <= r && r <= 0xDFFF)) { - r = 0xFFFD; - } - if (r <= 0x7F) { - return String.fromCharCode(r); - } - if (r <= 0x7FF) { - return String.fromCharCode(0xC0 | r >> 6, 0x80 | (r & 0x3F)); - } - if (r <= 0xFFFF) { - return String.fromCharCode(0xE0 | r >> 12, 0x80 | (r >> 6 & 0x3F), 0x80 | (r & 0x3F)); - } - return String.fromCharCode(0xF0 | r >> 18, 0x80 | (r >> 12 & 0x3F), 0x80 | (r >> 6 & 0x3F), 0x80 | (r & 0x3F)); -}; - -var $stringToBytes = function(str) { - var array = new Uint8Array(str.length); - for (var i = 0; i < str.length; i++) { - array[i] = str.charCodeAt(i); - } - return array; -}; - -var $bytesToString = function(slice) { - if (slice.$length === 0) { - return ""; - } - var str = ""; - for (var i = 0; i < slice.$length; i += 10000) { - str += String.fromCharCode.apply(undefined, slice.$array.subarray(slice.$offset + i, slice.$offset + Math.min(slice.$length, i + 10000))); - } - return str; -}; - -var $stringToRunes = function(str) { - var array = new Int32Array(str.length); - var rune, j = 0; - for (var i = 0; i < str.length; i += rune[1], j++) { - rune = $decodeRune(str, i); - array[j] = rune[0]; - } - return array.subarray(0, j); -}; - -var $runesToString = function(slice) { - if (slice.$length === 0) { - return ""; - } - var str = ""; - for (var i = 0; i < slice.$length; i++) { - str += $encodeRune(slice.$array[slice.$offset + i]); - } - return str; -}; - -var $copyString = function(dst, src) { - var n = Math.min(src.length, dst.$length); - for (var i = 0; i < n; i++) { - dst.$array[dst.$offset + i] = src.charCodeAt(i); - } - return n; -}; - -var $copySlice = function(dst, src) { - var n = Math.min(src.$length, dst.$length); - $copyArray(dst.$array, src.$array, dst.$offset, src.$offset, n, dst.constructor.elem); - return n; -}; - -var $copyArray = function(dst, src, dstOffset, srcOffset, n, elem) { - if (n === 0 || (dst === src && dstOffset === srcOffset)) { - return; - } - - if (src.subarray) { - dst.set(src.subarray(srcOffset, srcOffset + n), dstOffset); - return; - } - - switch (elem.kind) { - case $kindArray: - case $kindStruct: - if (dst === src && dstOffset > srcOffset) { - for (var i = n - 1; i >= 0; i--) { - elem.copy(dst[dstOffset + i], src[srcOffset + i]); - } - return; - } - for (var i = 0; i < n; i++) { - elem.copy(dst[dstOffset + i], src[srcOffset + i]); - } - return; - } - - if (dst === src && dstOffset > srcOffset) { - for (var i = n - 1; i >= 0; i--) { - dst[dstOffset + i] = src[srcOffset + i]; - } - return; - } - for (var i = 0; i < n; i++) { - dst[dstOffset + i] = src[srcOffset + i]; - } -}; - -var $clone = function(src, type) { - var clone = type.zero(); - type.copy(clone, src); - return clone; -}; - -var $pointerOfStructConversion = function(obj, type) { - if(obj.$proxies === undefined) { - obj.$proxies = {}; - obj.$proxies[obj.constructor.string] = obj; - } - var proxy = obj.$proxies[type.string]; - if (proxy === undefined) { - var properties = {}; - for (var i = 0; i < type.elem.fields.length; i++) { - (function(fieldProp) { - properties[fieldProp] = { - get: function() { return obj[fieldProp]; }, - set: function(value) { obj[fieldProp] = value; } - }; - 
})(type.elem.fields[i].prop); - } - proxy = Object.create(type.prototype, properties); - proxy.$val = proxy; - obj.$proxies[type.string] = proxy; - proxy.$proxies = obj.$proxies; - } - return proxy; -}; - -var $append = function(slice) { - return $internalAppend(slice, arguments, 1, arguments.length - 1); -}; - -var $appendSlice = function(slice, toAppend) { - if (toAppend.constructor === String) { - var bytes = $stringToBytes(toAppend); - return $internalAppend(slice, bytes, 0, bytes.length); - } - return $internalAppend(slice, toAppend.$array, toAppend.$offset, toAppend.$length); -}; - -var $internalAppend = function(slice, array, offset, length) { - if (length === 0) { - return slice; - } - - var newArray = slice.$array; - var newOffset = slice.$offset; - var newLength = slice.$length + length; - var newCapacity = slice.$capacity; - - if (newLength > newCapacity) { - newOffset = 0; - newCapacity = Math.max(newLength, slice.$capacity < 1024 ? slice.$capacity * 2 : Math.floor(slice.$capacity * 5 / 4)); - - if (slice.$array.constructor === Array) { - newArray = slice.$array.slice(slice.$offset, slice.$offset + slice.$length); - newArray.length = newCapacity; - var zero = slice.constructor.elem.zero; - for (var i = slice.$length; i < newCapacity; i++) { - newArray[i] = zero(); - } - } else { - newArray = new slice.$array.constructor(newCapacity); - newArray.set(slice.$array.subarray(slice.$offset, slice.$offset + slice.$length)); - } - } - - $copyArray(newArray, array, newOffset + slice.$length, offset, length, slice.constructor.elem); - - var newSlice = new slice.constructor(newArray); - newSlice.$offset = newOffset; - newSlice.$length = newLength; - newSlice.$capacity = newCapacity; - return newSlice; -}; - -var $equal = function(a, b, type) { - if (type === $jsObjectPtr) { - return a === b; - } - switch (type.kind) { - case $kindComplex64: - case $kindComplex128: - return a.$real === b.$real && a.$imag === b.$imag; - case $kindInt64: - case $kindUint64: - return a.$high === b.$high && a.$low === b.$low; - case $kindArray: - if (a.length !== b.length) { - return false; - } - for (var i = 0; i < a.length; i++) { - if (!$equal(a[i], b[i], type.elem)) { - return false; - } - } - return true; - case $kindStruct: - for (var i = 0; i < type.fields.length; i++) { - var f = type.fields[i]; - if (!$equal(a[f.prop], b[f.prop], f.typ)) { - return false; - } - } - return true; - case $kindInterface: - return $interfaceIsEqual(a, b); - default: - return a === b; - } -}; - -var $interfaceIsEqual = function(a, b) { - if (a === $ifaceNil || b === $ifaceNil) { - return a === b; - } - if (a.constructor !== b.constructor) { - return false; - } - if (a.constructor === $jsObjectPtr) { - return a.object === b.object; - } - if (!a.constructor.comparable) { - $throwRuntimeError("comparing uncomparable type " + a.constructor.string); - } - return $equal(a.$val, b.$val, a.constructor); -}; -` diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/prelude_min.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/prelude_min.go deleted file mode 100644 index 0918ffa0b..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/prelude_min.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by genmin; DO NOT EDIT. - -package prelude - -// Minified is an uglifyjs-minified version of Prelude. 
-const Minified = "var $global,$module;if(Error.stackTraceLimit=1/0,\"undefined\"!=typeof window?$global=window:\"undefined\"!=typeof self?$global=self:\"undefined\"!=typeof global?($global=global).require=require:$global=this,void 0===$global||void 0===$global.Array)throw new Error(\"no global object found\");\"undefined\"!=typeof module&&($module=module);var $throwRuntimeError,$packages={},$idCounter=0,$keys=function(e){return e?Object.keys(e):[]},$flushConsole=function(){},$throwNilPointerError=function(){$throwRuntimeError(\"invalid memory address or nil pointer dereference\")},$call=function(e,n,r){return e.apply(n,r)},$makeFunc=function(e){return function(){return $externalize(e(this,new($sliceType($jsObjectPtr))($global.Array.prototype.slice.call(arguments,[]))),$emptyInterface)}},$unused=function(e){},$mapArray=function(e,n){for(var r=new e.constructor(e.length),t=0;te.$capacity||t>e.$capacity)&&$throwRuntimeError(\"slice bounds out of range\"),e===e.constructor.nil)return e;var i=new e.constructor(e.$array);return i.$offset=e.$offset+n,i.$length=r-n,i.$capacity=t-n,i},$substring=function(e,n,r){return(n<0||re.length)&&$throwRuntimeError(\"slice bounds out of range\"),e.substring(n,r)},$sliceToArray=function(e){return e.$array.constructor!==Array?e.$array.subarray(e.$offset,e.$offset+e.$length):e.$array.slice(e.$offset,e.$offset+e.$length)},$decodeRune=function(e,n){var r=e.charCodeAt(n);if(r<128)return[r,1];if(r!=r||r<192)return[65533,1];var t=e.charCodeAt(n+1);if(t!=t||t<128||192<=t)return[65533,1];if(r<224)return(a=(31&r)<<6|63&t)<=127?[65533,1]:[a,2];var i=e.charCodeAt(n+2);if(i!=i||i<128||192<=i)return[65533,1];if(r<240)return(a=(15&r)<<12|(63&t)<<6|63&i)<=2047?[65533,1]:55296<=a&&a<=57343?[65533,1]:[a,3];var a,o=e.charCodeAt(n+3);return o!=o||o<128||192<=o?[65533,1]:r<248?(a=(7&r)<<18|(63&t)<<12|(63&i)<<6|63&o)<=65535||11141111114111||55296<=e&&e<=57343)&&(e=65533),e<=127?String.fromCharCode(e):e<=2047?String.fromCharCode(192|e>>6,128|63&e):e<=65535?String.fromCharCode(224|e>>12,128|e>>6&63,128|63&e):String.fromCharCode(240|e>>18,128|e>>12&63,128|e>>6&63,128|63&e)},$stringToBytes=function(e){for(var n=new Uint8Array(e.length),r=0;rt){for(var o=i-1;o>=0;o--)a.copy(e[r+o],n[t+o]);return}for(o=0;ot)for(o=i-1;o>=0;o--)e[r+o]=n[t+o];else for(o=0;o$)if(a=0,$=Math.max(o,e.$capacity<1024?2*e.$capacity:Math.floor(5*e.$capacity/4)),e.$array.constructor===Array){(i=e.$array.slice(e.$offset,e.$offset+e.$length)).length=$;for(var c=e.constructor.elem.zero,u=e.$length;u<$;u++)i[u]=c()}else(i=new e.$array.constructor($)).set(e.$array.subarray(e.$offset,e.$offset+e.$length));$copyArray(i,n,a+e.$length,r,t,e.constructor.elem);var l=new e.constructor(i);return l.$offset=a,l.$length=o,l.$capacity=$,l},$equal=function(e,n,r){if(r===$jsObjectPtr)return e===n;switch(r.kind){case $kindComplex64:case $kindComplex128:return e.$real===n.$real&&e.$imag===n.$imag;case $kindInt64:case $kindUint64:return e.$high===n.$high&&e.$low===n.$low;case $kindArray:if(e.length!==n.length)return!1;for(var t=0;t>>16&65535)*t+r*(n>>>16&65535)<<16>>>0)>>0},$floatKey=function(e){return e!=e?\"NaN$\"+ ++$idCounter:String(e)},$flatten64=function(e){return 4294967296*e.$high+e.$low},$shiftLeft64=function(e,n){return 0===n?e:n<32?new e.constructor(e.$high<>>32-n,e.$low<>>0):n<64?new e.constructor(e.$low<>n,(e.$low>>>n|e.$high<<32-n)>>>0):n<64?new e.constructor(e.$high>>31,e.$high>>n-32>>>0):e.$high<0?new e.constructor(-1,4294967295):new e.constructor(0,0)},$shiftRightUint64=function(e,n){return 0===n?e:n<32?new 
e.constructor(e.$high>>>n,(e.$low>>>n|e.$high<<32-n)>>>0):n<64?new e.constructor(0,e.$high>>>n-32):new e.constructor(0,0)},$mul64=function(e,n){var r=0,t=0;0!=(1&n.$low)&&(r=e.$high,t=e.$low);for(var i=1;i<32;i++)0!=(n.$low&1<>>32-i,t+=e.$low<>>0);for(i=0;i<32;i++)0!=(n.$high&1<$||a===$&&o>c);)$=($<<1|c>>>31)>>>0,c=c<<1>>>0,s++;for(var f=0;f<=s;f++)u=u<<1|l>>>31,l=l<<1>>>0,(a>$||a===$&&o>=c)&&(a-=$,(o-=c)<0&&(a--,o+=4294967296),4294967296===++l&&(u++,l=0)),c=(c>>>1|$<<31)>>>0,$>>>=1;return r?new e.constructor(a*i,o*i):new e.constructor(u*t,l*t)},$divComplex=function(e,n){var r=e.$real===1/0||e.$real===-1/0||e.$imag===1/0||e.$imag===-1/0,t=n.$real===1/0||n.$real===-1/0||n.$imag===1/0||n.$imag===-1/0,i=!r&&(e.$real!=e.$real||e.$imag!=e.$imag),a=!t&&(n.$real!=n.$real||n.$imag!=n.$imag);if(i||a)return new e.constructor(NaN,NaN);if(r&&!t)return new e.constructor(1/0,1/0);if(!r&&t)return new e.constructor(0,0);if(0===n.$real&&0===n.$imag)return 0===e.$real&&0===e.$imag?new e.constructor(NaN,NaN):new e.constructor(1/0,1/0);if(Math.abs(n.$real)<=Math.abs(n.$imag)){var o=n.$real/n.$imag,$=n.$real*o+n.$imag;return new e.constructor((e.$real*o+e.$imag)/$,(e.$imag*o-e.$real)/$)}o=n.$imag/n.$real,$=n.$imag*o+n.$real;return new e.constructor((e.$imag*o+e.$real)/$,(e.$imag-e.$real*o)/$)},$kindBool=1,$kindInt=2,$kindInt8=3,$kindInt16=4,$kindInt32=5,$kindInt64=6,$kindUint=7,$kindUint8=8,$kindUint16=9,$kindUint32=10,$kindUint64=11,$kindUintptr=12,$kindFloat32=13,$kindFloat64=14,$kindComplex64=15,$kindComplex128=16,$kindArray=17,$kindChan=18,$kindFunc=19,$kindInterface=20,$kindMap=21,$kindPtr=22,$kindSlice=23,$kindString=24,$kindStruct=25,$kindUnsafePointer=26,$methodSynthesizers=[],$addMethodSynthesizer=function(e){null!==$methodSynthesizers?$methodSynthesizers.push(e):e()},$synthesizeMethods=function(){$methodSynthesizers.forEach(function(e){e()}),$methodSynthesizers=null},$ifaceKeyFor=function(e){if(e===$ifaceNil)return\"nil\";var n=e.constructor;return n.string+\"$\"+n.keyFor(e.$val)},$identity=function(e){return e},$typeIDCounter=0,$idKey=function(e){return void 0===e.$id&&($idCounter++,e.$id=$idCounter),String(e.$id)},$newType=function(e,n,r,t,i,a,o){var $;switch(n){case $kindBool:case $kindInt:case $kindInt8:case $kindInt16:case $kindInt32:case $kindUint:case $kindUint8:case $kindUint16:case $kindUint32:case $kindUintptr:case $kindUnsafePointer:($=function(e){this.$val=e}).wrapped=!0,$.keyFor=$identity;break;case $kindString:($=function(e){this.$val=e}).wrapped=!0,$.keyFor=function(e){return\"$\"+e};break;case $kindFloat32:case $kindFloat64:($=function(e){this.$val=e}).wrapped=!0,$.keyFor=function(e){return $floatKey(e)};break;case $kindInt64:($=function(e,n){this.$high=e+Math.floor(Math.ceil(n)/4294967296)>>0,this.$low=n>>>0,this.$val=this}).keyFor=function(e){return e.$high+\"$\"+e.$low};break;case $kindUint64:($=function(e,n){this.$high=e+Math.floor(Math.ceil(n)/4294967296)>>>0,this.$low=n>>>0,this.$val=this}).keyFor=function(e){return e.$high+\"$\"+e.$low};break;case $kindComplex64:($=function(e,n){this.$real=$fround(e),this.$imag=$fround(n),this.$val=this}).keyFor=function(e){return e.$real+\"$\"+e.$imag};break;case $kindComplex128:($=function(e,n){this.$real=e,this.$imag=n,this.$val=this}).keyFor=function(e){return e.$real+\"$\"+e.$imag};break;case $kindArray:($=function(e){this.$val=e}).wrapped=!0,$.ptr=$newType(4,$kindPtr,\"*\"+r,!1,\"\",!1,function(e){this.$get=function(){return 
e},this.$set=function(e){$.copy(this,e)},this.$val=e}),$.init=function(e,n){$.elem=e,$.len=n,$.comparable=e.comparable,$.keyFor=function(n){return Array.prototype.join.call($mapArray(n,function(n){return String(e.keyFor(n)).replace(/\\\\/g,\"\\\\\\\\\").replace(/\\$/g,\"\\\\$\")}),\"$\")},$.copy=function(n,r){$copyArray(n,r,0,0,r.length,e)},$.ptr.init($),Object.defineProperty($.ptr.nil,\"nilCheck\",{get:$throwNilPointerError})};break;case $kindChan:($=function(e){this.$val=e}).wrapped=!0,$.keyFor=$idKey,$.init=function(e,n,r){$.elem=e,$.sendOnly=n,$.recvOnly=r};break;case $kindFunc:($=function(e){this.$val=e}).wrapped=!0,$.init=function(e,n,r){$.params=e,$.results=n,$.variadic=r,$.comparable=!1};break;case $kindInterface:($={implementedBy:{},missingMethodFor:{}}).keyFor=$ifaceKeyFor,$.init=function(e){$.methods=e,e.forEach(function(e){$ifaceNil[e.prop]=$throwNilPointerError})};break;case $kindMap:($=function(e){this.$val=e}).wrapped=!0,$.init=function(e,n){$.key=e,$.elem=n,$.comparable=!1};break;case $kindPtr:($=o||function(e,n,r){this.$get=e,this.$set=n,this.$target=r,this.$val=this}).keyFor=$idKey,$.init=function(e){$.elem=e,$.wrapped=e.kind===$kindArray,$.nil=new $($throwNilPointerError,$throwNilPointerError)};break;case $kindSlice:($=function(e){e.constructor!==$.nativeArray&&(e=new $.nativeArray(e)),this.$array=e,this.$offset=0,this.$length=e.length,this.$capacity=e.length,this.$val=this}).init=function(e){$.elem=e,$.comparable=!1,$.nativeArray=$nativeArray(e.kind),$.nil=new $([])};break;case $kindStruct:($=function(e){this.$val=e}).wrapped=!0,$.ptr=$newType(4,$kindPtr,\"*\"+r,!1,i,a,o),$.ptr.elem=$,$.ptr.prototype.$get=function(){return this},$.ptr.prototype.$set=function(e){$.copy(this,e)},$.init=function(e,n){$.pkgPath=e,$.fields=n,n.forEach(function(e){e.typ.comparable||($.comparable=!1)}),$.keyFor=function(e){var r=e.$val;return $mapArray(n,function(e){return String(e.typ.keyFor(r[e.prop])).replace(/\\\\/g,\"\\\\\\\\\").replace(/\\$/g,\"\\\\$\")}).join(\"$\")},$.copy=function(e,r){for(var t=0;t0;){var a=[],o=[];t.forEach(function(e){if(!i[e.typ.string])switch(i[e.typ.string]=!0,e.typ.named&&(o=o.concat(e.typ.methods),e.indirect&&(o=o.concat($ptrType(e.typ).methods))),e.typ.kind){case $kindStruct:e.typ.fields.forEach(function(n){if(n.embedded){var r=n.typ,t=r.kind===$kindPtr;a.push({typ:t?r.elem:r,indirect:e.indirect||t})}});break;case $kindInterface:o=o.concat(e.typ.methods)}}),o.forEach(function(e){void 0===n[e.name]&&(n[e.name]=e)}),t=a}return 
e.methodSetCache=[],Object.keys(n).sort().forEach(function(r){e.methodSetCache.push(n[r])}),e.methodSetCache},$Bool=$newType(1,$kindBool,\"bool\",!0,\"\",!1,null),$Int=$newType(4,$kindInt,\"int\",!0,\"\",!1,null),$Int8=$newType(1,$kindInt8,\"int8\",!0,\"\",!1,null),$Int16=$newType(2,$kindInt16,\"int16\",!0,\"\",!1,null),$Int32=$newType(4,$kindInt32,\"int32\",!0,\"\",!1,null),$Int64=$newType(8,$kindInt64,\"int64\",!0,\"\",!1,null),$Uint=$newType(4,$kindUint,\"uint\",!0,\"\",!1,null),$Uint8=$newType(1,$kindUint8,\"uint8\",!0,\"\",!1,null),$Uint16=$newType(2,$kindUint16,\"uint16\",!0,\"\",!1,null),$Uint32=$newType(4,$kindUint32,\"uint32\",!0,\"\",!1,null),$Uint64=$newType(8,$kindUint64,\"uint64\",!0,\"\",!1,null),$Uintptr=$newType(4,$kindUintptr,\"uintptr\",!0,\"\",!1,null),$Float32=$newType(4,$kindFloat32,\"float32\",!0,\"\",!1,null),$Float64=$newType(8,$kindFloat64,\"float64\",!0,\"\",!1,null),$Complex64=$newType(8,$kindComplex64,\"complex64\",!0,\"\",!1,null),$Complex128=$newType(16,$kindComplex128,\"complex128\",!0,\"\",!1,null),$String=$newType(8,$kindString,\"string\",!0,\"\",!1,null),$UnsafePointer=$newType(4,$kindUnsafePointer,\"unsafe.Pointer\",!0,\"\",!1,null),$nativeArray=function(e){switch(e){case $kindInt:return Int32Array;case $kindInt8:return Int8Array;case $kindInt16:return Int16Array;case $kindInt32:return Int32Array;case $kindUint:return Uint32Array;case $kindUint8:return Uint8Array;case $kindUint16:return Uint16Array;case $kindUint32:case $kindUintptr:return Uint32Array;case $kindFloat32:return Float32Array;case $kindFloat64:return Float64Array;default:return Array}},$toNativeArray=function(e,n){var r=$nativeArray(e);return r===Array?n:new r(n)},$arrayTypes={},$arrayType=function(e,n){var r=e.id+\"$\"+n,t=$arrayTypes[r];return void 0===t&&(t=$newType(12,$kindArray,\"[\"+n+\"]\"+e.string,!1,\"\",!1,null),$arrayTypes[r]=t,t.init(e,n)),t},$chanType=function(e,n,r){var t=(r?\"<-\":\"\")+\"chan\"+(n?\"<- \":\" \")+e.string,i=n?\"SendChan\":r?\"RecvChan\":\"Chan\",a=e[i];return void 0===a&&(a=$newType(4,$kindChan,t,!1,\"\",!1,null),e[i]=a,a.init(e,n,r)),a},$Chan=function(e,n){(n<0||n>2147483647)&&$throwRuntimeError(\"makechan: size out of range\"),this.$elem=e,this.$capacity=n,this.$buffer=[],this.$sendQueue=[],this.$recvQueue=[],this.$closed=!1},$chanNil=new $Chan(null,0);$chanNil.$sendQueue=$chanNil.$recvQueue={length:0,push:function(){},shift:function(){},indexOf:function(){return-1}};var $funcTypes={},$funcType=function(e,n,r){var t=$mapArray(e,function(e){return e.id}).join(\",\")+\"$\"+$mapArray(n,function(e){return e.id}).join(\",\")+\"$\"+r,i=$funcTypes[t];if(void 0===i){var a=$mapArray(e,function(e){return e.string});r&&(a[a.length-1]=\"...\"+a[a.length-1].substr(2));var o=\"func(\"+a.join(\", \")+\")\";1===n.length?o+=\" \"+n[0].string:n.length>1&&(o+=\" (\"+$mapArray(n,function(e){return e.string}).join(\", \")+\")\"),i=$newType(4,$kindFunc,o,!1,\"\",!1,null),$funcTypes[t]=i,i.init(e,n,r)}return i},$interfaceTypes={},$interfaceType=function(e){var n=$mapArray(e,function(e){return e.pkg+\",\"+e.name+\",\"+e.typ.id}).join(\"$\"),r=$interfaceTypes[n];if(void 0===r){var t=\"interface {}\";0!==e.length&&(t=\"interface { \"+$mapArray(e,function(e){return(\"\"!==e.pkg?e.pkg+\".\":\"\")+e.name+e.typ.string.substr(4)}).join(\"; \")+\" }\"),r=$newType(8,$kindInterface,t,!1,\"\",!1,null),$interfaceTypes[n]=r,r.init(e)}return 
r},$emptyInterface=$interfaceType([]),$ifaceNil={},$error=$newType(8,$kindInterface,\"error\",!0,\"\",!1,null);$error.init([{prop:\"Error\",name:\"Error\",pkg:\"\",typ:$funcType([],[$String],!1)}]);var $panicValue,$jsObjectPtr,$jsErrorPtr,$mapTypes={},$mapType=function(e,n){var r=e.id+\"$\"+n.id,t=$mapTypes[r];return void 0===t&&(t=$newType(4,$kindMap,\"map[\"+e.string+\"]\"+n.string,!1,\"\",!1,null),$mapTypes[r]=t,t.init(e,n)),t},$makeMap=function(e,n){for(var r={},t=0;t2147483647)&&$throwRuntimeError(\"makeslice: len out of range\"),(r<0||r2147483647)&&$throwRuntimeError(\"makeslice: cap out of range\");var t=new e.nativeArray(r);if(e.nativeArray===Array)for(var i=0;i=$curGoroutine.deferStack.length)throw n;if(null!==n){var t=null;try{$curGoroutine.deferStack.push(e),$panic(new $jsErrorPtr(n))}catch(e){t=e}return $curGoroutine.deferStack.pop(),void $callDeferred(e,t)}if(!$curGoroutine.asleep){$stackDepthOffset--;var i=$panicStackDepth,a=$panicValue,o=$curGoroutine.panicStack.pop();void 0!==o&&($panicStackDepth=$getStackDepth(),$panicValue=o);try{for(;;){if(null===e&&void 0===(e=$curGoroutine.deferStack[$curGoroutine.deferStack.length-1])){if($panicStackDepth=null,o.Object instanceof Error)throw o.Object;var $;throw $=o.constructor===$String?o.$val:void 0!==o.Error?o.Error():void 0!==o.String?o.String():o,new Error($)}var c=e.pop();if(void 0===c){if($curGoroutine.deferStack.pop(),void 0!==o){e=null;continue}return}var u=c[0].apply(c[2],c[1]);if(u&&void 0!==u.$blk){if(e.push([u.$blk,[],u]),r)throw null;return}if(void 0!==o&&null===$panicStackDepth)throw null}}finally{void 0!==o&&(null!==$panicStackDepth&&$curGoroutine.panicStack.push(o),$panicStackDepth=i,$panicValue=a),$stackDepthOffset++}}},$panic=function(e){$curGoroutine.panicStack.push(e),$callDeferred(null,null,!0)},$recover=function(){return null===$panicStackDepth||void 0!==$panicStackDepth&&$panicStackDepth!==$getStackDepth()-2?$ifaceNil:($panicStackDepth=null,$panicValue)},$throw=function(e){throw e},$noGoroutine={asleep:!1,exit:!1,deferStack:[],panicStack:[]},$curGoroutine=$noGoroutine,$totalGoroutines=0,$awakeGoroutines=0,$checkForDeadlock=!0,$mainFinished=!1,$go=function(e,n){$totalGoroutines++,$awakeGoroutines++;var r=function(){try{$curGoroutine=r;var t=e.apply(void 0,n);if(t&&void 0!==t.$blk)return e=function(){return t.$blk()},void(n=[]);r.exit=!0}catch(e){if(!r.exit)throw e}finally{$curGoroutine=$noGoroutine,r.exit&&($totalGoroutines--,r.asleep=!0),r.asleep&&($awakeGoroutines--,!$mainFinished&&0===$awakeGoroutines&&$checkForDeadlock&&(console.error(\"fatal error: all goroutines are asleep - deadlock!\"),void 0!==$global.process&&$global.process.exit(2)))}};r.asleep=!1,r.exit=!1,r.deferStack=[],r.panicStack=[],$schedule(r)},$scheduled=[],$runScheduled=function(){try{for(var e;void 0!==(e=$scheduled.shift());)e()}finally{$scheduled.length>0&&setTimeout($runScheduled,0)}},$schedule=function(e){e.asleep&&(e.asleep=!1,$awakeGoroutines++),$scheduled.push(e),$curGoroutine===$noGoroutine&&$runScheduled()},$setTimeout=function(e,n){return $awakeGoroutines++,setTimeout(function(){$awakeGoroutines--,e()},n)},$block=function(){$curGoroutine===$noGoroutine&&$throwRuntimeError(\"cannot block in JavaScript callback, fix by wrapping code in goroutine\"),$curGoroutine.asleep=!0},$send=function(e,n){e.$closed&&$throwRuntimeError(\"send on closed channel\");var r=e.$recvQueue.shift();if(void 0===r){if(!(e.$buffer.length65535){var u=Math.floor((c-65536)/1024)+55296,l=(c-65536)%1024+56320;$+=String.fromCharCode(u,l)}else 
$+=String.fromCharCode(c)}return $;case $kindStruct:var s=$packages.time;if(void 0!==s&&e.constructor===s.Time.ptr){var f=$div64(e.UnixNano(),new $Int64(0,1e6));return new Date($flatten64(f))}var d={},p=function(e,n){if(n===$jsObjectPtr)return e;switch(n.kind){case $kindPtr:return e===n.nil?d:p(e.$get(),n.elem);case $kindStruct:var r=n.fields[0];return p(e[r.prop],r.typ);case $kindInterface:return p(e.$val,e.constructor);default:return d}},h=p(e,n);if(h!==d)return h;h={};for(i=0;i>24;case $kindInt16:return parseInt(e)<<16>>16;case $kindInt32:return parseInt(e)>>0;case $kindUint:return parseInt(e);case $kindUint8:return parseInt(e)<<24>>>24;case $kindUint16:return parseInt(e)<<16>>>16;case $kindUint32:case $kindUintptr:return parseInt(e)>>>0;case $kindInt64:case $kindUint64:return new n(0,e);case $kindFloat32:case $kindFloat64:return parseFloat(e);case $kindArray:return e.length!==n.len&&$throwRuntimeError(\"got array with wrong size from JavaScript native\"),$mapArray(e,function(e){return $internalize(e,n.elem)});case $kindFunc:return function(){for(var t=[],i=0;i=128)return!1;return!0};\n" diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/types.go b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/types.go deleted file mode 100644 index 0d37509ba..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/types.go +++ /dev/null @@ -1,747 +0,0 @@ -package prelude - -const types = ` -var $kindBool = 1; -var $kindInt = 2; -var $kindInt8 = 3; -var $kindInt16 = 4; -var $kindInt32 = 5; -var $kindInt64 = 6; -var $kindUint = 7; -var $kindUint8 = 8; -var $kindUint16 = 9; -var $kindUint32 = 10; -var $kindUint64 = 11; -var $kindUintptr = 12; -var $kindFloat32 = 13; -var $kindFloat64 = 14; -var $kindComplex64 = 15; -var $kindComplex128 = 16; -var $kindArray = 17; -var $kindChan = 18; -var $kindFunc = 19; -var $kindInterface = 20; -var $kindMap = 21; -var $kindPtr = 22; -var $kindSlice = 23; -var $kindString = 24; -var $kindStruct = 25; -var $kindUnsafePointer = 26; - -var $methodSynthesizers = []; -var $addMethodSynthesizer = function(f) { - if ($methodSynthesizers === null) { - f(); - return; - } - $methodSynthesizers.push(f); -}; -var $synthesizeMethods = function() { - $methodSynthesizers.forEach(function(f) { f(); }); - $methodSynthesizers = null; -}; - -var $ifaceKeyFor = function(x) { - if (x === $ifaceNil) { - return 'nil'; - } - var c = x.constructor; - return c.string + '$' + c.keyFor(x.$val); -}; - -var $identity = function(x) { return x; }; - -var $typeIDCounter = 0; - -var $idKey = function(x) { - if (x.$id === undefined) { - $idCounter++; - x.$id = $idCounter; - } - return String(x.$id); -}; - -var $newType = function(size, kind, string, named, pkg, exported, constructor) { - var typ; - switch(kind) { - case $kindBool: - case $kindInt: - case $kindInt8: - case $kindInt16: - case $kindInt32: - case $kindUint: - case $kindUint8: - case $kindUint16: - case $kindUint32: - case $kindUintptr: - case $kindUnsafePointer: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.keyFor = $identity; - break; - - case $kindString: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.keyFor = function(x) { return "$" + x; }; - break; - - case $kindFloat32: - case $kindFloat64: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.keyFor = function(x) { return $floatKey(x); }; - break; - - case $kindInt64: - typ = function(high, low) { - this.$high = (high + Math.floor(Math.ceil(low) / 4294967296)) >> 0; - this.$low = low >>> 0; - 
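Both the minified prelude and the readable types.go here model int64/uint64 as a pair of 32-bit halves ($high, $low), with $flatten64 recombining them. A hedged Go sketch of that representation — the helper names split/flatten are mine, not from the source:

```go
package main

import "fmt"

// split and flatten mirror the prelude's Int64 representation: a
// signed 32-bit high word plus an unsigned 32-bit low word. The
// prelude's $flatten64 computes 4294967296*high+low in float64, so
// it is exact only up to 2^53; the bitwise version here is exact.
func split(v int64) (high int32, low uint32) {
	return int32(v >> 32), uint32(v)
}

func flatten(high int32, low uint32) int64 {
	return int64(high)<<32 | int64(low)
}

func main() {
	for _, v := range []int64{0, 1, -1, 1 << 40, -(1 << 40)} {
		h, l := split(v)
		fmt.Printf("%d -> high=%d low=%d -> %d\n", v, h, l, flatten(h, l))
	}
}
```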
this.$val = this; - }; - typ.keyFor = function(x) { return x.$high + "$" + x.$low; }; - break; - - case $kindUint64: - typ = function(high, low) { - this.$high = (high + Math.floor(Math.ceil(low) / 4294967296)) >>> 0; - this.$low = low >>> 0; - this.$val = this; - }; - typ.keyFor = function(x) { return x.$high + "$" + x.$low; }; - break; - - case $kindComplex64: - typ = function(real, imag) { - this.$real = $fround(real); - this.$imag = $fround(imag); - this.$val = this; - }; - typ.keyFor = function(x) { return x.$real + "$" + x.$imag; }; - break; - - case $kindComplex128: - typ = function(real, imag) { - this.$real = real; - this.$imag = imag; - this.$val = this; - }; - typ.keyFor = function(x) { return x.$real + "$" + x.$imag; }; - break; - - case $kindArray: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.ptr = $newType(4, $kindPtr, "*" + string, false, "", false, function(array) { - this.$get = function() { return array; }; - this.$set = function(v) { typ.copy(this, v); }; - this.$val = array; - }); - typ.init = function(elem, len) { - typ.elem = elem; - typ.len = len; - typ.comparable = elem.comparable; - typ.keyFor = function(x) { - return Array.prototype.join.call($mapArray(x, function(e) { - return String(elem.keyFor(e)).replace(/\\/g, "\\\\").replace(/\$/g, "\\$"); - }), "$"); - }; - typ.copy = function(dst, src) { - $copyArray(dst, src, 0, 0, src.length, elem); - }; - typ.ptr.init(typ); - Object.defineProperty(typ.ptr.nil, "nilCheck", { get: $throwNilPointerError }); - }; - break; - - case $kindChan: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.keyFor = $idKey; - typ.init = function(elem, sendOnly, recvOnly) { - typ.elem = elem; - typ.sendOnly = sendOnly; - typ.recvOnly = recvOnly; - }; - break; - - case $kindFunc: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.init = function(params, results, variadic) { - typ.params = params; - typ.results = results; - typ.variadic = variadic; - typ.comparable = false; - }; - break; - - case $kindInterface: - typ = { implementedBy: {}, missingMethodFor: {} }; - typ.keyFor = $ifaceKeyFor; - typ.init = function(methods) { - typ.methods = methods; - methods.forEach(function(m) { - $ifaceNil[m.prop] = $throwNilPointerError; - }); - }; - break; - - case $kindMap: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.init = function(key, elem) { - typ.key = key; - typ.elem = elem; - typ.comparable = false; - }; - break; - - case $kindPtr: - typ = constructor || function(getter, setter, target) { - this.$get = getter; - this.$set = setter; - this.$target = target; - this.$val = this; - }; - typ.keyFor = $idKey; - typ.init = function(elem) { - typ.elem = elem; - typ.wrapped = (elem.kind === $kindArray); - typ.nil = new typ($throwNilPointerError, $throwNilPointerError); - }; - break; - - case $kindSlice: - typ = function(array) { - if (array.constructor !== typ.nativeArray) { - array = new typ.nativeArray(array); - } - this.$array = array; - this.$offset = 0; - this.$length = array.length; - this.$capacity = array.length; - this.$val = this; - }; - typ.init = function(elem) { - typ.elem = elem; - typ.comparable = false; - typ.nativeArray = $nativeArray(elem.kind); - typ.nil = new typ([]); - }; - break; - - case $kindStruct: - typ = function(v) { this.$val = v; }; - typ.wrapped = true; - typ.ptr = $newType(4, $kindPtr, "*" + string, false, pkg, exported, constructor); - typ.ptr.elem = typ; - typ.ptr.prototype.$get = function() { return this; }; - typ.ptr.prototype.$set = 
function(v) { typ.copy(this, v); }; - typ.init = function(pkgPath, fields) { - typ.pkgPath = pkgPath; - typ.fields = fields; - fields.forEach(function(f) { - if (!f.typ.comparable) { - typ.comparable = false; - } - }); - typ.keyFor = function(x) { - var val = x.$val; - return $mapArray(fields, function(f) { - return String(f.typ.keyFor(val[f.prop])).replace(/\\/g, "\\\\").replace(/\$/g, "\\$"); - }).join("$"); - }; - typ.copy = function(dst, src) { - for (var i = 0; i < fields.length; i++) { - var f = fields[i]; - switch (f.typ.kind) { - case $kindArray: - case $kindStruct: - f.typ.copy(dst[f.prop], src[f.prop]); - continue; - default: - dst[f.prop] = src[f.prop]; - continue; - } - } - }; - /* nil value */ - var properties = {}; - fields.forEach(function(f) { - properties[f.prop] = { get: $throwNilPointerError, set: $throwNilPointerError }; - }); - typ.ptr.nil = Object.create(constructor.prototype, properties); - typ.ptr.nil.$val = typ.ptr.nil; - /* methods for embedded fields */ - $addMethodSynthesizer(function() { - var synthesizeMethod = function(target, m, f) { - if (target.prototype[m.prop] !== undefined) { return; } - target.prototype[m.prop] = function() { - var v = this.$val[f.prop]; - if (f.typ === $jsObjectPtr) { - v = new $jsObjectPtr(v); - } - if (v.$val === undefined) { - v = new f.typ(v); - } - return v[m.prop].apply(v, arguments); - }; - }; - fields.forEach(function(f) { - if (f.embedded) { - $methodSet(f.typ).forEach(function(m) { - synthesizeMethod(typ, m, f); - synthesizeMethod(typ.ptr, m, f); - }); - $methodSet($ptrType(f.typ)).forEach(function(m) { - synthesizeMethod(typ.ptr, m, f); - }); - } - }); - }); - }; - break; - - default: - $panic(new $String("invalid kind: " + kind)); - } - - switch (kind) { - case $kindBool: - case $kindMap: - typ.zero = function() { return false; }; - break; - - case $kindInt: - case $kindInt8: - case $kindInt16: - case $kindInt32: - case $kindUint: - case $kindUint8 : - case $kindUint16: - case $kindUint32: - case $kindUintptr: - case $kindUnsafePointer: - case $kindFloat32: - case $kindFloat64: - typ.zero = function() { return 0; }; - break; - - case $kindString: - typ.zero = function() { return ""; }; - break; - - case $kindInt64: - case $kindUint64: - case $kindComplex64: - case $kindComplex128: - var zero = new typ(0, 0); - typ.zero = function() { return zero; }; - break; - - case $kindPtr: - case $kindSlice: - typ.zero = function() { return typ.nil; }; - break; - - case $kindChan: - typ.zero = function() { return $chanNil; }; - break; - - case $kindFunc: - typ.zero = function() { return $throwNilPointerError; }; - break; - - case $kindInterface: - typ.zero = function() { return $ifaceNil; }; - break; - - case $kindArray: - typ.zero = function() { - var arrayClass = $nativeArray(typ.elem.kind); - if (arrayClass !== Array) { - return new arrayClass(typ.len); - } - var array = new Array(typ.len); - for (var i = 0; i < typ.len; i++) { - array[i] = typ.elem.zero(); - } - return array; - }; - break; - - case $kindStruct: - typ.zero = function() { return new typ.ptr(); }; - break; - - default: - $panic(new $String("invalid kind: " + kind)); - } - - typ.id = $typeIDCounter; - $typeIDCounter++; - typ.size = size; - typ.kind = kind; - typ.string = string; - typ.named = named; - typ.pkg = pkg; - typ.exported = exported; - typ.methods = []; - typ.methodSetCache = null; - typ.comparable = true; - return typ; -}; - -var $methodSet = function(typ) { - if (typ.methodSetCache !== null) { - return typ.methodSetCache; - } - var base = {}; - - var isPtr = 
(typ.kind === $kindPtr); - if (isPtr && typ.elem.kind === $kindInterface) { - typ.methodSetCache = []; - return []; - } - - var current = [{typ: isPtr ? typ.elem : typ, indirect: isPtr}]; - - var seen = {}; - - while (current.length > 0) { - var next = []; - var mset = []; - - current.forEach(function(e) { - if (seen[e.typ.string]) { - return; - } - seen[e.typ.string] = true; - - if (e.typ.named) { - mset = mset.concat(e.typ.methods); - if (e.indirect) { - mset = mset.concat($ptrType(e.typ).methods); - } - } - - switch (e.typ.kind) { - case $kindStruct: - e.typ.fields.forEach(function(f) { - if (f.embedded) { - var fTyp = f.typ; - var fIsPtr = (fTyp.kind === $kindPtr); - next.push({typ: fIsPtr ? fTyp.elem : fTyp, indirect: e.indirect || fIsPtr}); - } - }); - break; - - case $kindInterface: - mset = mset.concat(e.typ.methods); - break; - } - }); - - mset.forEach(function(m) { - if (base[m.name] === undefined) { - base[m.name] = m; - } - }); - - current = next; - } - - typ.methodSetCache = []; - Object.keys(base).sort().forEach(function(name) { - typ.methodSetCache.push(base[name]); - }); - return typ.methodSetCache; -}; - -var $Bool = $newType( 1, $kindBool, "bool", true, "", false, null); -var $Int = $newType( 4, $kindInt, "int", true, "", false, null); -var $Int8 = $newType( 1, $kindInt8, "int8", true, "", false, null); -var $Int16 = $newType( 2, $kindInt16, "int16", true, "", false, null); -var $Int32 = $newType( 4, $kindInt32, "int32", true, "", false, null); -var $Int64 = $newType( 8, $kindInt64, "int64", true, "", false, null); -var $Uint = $newType( 4, $kindUint, "uint", true, "", false, null); -var $Uint8 = $newType( 1, $kindUint8, "uint8", true, "", false, null); -var $Uint16 = $newType( 2, $kindUint16, "uint16", true, "", false, null); -var $Uint32 = $newType( 4, $kindUint32, "uint32", true, "", false, null); -var $Uint64 = $newType( 8, $kindUint64, "uint64", true, "", false, null); -var $Uintptr = $newType( 4, $kindUintptr, "uintptr", true, "", false, null); -var $Float32 = $newType( 4, $kindFloat32, "float32", true, "", false, null); -var $Float64 = $newType( 8, $kindFloat64, "float64", true, "", false, null); -var $Complex64 = $newType( 8, $kindComplex64, "complex64", true, "", false, null); -var $Complex128 = $newType(16, $kindComplex128, "complex128", true, "", false, null); -var $String = $newType( 8, $kindString, "string", true, "", false, null); -var $UnsafePointer = $newType( 4, $kindUnsafePointer, "unsafe.Pointer", true, "", false, null); - -var $nativeArray = function(elemKind) { - switch (elemKind) { - case $kindInt: - return Int32Array; - case $kindInt8: - return Int8Array; - case $kindInt16: - return Int16Array; - case $kindInt32: - return Int32Array; - case $kindUint: - return Uint32Array; - case $kindUint8: - return Uint8Array; - case $kindUint16: - return Uint16Array; - case $kindUint32: - return Uint32Array; - case $kindUintptr: - return Uint32Array; - case $kindFloat32: - return Float32Array; - case $kindFloat64: - return Float64Array; - default: - return Array; - } -}; -var $toNativeArray = function(elemKind, array) { - var nativeArray = $nativeArray(elemKind); - if (nativeArray === Array) { - return array; - } - return new nativeArray(array); -}; -var $arrayTypes = {}; -var $arrayType = function(elem, len) { - var typeKey = elem.id + "$" + len; - var typ = $arrayTypes[typeKey]; - if (typ === undefined) { - typ = $newType(12, $kindArray, "[" + len + "]" + elem.string, false, "", false, null); - $arrayTypes[typeKey] = typ; - typ.init(elem, len); - } - return 
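$methodSet, removed above, computes a type's method set by walking embedded fields breadth-first and promoting their methods — exactly Go's embedding rule. A minimal Go illustration of the relation it computes:

```go
package main

import "fmt"

type Base struct{ name string }

func (b Base) Hello() string { return "hello, " + b.name }

// Wrapper embeds Base, so Base's methods are promoted into Wrapper's
// method set — the relation $methodSet derives by walking embedded
// fields breadth-first and keeping the shallowest match per name.
type Wrapper struct {
	Base
	extra int
}

func main() {
	w := Wrapper{Base: Base{name: "gopherjs"}}
	fmt.Println(w.Hello()) // promoted from Base
}
```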
typ; -}; - -var $chanType = function(elem, sendOnly, recvOnly) { - var string = (recvOnly ? "<-" : "") + "chan" + (sendOnly ? "<- " : " ") + elem.string; - var field = sendOnly ? "SendChan" : (recvOnly ? "RecvChan" : "Chan"); - var typ = elem[field]; - if (typ === undefined) { - typ = $newType(4, $kindChan, string, false, "", false, null); - elem[field] = typ; - typ.init(elem, sendOnly, recvOnly); - } - return typ; -}; -var $Chan = function(elem, capacity) { - if (capacity < 0 || capacity > 2147483647) { - $throwRuntimeError("makechan: size out of range"); - } - this.$elem = elem; - this.$capacity = capacity; - this.$buffer = []; - this.$sendQueue = []; - this.$recvQueue = []; - this.$closed = false; -}; -var $chanNil = new $Chan(null, 0); -$chanNil.$sendQueue = $chanNil.$recvQueue = { length: 0, push: function() {}, shift: function() { return undefined; }, indexOf: function() { return -1; } }; - -var $funcTypes = {}; -var $funcType = function(params, results, variadic) { - var typeKey = $mapArray(params, function(p) { return p.id; }).join(",") + "$" + $mapArray(results, function(r) { return r.id; }).join(",") + "$" + variadic; - var typ = $funcTypes[typeKey]; - if (typ === undefined) { - var paramTypes = $mapArray(params, function(p) { return p.string; }); - if (variadic) { - paramTypes[paramTypes.length - 1] = "..." + paramTypes[paramTypes.length - 1].substr(2); - } - var string = "func(" + paramTypes.join(", ") + ")"; - if (results.length === 1) { - string += " " + results[0].string; - } else if (results.length > 1) { - string += " (" + $mapArray(results, function(r) { return r.string; }).join(", ") + ")"; - } - typ = $newType(4, $kindFunc, string, false, "", false, null); - $funcTypes[typeKey] = typ; - typ.init(params, results, variadic); - } - return typ; -}; - -var $interfaceTypes = {}; -var $interfaceType = function(methods) { - var typeKey = $mapArray(methods, function(m) { return m.pkg + "," + m.name + "," + m.typ.id; }).join("$"); - var typ = $interfaceTypes[typeKey]; - if (typ === undefined) { - var string = "interface {}"; - if (methods.length !== 0) { - string = "interface { " + $mapArray(methods, function(m) { - return (m.pkg !== "" ? m.pkg + "." 
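$Chan, removed above, rejects capacities outside [0, 2147483647] with a "makechan: size out of range" runtime error, matching what the Go runtime does when a negative capacity is computed at run time:

```go
package main

import "fmt"

func main() {
	defer func() {
		// The runtime panic corresponds to the deleted prelude's
		// $throwRuntimeError("makechan: size out of range").
		fmt.Println("recovered:", recover())
	}()
	n := -1 // computed at run time; a negative constant would not compile
	_ = make(chan int, n)
}
```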
: "") + m.name + m.typ.string.substr(4); - }).join("; ") + " }"; - } - typ = $newType(8, $kindInterface, string, false, "", false, null); - $interfaceTypes[typeKey] = typ; - typ.init(methods); - } - return typ; -}; -var $emptyInterface = $interfaceType([]); -var $ifaceNil = {}; -var $error = $newType(8, $kindInterface, "error", true, "", false, null); -$error.init([{prop: "Error", name: "Error", pkg: "", typ: $funcType([], [$String], false)}]); - -var $mapTypes = {}; -var $mapType = function(key, elem) { - var typeKey = key.id + "$" + elem.id; - var typ = $mapTypes[typeKey]; - if (typ === undefined) { - typ = $newType(4, $kindMap, "map[" + key.string + "]" + elem.string, false, "", false, null); - $mapTypes[typeKey] = typ; - typ.init(key, elem); - } - return typ; -}; -var $makeMap = function(keyForFunc, entries) { - var m = {}; - for (var i = 0; i < entries.length; i++) { - var e = entries[i]; - m[keyForFunc(e.k)] = e; - } - return m; -}; - -var $ptrType = function(elem) { - var typ = elem.ptr; - if (typ === undefined) { - typ = $newType(4, $kindPtr, "*" + elem.string, false, "", elem.exported, null); - elem.ptr = typ; - typ.init(elem); - } - return typ; -}; - -var $newDataPointer = function(data, constructor) { - if (constructor.elem.kind === $kindStruct) { - return data; - } - return new constructor(function() { return data; }, function(v) { data = v; }); -}; - -var $indexPtr = function(array, index, constructor) { - array.$ptr = array.$ptr || {}; - return array.$ptr[index] || (array.$ptr[index] = new constructor(function() { return array[index]; }, function(v) { array[index] = v; })); -}; - -var $sliceType = function(elem) { - var typ = elem.slice; - if (typ === undefined) { - typ = $newType(12, $kindSlice, "[]" + elem.string, false, "", false, null); - elem.slice = typ; - typ.init(elem); - } - return typ; -}; -var $makeSlice = function(typ, length, capacity) { - capacity = capacity || length; - if (length < 0 || length > 2147483647) { - $throwRuntimeError("makeslice: len out of range"); - } - if (capacity < 0 || capacity < length || capacity > 2147483647) { - $throwRuntimeError("makeslice: cap out of range"); - } - var array = new typ.nativeArray(capacity); - if (typ.nativeArray === Array) { - for (var i = 0; i < capacity; i++) { - array[i] = typ.elem.zero(); - } - } - var slice = new typ(array); - slice.$length = length; - return slice; -}; - -var $structTypes = {}; -var $structType = function(pkgPath, fields) { - var typeKey = $mapArray(fields, function(f) { return f.name + "," + f.typ.id + "," + f.tag; }).join("$"); - var typ = $structTypes[typeKey]; - if (typ === undefined) { - var string = "struct { " + $mapArray(fields, function(f) { - return f.name + " " + f.typ.string + (f.tag !== "" ? (" \"" + f.tag.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\"") : ""); - }).join("; ") + " }"; - if (fields.length === 0) { - string = "struct {}"; - } - typ = $newType(0, $kindStruct, string, false, "", false, function() { - this.$val = this; - for (var i = 0; i < fields.length; i++) { - var f = fields[i]; - var arg = arguments[i]; - this[f.prop] = arg !== undefined ? 
arg : f.typ.zero(); - } - }); - $structTypes[typeKey] = typ; - typ.init(pkgPath, fields); - } - return typ; -}; - -var $assertType = function(value, type, returnTuple) { - var isInterface = (type.kind === $kindInterface), ok, missingMethod = ""; - if (value === $ifaceNil) { - ok = false; - } else if (!isInterface) { - ok = value.constructor === type; - } else { - var valueTypeString = value.constructor.string; - ok = type.implementedBy[valueTypeString]; - if (ok === undefined) { - ok = true; - var valueMethodSet = $methodSet(value.constructor); - var interfaceMethods = type.methods; - for (var i = 0; i < interfaceMethods.length; i++) { - var tm = interfaceMethods[i]; - var found = false; - for (var j = 0; j < valueMethodSet.length; j++) { - var vm = valueMethodSet[j]; - if (vm.name === tm.name && vm.pkg === tm.pkg && vm.typ === tm.typ) { - found = true; - break; - } - } - if (!found) { - ok = false; - type.missingMethodFor[valueTypeString] = tm.name; - break; - } - } - type.implementedBy[valueTypeString] = ok; - } - if (!ok) { - missingMethod = type.missingMethodFor[valueTypeString]; - } - } - - if (!ok) { - if (returnTuple) { - return [type.zero(), false]; - } - $panic(new $packages["runtime"].TypeAssertionError.ptr( - $packages["runtime"]._type.ptr.nil, - (value === $ifaceNil ? $packages["runtime"]._type.ptr.nil : new $packages["runtime"]._type.ptr(value.constructor.string)), - new $packages["runtime"]._type.ptr(type.string), - missingMethod)); - } - - if (!isInterface) { - value = value.$val; - } - if (type === $jsObjectPtr) { - value = value.object; - } - return returnTuple ? [value, true] : value; -}; -` diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/uglifyjs_options.json b/vendor/github.com/gopherjs/gopherjs/compiler/prelude/uglifyjs_options.json deleted file mode 100644 index b603add95..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/prelude/uglifyjs_options.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "compress": { - "arrows": true, - "booleans": true, - "collapse_vars": true, - "comparisons": true, - "computed_props": true, - "conditionals": true, - "dead_code": true, - "drop_console": false, - "drop_debugger": true, - "ecma": 5, - "evaluate": true, - "expression": false, - "global_defs": {}, - "hoist_funs": false, - "hoist_props": true, - "hoist_vars": false, - "ie8": false, - "if_return": true, - "inline": true, - "join_vars": true, - "keep_classnames": false, - "keep_fargs": true, - "keep_fnames": false, - "keep_infinity": false, - "loops": true, - "negate_iife": true, - "passes": 1, - "properties": true, - "pure_funcs": null, - "pure_getters": "strict", - "reduce_funcs": true, - "reduce_vars": true, - "sequences": true, - "side_effects": true, - "switches": true, - "top_retain": null, - "toplevel": false, - "typeofs": true, - "unsafe": false, - "unsafe_Function": false, - "unsafe_arrows": false, - "unsafe_comps": false, - "unsafe_math": false, - "unsafe_methods": false, - "unsafe_proto": false, - "unsafe_regexp": false, - "unsafe_undefined": false, - "unused": true, - "warnings": false - }, - "mangle": { - "eval": false, - "ie8": false, - "keep_classnames": false, - "keep_fnames": false, - "properties": false, - "reserved": [], - "safari10": false, - "toplevel": false - }, - "output": { - "ascii_only": false, - "beautify": false, - "bracketize": false, - "comments": "/@license|@preserve|^!/", - "ecma": 5, - "ie8": false, - "indent_level": 4, - "indent_start": 0, - "inline_script": true, - "keep_quoted_props": false, - "max_line_len": false, - 
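$assertType, removed above, implements both forms of a Go type assertion: with returnTuple it yields a (zero value, false) pair, and without it it panics with a runtime.TypeAssertionError. The Go equivalent of both paths:

```go
package main

import "fmt"

func main() {
	var x interface{} = "hello"

	// Comma-ok form: $assertType with returnTuple=true returns
	// [type.zero(), false] instead of panicking.
	n, ok := x.(int)
	fmt.Println(n, ok) // 0 false

	// Single-value form: $assertType panics with a
	// runtime.TypeAssertionError, recovered here.
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = x.(int)
}
```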
"preamble": null, - "preserve_line": false, - "quote_keys": false, - "quote_style": 0, - "safari10": false, - "semicolons": true, - "shebang": true, - "source_map": null, - "webkit": false, - "width": 80, - "wrap_iife": false - }, - "parse": { - "bare_returns": false, - "ecma": 8, - "expression": false, - "filename": null, - "html5_comments": true, - "shebang": true, - "strict": false, - "toplevel": null - }, - "wrap": false -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/statements.go b/vendor/github.com/gopherjs/gopherjs/compiler/statements.go deleted file mode 100644 index b83396235..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/statements.go +++ /dev/null @@ -1,786 +0,0 @@ -package compiler - -import ( - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "strings" - - "github.com/gopherjs/gopherjs/compiler/analysis" - "github.com/gopherjs/gopherjs/compiler/astutil" - "github.com/gopherjs/gopherjs/compiler/filter" - "github.com/gopherjs/gopherjs/compiler/typesutil" -) - -func (c *funcContext) translateStmtList(stmts []ast.Stmt) { - for _, stmt := range stmts { - c.translateStmt(stmt, nil) - } - c.SetPos(token.NoPos) -} - -func (c *funcContext) translateStmt(stmt ast.Stmt, label *types.Label) { - c.SetPos(stmt.Pos()) - - stmt = filter.IncDecStmt(stmt, c.p.Info.Info) - stmt = filter.Assign(stmt, c.p.Info.Info, c.p.Info.Pkg) - - switch s := stmt.(type) { - case *ast.BlockStmt: - c.translateStmtList(s.List) - - case *ast.IfStmt: - var caseClauses []*ast.CaseClause - ifStmt := s - for { - if ifStmt.Init != nil { - panic("simplification error") - } - caseClauses = append(caseClauses, &ast.CaseClause{List: []ast.Expr{ifStmt.Cond}, Body: ifStmt.Body.List}) - elseStmt, ok := ifStmt.Else.(*ast.IfStmt) - if !ok { - break - } - ifStmt = elseStmt - } - var defaultClause *ast.CaseClause - if block, ok := ifStmt.Else.(*ast.BlockStmt); ok { - defaultClause = &ast.CaseClause{Body: block.List} - } - c.translateBranchingStmt(caseClauses, defaultClause, false, c.translateExpr, nil, c.Flattened[s]) - - case *ast.SwitchStmt: - if s.Init != nil || s.Tag != nil || len(s.Body.List) != 1 { - panic("simplification error") - } - clause := s.Body.List[0].(*ast.CaseClause) - if len(clause.List) != 0 { - panic("simplification error") - } - - prevFlowData := c.flowDatas[nil] - data := &flowData{ - postStmt: prevFlowData.postStmt, // for "continue" of outer loop - beginCase: prevFlowData.beginCase, // same - } - c.flowDatas[nil] = data - c.flowDatas[label] = data - defer func() { - delete(c.flowDatas, label) - c.flowDatas[nil] = prevFlowData - }() - - if c.Flattened[s] { - data.endCase = c.caseCounter - c.caseCounter++ - - c.Indent(func() { - c.translateStmtList(clause.Body) - }) - c.Printf("case %d:", data.endCase) - return - } - - if label != nil || analysis.HasBreak(clause) { - if label != nil { - c.Printf("%s:", label.Name()) - } - c.Printf("switch (0) { default:") - c.Indent(func() { - c.translateStmtList(clause.Body) - }) - c.Printf("}") - return - } - - c.translateStmtList(clause.Body) - - case *ast.TypeSwitchStmt: - if s.Init != nil { - c.translateStmt(s.Init, nil) - } - refVar := c.newVariable("_ref") - var expr ast.Expr - switch a := s.Assign.(type) { - case *ast.AssignStmt: - expr = a.Rhs[0].(*ast.TypeAssertExpr).X - case *ast.ExprStmt: - expr = a.X.(*ast.TypeAssertExpr).X - } - c.Printf("%s = %s;", refVar, c.translateExpr(expr)) - translateCond := func(cond ast.Expr) *expression { - if types.Identical(c.p.TypeOf(cond), types.Typ[types.UntypedNil]) { - return 
c.formatExpr("%s === $ifaceNil", refVar) - } - return c.formatExpr("$assertType(%s, %s, true)[1]", refVar, c.typeName(c.p.TypeOf(cond))) - } - var caseClauses []*ast.CaseClause - var defaultClause *ast.CaseClause - for _, cc := range s.Body.List { - clause := cc.(*ast.CaseClause) - var bodyPrefix []ast.Stmt - if implicit := c.p.Implicits[clause]; implicit != nil { - value := refVar - if typesutil.IsJsObject(implicit.Type().Underlying()) { - value += ".$val.object" - } else if _, ok := implicit.Type().Underlying().(*types.Interface); !ok { - value += ".$val" - } - bodyPrefix = []ast.Stmt{&ast.AssignStmt{ - Lhs: []ast.Expr{c.newIdent(c.objectName(implicit), implicit.Type())}, - Tok: token.DEFINE, - Rhs: []ast.Expr{c.newIdent(value, implicit.Type())}, - }} - } - c := &ast.CaseClause{ - List: clause.List, - Body: append(bodyPrefix, clause.Body...), - } - if len(c.List) == 0 { - defaultClause = c - continue - } - caseClauses = append(caseClauses, c) - } - c.translateBranchingStmt(caseClauses, defaultClause, true, translateCond, label, c.Flattened[s]) - - case *ast.ForStmt: - if s.Init != nil { - c.translateStmt(s.Init, nil) - } - cond := func() string { - if s.Cond == nil { - return "true" - } - return c.translateExpr(s.Cond).String() - } - c.translateLoopingStmt(cond, s.Body, nil, func() { - if s.Post != nil { - c.translateStmt(s.Post, nil) - } - }, label, c.Flattened[s]) - - case *ast.RangeStmt: - refVar := c.newVariable("_ref") - c.Printf("%s = %s;", refVar, c.translateExpr(s.X)) - - switch t := c.p.TypeOf(s.X).Underlying().(type) { - case *types.Basic: - iVar := c.newVariable("_i") - c.Printf("%s = 0;", iVar) - runeVar := c.newVariable("_rune") - c.translateLoopingStmt(func() string { return iVar + " < " + refVar + ".length" }, s.Body, func() { - c.Printf("%s = $decodeRune(%s, %s);", runeVar, refVar, iVar) - if !isBlank(s.Key) { - c.Printf("%s", c.translateAssign(s.Key, c.newIdent(iVar, types.Typ[types.Int]), s.Tok == token.DEFINE)) - } - if !isBlank(s.Value) { - c.Printf("%s", c.translateAssign(s.Value, c.newIdent(runeVar+"[0]", types.Typ[types.Rune]), s.Tok == token.DEFINE)) - } - }, func() { - c.Printf("%s += %s[1];", iVar, runeVar) - }, label, c.Flattened[s]) - - case *types.Map: - iVar := c.newVariable("_i") - c.Printf("%s = 0;", iVar) - keysVar := c.newVariable("_keys") - c.Printf("%s = $keys(%s);", keysVar, refVar) - c.translateLoopingStmt(func() string { return iVar + " < " + keysVar + ".length" }, s.Body, func() { - entryVar := c.newVariable("_entry") - c.Printf("%s = %s[%s[%s]];", entryVar, refVar, keysVar, iVar) - c.translateStmt(&ast.IfStmt{ - Cond: c.newIdent(entryVar+" === undefined", types.Typ[types.Bool]), - Body: &ast.BlockStmt{List: []ast.Stmt{&ast.BranchStmt{Tok: token.CONTINUE}}}, - }, nil) - if !isBlank(s.Key) { - c.Printf("%s", c.translateAssign(s.Key, c.newIdent(entryVar+".k", t.Key()), s.Tok == token.DEFINE)) - } - if !isBlank(s.Value) { - c.Printf("%s", c.translateAssign(s.Value, c.newIdent(entryVar+".v", t.Elem()), s.Tok == token.DEFINE)) - } - }, func() { - c.Printf("%s++;", iVar) - }, label, c.Flattened[s]) - - case *types.Array, *types.Pointer, *types.Slice: - var length string - var elemType types.Type - switch t2 := t.(type) { - case *types.Array: - length = fmt.Sprintf("%d", t2.Len()) - elemType = t2.Elem() - case *types.Pointer: - length = fmt.Sprintf("%d", t2.Elem().Underlying().(*types.Array).Len()) - elemType = t2.Elem().Underlying().(*types.Array).Elem() - case *types.Slice: - length = refVar + ".$length" - elemType = t2.Elem() - } - iVar := 
c.newVariable("_i") - c.Printf("%s = 0;", iVar) - c.translateLoopingStmt(func() string { return iVar + " < " + length }, s.Body, func() { - if !isBlank(s.Key) { - c.Printf("%s", c.translateAssign(s.Key, c.newIdent(iVar, types.Typ[types.Int]), s.Tok == token.DEFINE)) - } - if !isBlank(s.Value) { - c.Printf("%s", c.translateAssign(s.Value, c.setType(&ast.IndexExpr{ - X: c.newIdent(refVar, t), - Index: c.newIdent(iVar, types.Typ[types.Int]), - }, elemType), s.Tok == token.DEFINE)) - } - }, func() { - c.Printf("%s++;", iVar) - }, label, c.Flattened[s]) - - case *types.Chan: - okVar := c.newIdent(c.newVariable("_ok"), types.Typ[types.Bool]) - key := s.Key - tok := s.Tok - if key == nil { - key = ast.NewIdent("_") - tok = token.ASSIGN - } - forStmt := &ast.ForStmt{ - Body: &ast.BlockStmt{ - List: []ast.Stmt{ - &ast.AssignStmt{ - Lhs: []ast.Expr{ - key, - okVar, - }, - Rhs: []ast.Expr{ - c.setType(&ast.UnaryExpr{X: c.newIdent(refVar, t), Op: token.ARROW}, types.NewTuple(types.NewVar(0, nil, "", t.Elem()), types.NewVar(0, nil, "", types.Typ[types.Bool]))), - }, - Tok: tok, - }, - &ast.IfStmt{ - Cond: &ast.UnaryExpr{X: okVar, Op: token.NOT}, - Body: &ast.BlockStmt{List: []ast.Stmt{&ast.BranchStmt{Tok: token.BREAK}}}, - }, - s.Body, - }, - }, - } - c.Flattened[forStmt] = true - c.translateStmt(forStmt, label) - - default: - panic("") - } - - case *ast.BranchStmt: - normalLabel := "" - blockingLabel := "" - data := c.flowDatas[nil] - if s.Label != nil { - normalLabel = " " + s.Label.Name - blockingLabel = " s" // use explicit label "s", because surrounding loop may not be flattened - data = c.flowDatas[c.p.Uses[s.Label].(*types.Label)] - } - switch s.Tok { - case token.BREAK: - c.PrintCond(data.endCase == 0, fmt.Sprintf("break%s;", normalLabel), fmt.Sprintf("$s = %d; continue%s;", data.endCase, blockingLabel)) - case token.CONTINUE: - data.postStmt() - c.PrintCond(data.beginCase == 0, fmt.Sprintf("continue%s;", normalLabel), fmt.Sprintf("$s = %d; continue%s;", data.beginCase, blockingLabel)) - case token.GOTO: - c.PrintCond(false, "goto "+s.Label.Name, fmt.Sprintf("$s = %d; continue;", c.labelCase(c.p.Uses[s.Label].(*types.Label)))) - case token.FALLTHROUGH: - // handled in CaseClause - default: - panic("Unhandled branch statment: " + s.Tok.String()) - } - - case *ast.ReturnStmt: - results := s.Results - if c.resultNames != nil { - if len(s.Results) != 0 { - c.translateStmt(&ast.AssignStmt{ - Lhs: c.resultNames, - Tok: token.ASSIGN, - Rhs: s.Results, - }, nil) - } - results = c.resultNames - } - rVal := c.translateResults(results) - if len(c.Flattened) != 0 { - c.Printf("$s = -1; return%s;", rVal) - return - } - c.Printf("return%s;", rVal) - - case *ast.DeferStmt: - isBuiltin := false - isJs := false - switch fun := s.Call.Fun.(type) { - case *ast.Ident: - var builtin *types.Builtin - builtin, isBuiltin = c.p.Uses[fun].(*types.Builtin) - if isBuiltin && builtin.Name() == "recover" { - c.Printf("$deferred.push([$recover, []]);") - return - } - case *ast.SelectorExpr: - isJs = typesutil.IsJsPackage(c.p.Uses[fun.Sel].Pkg()) - } - sig := c.p.TypeOf(s.Call.Fun).Underlying().(*types.Signature) - args := c.translateArgs(sig, s.Call.Args, s.Call.Ellipsis.IsValid()) - if isBuiltin || isJs { - vars := make([]string, len(s.Call.Args)) - callArgs := make([]ast.Expr, len(s.Call.Args)) - for i, arg := range s.Call.Args { - v := c.newVariable("_arg") - vars[i] = v - callArgs[i] = c.newIdent(v, c.p.TypeOf(arg)) - } - call := c.translateExpr(&ast.CallExpr{ - Fun: s.Call.Fun, - Args: callArgs, - Ellipsis: 
s.Call.Ellipsis, - }) - c.Printf("$deferred.push([function(%s) { %s; }, [%s]]);", strings.Join(vars, ", "), call, strings.Join(args, ", ")) - return - } - c.Printf("$deferred.push([%s, [%s]]);", c.translateExpr(s.Call.Fun), strings.Join(args, ", ")) - - case *ast.AssignStmt: - if s.Tok != token.ASSIGN && s.Tok != token.DEFINE { - panic(s.Tok) - } - - switch { - case len(s.Lhs) == 1 && len(s.Rhs) == 1: - lhs := astutil.RemoveParens(s.Lhs[0]) - if isBlank(lhs) { - c.Printf("$unused(%s);", c.translateExpr(s.Rhs[0])) - return - } - c.Printf("%s", c.translateAssign(lhs, s.Rhs[0], s.Tok == token.DEFINE)) - - case len(s.Lhs) > 1 && len(s.Rhs) == 1: - tupleVar := c.newVariable("_tuple") - c.Printf("%s = %s;", tupleVar, c.translateExpr(s.Rhs[0])) - tuple := c.p.TypeOf(s.Rhs[0]).(*types.Tuple) - for i, lhs := range s.Lhs { - lhs = astutil.RemoveParens(lhs) - if !isBlank(lhs) { - c.Printf("%s", c.translateAssign(lhs, c.newIdent(fmt.Sprintf("%s[%d]", tupleVar, i), tuple.At(i).Type()), s.Tok == token.DEFINE)) - } - } - case len(s.Lhs) == len(s.Rhs): - tmpVars := make([]string, len(s.Rhs)) - for i, rhs := range s.Rhs { - tmpVars[i] = c.newVariable("_tmp") - if isBlank(astutil.RemoveParens(s.Lhs[i])) { - c.Printf("$unused(%s);", c.translateExpr(rhs)) - continue - } - c.Printf("%s", c.translateAssign(c.newIdent(tmpVars[i], c.p.TypeOf(s.Lhs[i])), rhs, true)) - } - for i, lhs := range s.Lhs { - lhs = astutil.RemoveParens(lhs) - if !isBlank(lhs) { - c.Printf("%s", c.translateAssign(lhs, c.newIdent(tmpVars[i], c.p.TypeOf(lhs)), s.Tok == token.DEFINE)) - } - } - - default: - panic("Invalid arity of AssignStmt.") - - } - - case *ast.DeclStmt: - decl := s.Decl.(*ast.GenDecl) - switch decl.Tok { - case token.VAR: - for _, spec := range s.Decl.(*ast.GenDecl).Specs { - valueSpec := spec.(*ast.ValueSpec) - lhs := make([]ast.Expr, len(valueSpec.Names)) - for i, name := range valueSpec.Names { - lhs[i] = name - } - rhs := valueSpec.Values - if len(rhs) == 0 { - rhs = make([]ast.Expr, len(lhs)) - for i, e := range lhs { - rhs[i] = c.zeroValue(c.p.TypeOf(e)) - } - } - c.translateStmt(&ast.AssignStmt{ - Lhs: lhs, - Tok: token.DEFINE, - Rhs: rhs, - }, nil) - } - case token.TYPE: - for _, spec := range decl.Specs { - o := c.p.Defs[spec.(*ast.TypeSpec).Name].(*types.TypeName) - c.p.typeNames = append(c.p.typeNames, o) - c.p.objectNames[o] = c.newVariableWithLevel(o.Name(), true) - c.p.dependencies[o] = true - } - case token.CONST: - // skip, constants are inlined - } - - case *ast.ExprStmt: - expr := c.translateExpr(s.X) - if expr != nil && expr.String() != "" { - c.Printf("%s;", expr) - } - - case *ast.LabeledStmt: - label := c.p.Defs[s.Label].(*types.Label) - if c.GotoLabel[label] { - c.PrintCond(false, s.Label.Name+":", fmt.Sprintf("case %d:", c.labelCase(label))) - } - c.translateStmt(s.Stmt, label) - - case *ast.GoStmt: - c.Printf("$go(%s, [%s]);", c.translateExpr(s.Call.Fun), strings.Join(c.translateArgs(c.p.TypeOf(s.Call.Fun).Underlying().(*types.Signature), s.Call.Args, s.Call.Ellipsis.IsValid()), ", ")) - - case *ast.SendStmt: - chanType := c.p.TypeOf(s.Chan).Underlying().(*types.Chan) - call := &ast.CallExpr{ - Fun: c.newIdent("$send", types.NewSignature(nil, types.NewTuple(types.NewVar(0, nil, "", chanType), types.NewVar(0, nil, "", chanType.Elem())), nil, false)), - Args: []ast.Expr{s.Chan, c.newIdent(c.translateImplicitConversionWithCloning(s.Value, chanType.Elem()).String(), chanType.Elem())}, - } - c.Blocking[call] = true - c.translateStmt(&ast.ExprStmt{X: call}, label) - - case *ast.SelectStmt: - 
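The len(Lhs) == len(Rhs) branch of the AssignStmt case, removed above, evaluates every right-hand side into a _tmp variable before performing any assignment — the staging that makes parallel assignment such as a swap correct:

```go
package main

import "fmt"

func main() {
	a, b := 1, 2
	// All right-hand sides are evaluated before any assignment takes
	// effect, which is why the compiler stages them through _tmp
	// variables in the generated code.
	a, b = b, a
	fmt.Println(a, b) // 2 1
}
```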
selectionVar := c.newVariable("_selection") - var channels []string - var caseClauses []*ast.CaseClause - flattened := false - hasDefault := false - for i, cc := range s.Body.List { - clause := cc.(*ast.CommClause) - switch comm := clause.Comm.(type) { - case nil: - channels = append(channels, "[]") - hasDefault = true - case *ast.ExprStmt: - channels = append(channels, c.formatExpr("[%e]", astutil.RemoveParens(comm.X).(*ast.UnaryExpr).X).String()) - case *ast.AssignStmt: - channels = append(channels, c.formatExpr("[%e]", astutil.RemoveParens(comm.Rhs[0]).(*ast.UnaryExpr).X).String()) - case *ast.SendStmt: - chanType := c.p.TypeOf(comm.Chan).Underlying().(*types.Chan) - channels = append(channels, c.formatExpr("[%e, %s]", comm.Chan, c.translateImplicitConversionWithCloning(comm.Value, chanType.Elem())).String()) - default: - panic(fmt.Sprintf("unhandled: %T", comm)) - } - - indexLit := &ast.BasicLit{Kind: token.INT} - c.p.Types[indexLit] = types.TypeAndValue{Type: types.Typ[types.Int], Value: constant.MakeInt64(int64(i))} - - var bodyPrefix []ast.Stmt - if assign, ok := clause.Comm.(*ast.AssignStmt); ok { - switch rhsType := c.p.TypeOf(assign.Rhs[0]).(type) { - case *types.Tuple: - bodyPrefix = []ast.Stmt{&ast.AssignStmt{Lhs: assign.Lhs, Rhs: []ast.Expr{c.newIdent(selectionVar+"[1]", rhsType)}, Tok: assign.Tok}} - default: - bodyPrefix = []ast.Stmt{&ast.AssignStmt{Lhs: assign.Lhs, Rhs: []ast.Expr{c.newIdent(selectionVar+"[1][0]", rhsType)}, Tok: assign.Tok}} - } - } - - caseClauses = append(caseClauses, &ast.CaseClause{ - List: []ast.Expr{indexLit}, - Body: append(bodyPrefix, clause.Body...), - }) - - flattened = flattened || c.Flattened[clause] - } - - selectCall := c.setType(&ast.CallExpr{ - Fun: c.newIdent("$select", types.NewSignature(nil, types.NewTuple(types.NewVar(0, nil, "", types.NewInterface(nil, nil))), types.NewTuple(types.NewVar(0, nil, "", types.Typ[types.Int])), false)), - Args: []ast.Expr{c.newIdent(fmt.Sprintf("[%s]", strings.Join(channels, ", ")), types.NewInterface(nil, nil))}, - }, types.Typ[types.Int]) - c.Blocking[selectCall] = !hasDefault - c.Printf("%s = %s;", selectionVar, c.translateExpr(selectCall)) - - if len(caseClauses) != 0 { - translateCond := func(cond ast.Expr) *expression { - return c.formatExpr("%s[0] === %e", selectionVar, cond) - } - c.translateBranchingStmt(caseClauses, nil, true, translateCond, label, flattened) - } - - case *ast.EmptyStmt: - // skip - - default: - panic(fmt.Sprintf("Unhandled statement: %T\n", s)) - - } -} - -func (c *funcContext) translateBranchingStmt(caseClauses []*ast.CaseClause, defaultClause *ast.CaseClause, canBreak bool, translateCond func(ast.Expr) *expression, label *types.Label, flatten bool) { - var caseOffset, defaultCase, endCase int - if flatten { - caseOffset = c.caseCounter - defaultCase = caseOffset + len(caseClauses) - endCase = defaultCase - if defaultClause != nil { - endCase++ - } - c.caseCounter = endCase + 1 - } - - hasBreak := false - if canBreak { - prevFlowData := c.flowDatas[nil] - data := &flowData{ - postStmt: prevFlowData.postStmt, // for "continue" of outer loop - beginCase: prevFlowData.beginCase, // same - endCase: endCase, - } - c.flowDatas[nil] = data - c.flowDatas[label] = data - defer func() { - delete(c.flowDatas, label) - c.flowDatas[nil] = prevFlowData - }() - - for _, child := range caseClauses { - if analysis.HasBreak(child) { - hasBreak = true - break - } - } - if defaultClause != nil && analysis.HasBreak(defaultClause) { - hasBreak = true - } - } - - if label != nil && !flatten { - 
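The SelectStmt case above marks the generated $select call blocking only when no default clause is present (c.Blocking[selectCall] = !hasDefault). In Go terms, a default clause makes the select non-blocking:

```go
package main

import "fmt"

func main() {
	ch := make(chan int) // unbuffered, nothing ready
	select {
	case v := <-ch:
		fmt.Println("received", v)
	default:
		// With a default clause the select cannot block, so the
		// compiler emits a non-blocking $select call.
		fmt.Println("no communication ready")
	}
}
```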
c.Printf("%s:", label.Name()) - } - - condStrs := make([]string, len(caseClauses)) - for i, clause := range caseClauses { - conds := make([]string, len(clause.List)) - for j, cond := range clause.List { - conds[j] = translateCond(cond).String() - } - condStrs[i] = strings.Join(conds, " || ") - if flatten { - c.Printf("/* */ if (%s) { $s = %d; continue; }", condStrs[i], caseOffset+i) - } - } - - if flatten { - c.Printf("/* */ $s = %d; continue;", defaultCase) - } - - prefix := "" - suffix := "" - if label != nil || hasBreak { - prefix = "switch (0) { default: " - suffix = " }" - } - - for i, clause := range caseClauses { - c.SetPos(clause.Pos()) - c.PrintCond(!flatten, fmt.Sprintf("%sif (%s) {", prefix, condStrs[i]), fmt.Sprintf("case %d:", caseOffset+i)) - c.Indent(func() { - c.translateStmtList(clause.Body) - if flatten && (i < len(caseClauses)-1 || defaultClause != nil) && !endsWithReturn(clause.Body) { - c.Printf("$s = %d; continue;", endCase) - } - }) - prefix = "} else " - } - - if defaultClause != nil { - c.PrintCond(!flatten, prefix+"{", fmt.Sprintf("case %d:", caseOffset+len(caseClauses))) - c.Indent(func() { - c.translateStmtList(defaultClause.Body) - }) - } - - c.PrintCond(!flatten, "}"+suffix, fmt.Sprintf("case %d:", endCase)) -} - -func (c *funcContext) translateLoopingStmt(cond func() string, body *ast.BlockStmt, bodyPrefix, post func(), label *types.Label, flatten bool) { - prevFlowData := c.flowDatas[nil] - data := &flowData{ - postStmt: post, - } - if flatten { - data.beginCase = c.caseCounter - data.endCase = c.caseCounter + 1 - c.caseCounter += 2 - } - c.flowDatas[nil] = data - c.flowDatas[label] = data - defer func() { - delete(c.flowDatas, label) - c.flowDatas[nil] = prevFlowData - }() - - if !flatten && label != nil { - c.Printf("%s:", label.Name()) - } - c.PrintCond(!flatten, "while (true) {", fmt.Sprintf("case %d:", data.beginCase)) - c.Indent(func() { - condStr := cond() - if condStr != "true" { - c.PrintCond(!flatten, fmt.Sprintf("if (!(%s)) { break; }", condStr), fmt.Sprintf("if(!(%s)) { $s = %d; continue; }", condStr, data.endCase)) - } - - prevEV := c.p.escapingVars - c.handleEscapingVars(body) - - if bodyPrefix != nil { - bodyPrefix() - } - c.translateStmtList(body.List) - isTerminated := false - if len(body.List) != 0 { - switch body.List[len(body.List)-1].(type) { - case *ast.ReturnStmt, *ast.BranchStmt: - isTerminated = true - } - } - if !isTerminated { - post() - } - - c.p.escapingVars = prevEV - }) - c.PrintCond(!flatten, "}", fmt.Sprintf("$s = %d; continue; case %d:", data.beginCase, data.endCase)) -} - -func (c *funcContext) translateAssign(lhs, rhs ast.Expr, define bool) string { - lhs = astutil.RemoveParens(lhs) - if isBlank(lhs) { - panic("translateAssign with blank lhs") - } - - if l, ok := lhs.(*ast.IndexExpr); ok { - if t, ok := c.p.TypeOf(l.X).Underlying().(*types.Map); ok { - if typesutil.IsJsObject(c.p.TypeOf(l.Index)) { - c.p.errList = append(c.p.errList, types.Error{Fset: c.p.fileSet, Pos: l.Index.Pos(), Msg: "cannot use js.Object as map key"}) - } - keyVar := c.newVariable("_key") - return fmt.Sprintf(`%s = %s; (%s || $throwRuntimeError("assignment to entry in nil map"))[%s.keyFor(%s)] = { k: %s, v: %s };`, keyVar, c.translateImplicitConversionWithCloning(l.Index, t.Key()), c.translateExpr(l.X), c.typeName(t.Key()), keyVar, keyVar, c.translateImplicitConversionWithCloning(rhs, t.Elem())) - } - } - - lhsType := c.p.TypeOf(lhs) - rhsExpr := c.translateImplicitConversion(rhs, lhsType) - if _, ok := rhs.(*ast.CompositeLit); ok && define { - 
return fmt.Sprintf("%s = %s;", c.translateExpr(lhs), rhsExpr) // skip $copy - } - - isReflectValue := false - if named, ok := lhsType.(*types.Named); ok && named.Obj().Pkg() != nil && named.Obj().Pkg().Path() == "reflect" && named.Obj().Name() == "Value" { - isReflectValue = true - } - if !isReflectValue { // this is a performance hack, but it is safe since reflect.Value has no exported fields and the reflect package does not violate this assumption - switch lhsType.Underlying().(type) { - case *types.Array, *types.Struct: - if define { - return fmt.Sprintf("%s = $clone(%s, %s);", c.translateExpr(lhs), rhsExpr, c.typeName(lhsType)) - } - return fmt.Sprintf("%s.copy(%s, %s);", c.typeName(lhsType), c.translateExpr(lhs), rhsExpr) - } - } - - switch l := lhs.(type) { - case *ast.Ident: - return fmt.Sprintf("%s = %s;", c.objectName(c.p.ObjectOf(l)), rhsExpr) - case *ast.SelectorExpr: - sel, ok := c.p.SelectionOf(l) - if !ok { - // qualified identifier - return fmt.Sprintf("%s = %s;", c.objectName(c.p.Uses[l.Sel]), rhsExpr) - } - fields, jsTag := c.translateSelection(sel, l.Pos()) - if jsTag != "" { - return fmt.Sprintf("%s.%s%s = %s;", c.translateExpr(l.X), strings.Join(fields, "."), formatJSStructTagVal(jsTag), c.externalize(rhsExpr.String(), sel.Type())) - } - return fmt.Sprintf("%s.%s = %s;", c.translateExpr(l.X), strings.Join(fields, "."), rhsExpr) - case *ast.StarExpr: - return fmt.Sprintf("%s.$set(%s);", c.translateExpr(l.X), rhsExpr) - case *ast.IndexExpr: - switch t := c.p.TypeOf(l.X).Underlying().(type) { - case *types.Array, *types.Pointer: - pattern := rangeCheck("%1e[%2f] = %3s", c.p.Types[l.Index].Value != nil, true) - if _, ok := t.(*types.Pointer); ok { // check pointer for nil (attribute getter causes a panic) - pattern = `%1e.nilCheck, ` + pattern - } - return c.formatExpr(pattern, l.X, l.Index, rhsExpr).String() + ";" - case *types.Slice: - return c.formatExpr(rangeCheck("%1e.$array[%1e.$offset + %2f] = %3s", c.p.Types[l.Index].Value != nil, false), l.X, l.Index, rhsExpr).String() + ";" - default: - panic(fmt.Sprintf("Unhandled lhs type: %T\n", t)) - } - default: - panic(fmt.Sprintf("Unhandled lhs type: %T\n", l)) - } -} - -func (c *funcContext) translateResults(results []ast.Expr) string { - tuple := c.sig.Results() - switch tuple.Len() { - case 0: - return "" - case 1: - result := c.zeroValue(tuple.At(0).Type()) - if results != nil { - result = results[0] - } - v := c.translateImplicitConversion(result, tuple.At(0).Type()) - c.delayedOutput = nil - return " " + v.String() - default: - if len(results) == 1 { - resultTuple := c.p.TypeOf(results[0]).(*types.Tuple) - - if resultTuple.Len() != tuple.Len() { - panic("invalid tuple return assignment") - } - - resultExpr := c.translateExpr(results[0]).String() - - if types.Identical(resultTuple, tuple) { - return " " + resultExpr - } - - tmpVar := c.newVariable("_returncast") - c.Printf("%s = %s;", tmpVar, resultExpr) - - // Not all the return types matched, map everything out for implicit casting - results = make([]ast.Expr, resultTuple.Len()) - for i := range results { - results[i] = c.newIdent(fmt.Sprintf("%s[%d]", tmpVar, i), resultTuple.At(i).Type()) - } - } - values := make([]string, tuple.Len()) - for i := range values { - result := c.zeroValue(tuple.At(i).Type()) - if results != nil { - result = results[i] - } - values[i] = c.translateImplicitConversion(result, tuple.At(i).Type()).String() - } - c.delayedOutput = nil - return " [" + strings.Join(values, ", ") + "]" - } -} - -func (c *funcContext) labelCase(label 
*types.Label) int { - labelCase, ok := c.labelCases[label] - if !ok { - labelCase = c.caseCounter - c.caseCounter++ - c.labelCases[label] = labelCase - } - return labelCase -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/typesutil/typesutil.go b/vendor/github.com/gopherjs/gopherjs/compiler/typesutil/typesutil.go deleted file mode 100644 index 600925b81..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/typesutil/typesutil.go +++ /dev/null @@ -1,16 +0,0 @@ -package typesutil - -import "go/types" - -func IsJsPackage(pkg *types.Package) bool { - return pkg != nil && pkg.Path() == "github.com/gopherjs/gopherjs/js" -} - -func IsJsObject(t types.Type) bool { - ptr, isPtr := t.(*types.Pointer) - if !isPtr { - return false - } - named, isNamed := ptr.Elem().(*types.Named) - return isNamed && IsJsPackage(named.Obj().Pkg()) && named.Obj().Name() == "Object" -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/utils.go b/vendor/github.com/gopherjs/gopherjs/compiler/utils.go deleted file mode 100644 index d5452e0a6..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/utils.go +++ /dev/null @@ -1,673 +0,0 @@ -package compiler - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "net/url" - "sort" - "strconv" - "strings" - "text/template" - "unicode" - - "github.com/gopherjs/gopherjs/compiler/analysis" - "github.com/gopherjs/gopherjs/compiler/typesutil" -) - -func (c *funcContext) Write(b []byte) (int, error) { - c.writePos() - c.output = append(c.output, b...) - return len(b), nil -} - -func (c *funcContext) Printf(format string, values ...interface{}) { - c.Write([]byte(strings.Repeat("\t", c.p.indentation))) - fmt.Fprintf(c, format, values...) - c.Write([]byte{'\n'}) - c.Write(c.delayedOutput) - c.delayedOutput = nil -} - -func (c *funcContext) PrintCond(cond bool, onTrue, onFalse string) { - if !cond { - c.Printf("/* %s */ %s", strings.Replace(onTrue, "*/", "/", -1), onFalse) - return - } - c.Printf("%s", onTrue) -} - -func (c *funcContext) SetPos(pos token.Pos) { - c.posAvailable = true - c.pos = pos -} - -func (c *funcContext) writePos() { - if c.posAvailable { - c.posAvailable = false - c.Write([]byte{'\b'}) - binary.Write(c, binary.BigEndian, uint32(c.pos)) - } -} - -func (c *funcContext) Indent(f func()) { - c.p.indentation++ - f() - c.p.indentation-- -} - -func (c *funcContext) CatchOutput(indent int, f func()) []byte { - origoutput := c.output - c.output = nil - c.p.indentation += indent - f() - c.writePos() - catched := c.output - c.output = origoutput - c.p.indentation -= indent - return catched -} - -func (c *funcContext) Delayed(f func()) { - c.delayedOutput = c.CatchOutput(0, f) -} - -func (c *funcContext) translateArgs(sig *types.Signature, argExprs []ast.Expr, ellipsis bool) []string { - if len(argExprs) == 1 { - if tuple, isTuple := c.p.TypeOf(argExprs[0]).(*types.Tuple); isTuple { - tupleVar := c.newVariable("_tuple") - c.Printf("%s = %s;", tupleVar, c.translateExpr(argExprs[0])) - argExprs = make([]ast.Expr, tuple.Len()) - for i := range argExprs { - argExprs[i] = c.newIdent(c.formatExpr("%s[%d]", tupleVar, i).String(), tuple.At(i).Type()) - } - } - } - - paramsLen := sig.Params().Len() - - var varargType *types.Slice - if sig.Variadic() && !ellipsis { - varargType = sig.Params().At(paramsLen - 1).Type().(*types.Slice) - } - - preserveOrder := false - for i := 1; i < len(argExprs); i++ { - preserveOrder = preserveOrder || c.Blocking[argExprs[i]] - } - - args := make([]string, len(argExprs)) 
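-// Each argument is translated in turn; when a later argument can block (preserveOrder), non-constant arguments are spilled into _arg temporaries so that Go's left-to-right evaluation order is preserved.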
- for i, argExpr := range argExprs { - var argType types.Type - switch { - case varargType != nil && i >= paramsLen-1: - argType = varargType.Elem() - default: - argType = sig.Params().At(i).Type() - } - - arg := c.translateImplicitConversionWithCloning(argExpr, argType).String() - - if preserveOrder && c.p.Types[argExpr].Value == nil { - argVar := c.newVariable("_arg") - c.Printf("%s = %s;", argVar, arg) - arg = argVar - } - - args[i] = arg - } - - if varargType != nil { - return append(args[:paramsLen-1], fmt.Sprintf("new %s([%s])", c.typeName(varargType), strings.Join(args[paramsLen-1:], ", "))) - } - return args -} - -func (c *funcContext) translateSelection(sel selection, pos token.Pos) ([]string, string) { - var fields []string - t := sel.Recv() - for _, index := range sel.Index() { - if ptr, isPtr := t.(*types.Pointer); isPtr { - t = ptr.Elem() - } - s := t.Underlying().(*types.Struct) - if jsTag := getJsTag(s.Tag(index)); jsTag != "" { - jsFieldName := s.Field(index).Name() - for { - fields = append(fields, fieldName(s, 0)) - ft := s.Field(0).Type() - if typesutil.IsJsObject(ft) { - return fields, jsTag - } - ft = ft.Underlying() - if ptr, ok := ft.(*types.Pointer); ok { - ft = ptr.Elem().Underlying() - } - var ok bool - s, ok = ft.(*types.Struct) - if !ok || s.NumFields() == 0 { - c.p.errList = append(c.p.errList, types.Error{Fset: c.p.fileSet, Pos: pos, Msg: fmt.Sprintf("could not find field with type *js.Object for 'js' tag of field '%s'", jsFieldName), Soft: true}) - return nil, "" - } - } - } - fields = append(fields, fieldName(s, index)) - t = s.Field(index).Type() - } - return fields, "" -} - -var nilObj = types.Universe.Lookup("nil") - -func (c *funcContext) zeroValue(ty types.Type) ast.Expr { - switch t := ty.Underlying().(type) { - case *types.Basic: - switch { - case isBoolean(t): - return c.newConst(ty, constant.MakeBool(false)) - case isNumeric(t): - return c.newConst(ty, constant.MakeInt64(0)) - case isString(t): - return c.newConst(ty, constant.MakeString("")) - case t.Kind() == types.UnsafePointer: - // fall through to "nil" - case t.Kind() == types.UntypedNil: - panic("Zero value for untyped nil.") - default: - panic(fmt.Sprintf("Unhandled basic type: %v\n", t)) - } - case *types.Array, *types.Struct: - return c.setType(&ast.CompositeLit{}, ty) - case *types.Chan, *types.Interface, *types.Map, *types.Signature, *types.Slice, *types.Pointer: - // fall through to "nil" - default: - panic(fmt.Sprintf("Unhandled type: %T\n", t)) - } - id := c.newIdent("nil", ty) - c.p.Uses[id] = nilObj - return id -} - -func (c *funcContext) newConst(t types.Type, value constant.Value) ast.Expr { - id := &ast.Ident{} - c.p.Types[id] = types.TypeAndValue{Type: t, Value: value} - return id -} - -func (c *funcContext) newVariable(name string) string { - return c.newVariableWithLevel(name, false) -} - -func (c *funcContext) newVariableWithLevel(name string, pkgLevel bool) string { - if name == "" { - panic("newVariable: empty name") - } - name = encodeIdent(name) - if c.p.minify { - i := 0 - for { - offset := int('a') - if pkgLevel { - offset = int('A') - } - j := i - name = "" - for { - name = string(offset+(j%26)) + name - j = j/26 - 1 - if j == -1 { - break - } - } - if c.allVars[name] == 0 { - break - } - i++ - } - } - n := c.allVars[name] - c.allVars[name] = n + 1 - varName := name - if n > 0 { - varName = fmt.Sprintf("%s$%d", name, n) - } - - if pkgLevel { - for c2 := c.parent; c2 != nil; c2 = c2.parent { - c2.allVars[name] = n + 1 - } - return varName - } - - c.localVars = 
append(c.localVars, varName) - return varName -} - -func (c *funcContext) newIdent(name string, t types.Type) *ast.Ident { - ident := ast.NewIdent(name) - c.setType(ident, t) - obj := types.NewVar(0, c.p.Pkg, name, t) - c.p.Uses[ident] = obj - c.p.objectNames[obj] = name - return ident -} - -func (c *funcContext) setType(e ast.Expr, t types.Type) ast.Expr { - c.p.Types[e] = types.TypeAndValue{Type: t} - return e -} - -func (c *funcContext) pkgVar(pkg *types.Package) string { - if pkg == c.p.Pkg { - return "$pkg" - } - - pkgVar, found := c.p.pkgVars[pkg.Path()] - if !found { - pkgVar = fmt.Sprintf(`$packages["%s"]`, pkg.Path()) - } - return pkgVar -} - -func isVarOrConst(o types.Object) bool { - switch o.(type) { - case *types.Var, *types.Const: - return true - } - return false -} - -func isPkgLevel(o types.Object) bool { - return o.Parent() != nil && o.Parent().Parent() == types.Universe -} - -func (c *funcContext) objectName(o types.Object) string { - if isPkgLevel(o) { - c.p.dependencies[o] = true - - if o.Pkg() != c.p.Pkg || (isVarOrConst(o) && o.Exported()) { - return c.pkgVar(o.Pkg()) + "." + o.Name() - } - } - - name, ok := c.p.objectNames[o] - if !ok { - name = c.newVariableWithLevel(o.Name(), isPkgLevel(o)) - c.p.objectNames[o] = name - } - - if v, ok := o.(*types.Var); ok && c.p.escapingVars[v] { - return name + "[0]" - } - return name -} - -func (c *funcContext) varPtrName(o *types.Var) string { - if isPkgLevel(o) && o.Exported() { - return c.pkgVar(o.Pkg()) + "." + o.Name() + "$ptr" - } - - name, ok := c.p.varPtrNames[o] - if !ok { - name = c.newVariableWithLevel(o.Name()+"$ptr", isPkgLevel(o)) - c.p.varPtrNames[o] = name - } - return name -} - -func (c *funcContext) typeName(ty types.Type) string { - switch t := ty.(type) { - case *types.Basic: - return "$" + toJavaScriptType(t) - case *types.Named: - if t.Obj().Name() == "error" { - return "$error" - } - return c.objectName(t.Obj()) - case *types.Interface: - if t.Empty() { - return "$emptyInterface" - } - } - - anonType, ok := c.p.anonTypeMap.At(ty).(*types.TypeName) - if !ok { - c.initArgs(ty) // cause all embedded types to be registered - varName := c.newVariableWithLevel(strings.ToLower(typeKind(ty)[5:])+"Type", true) - anonType = types.NewTypeName(token.NoPos, c.p.Pkg, varName, ty) // fake types.TypeName - c.p.anonTypes = append(c.p.anonTypes, anonType) - c.p.anonTypeMap.Set(ty, anonType) - } - c.p.dependencies[anonType] = true - return anonType.Name() -} - -func (c *funcContext) externalize(s string, t types.Type) string { - if typesutil.IsJsObject(t) { - return s - } - switch u := t.Underlying().(type) { - case *types.Basic: - if isNumeric(u) && !is64Bit(u) && !isComplex(u) { - return s - } - if u.Kind() == types.UntypedNil { - return "null" - } - } - return fmt.Sprintf("$externalize(%s, %s)", s, c.typeName(t)) -} - -func (c *funcContext) handleEscapingVars(n ast.Node) { - newEscapingVars := make(map[*types.Var]bool) - for escaping := range c.p.escapingVars { - newEscapingVars[escaping] = true - } - c.p.escapingVars = newEscapingVars - - var names []string - objs := analysis.EscapingObjects(n, c.p.Info.Info) - sort.Slice(objs, func(i, j int) bool { - if objs[i].Name() == objs[j].Name() { - return objs[i].Pos() < objs[j].Pos() - } - return objs[i].Name() < objs[j].Name() - }) - for _, obj := range objs { - names = append(names, c.objectName(obj)) - c.p.escapingVars[obj] = true - } - sort.Strings(names) - for _, name := range names { - c.Printf("%s = [%s];", name, name) - } -} - -func fieldName(t *types.Struct, i int) 
string { - name := t.Field(i).Name() - if name == "_" || reservedKeywords[name] { - return fmt.Sprintf("%s$%d", name, i) - } - return name -} - -func typeKind(ty types.Type) string { - switch t := ty.Underlying().(type) { - case *types.Basic: - return "$kind" + toJavaScriptType(t) - case *types.Array: - return "$kindArray" - case *types.Chan: - return "$kindChan" - case *types.Interface: - return "$kindInterface" - case *types.Map: - return "$kindMap" - case *types.Signature: - return "$kindFunc" - case *types.Slice: - return "$kindSlice" - case *types.Struct: - return "$kindStruct" - case *types.Pointer: - return "$kindPtr" - default: - panic(fmt.Sprintf("Unhandled type: %T\n", t)) - } -} - -func toJavaScriptType(t *types.Basic) string { - switch t.Kind() { - case types.UntypedInt: - return "Int" - case types.Byte: - return "Uint8" - case types.Rune: - return "Int32" - case types.UnsafePointer: - return "UnsafePointer" - default: - name := t.String() - return strings.ToUpper(name[:1]) + name[1:] - } -} - -func is64Bit(t *types.Basic) bool { - return t.Kind() == types.Int64 || t.Kind() == types.Uint64 -} - -func isBoolean(t *types.Basic) bool { - return t.Info()&types.IsBoolean != 0 -} - -func isComplex(t *types.Basic) bool { - return t.Info()&types.IsComplex != 0 -} - -func isFloat(t *types.Basic) bool { - return t.Info()&types.IsFloat != 0 -} - -func isInteger(t *types.Basic) bool { - return t.Info()&types.IsInteger != 0 -} - -func isNumeric(t *types.Basic) bool { - return t.Info()&types.IsNumeric != 0 -} - -func isString(t *types.Basic) bool { - return t.Info()&types.IsString != 0 -} - -func isUnsigned(t *types.Basic) bool { - return t.Info()&types.IsUnsigned != 0 -} - -func isBlank(expr ast.Expr) bool { - if expr == nil { - return true - } - if id, isIdent := expr.(*ast.Ident); isIdent { - return id.Name == "_" - } - return false -} - -func isWrapped(ty types.Type) bool { - switch t := ty.Underlying().(type) { - case *types.Basic: - return !is64Bit(t) && !isComplex(t) && t.Kind() != types.UntypedNil - case *types.Array, *types.Chan, *types.Map, *types.Signature: - return true - case *types.Pointer: - _, isArray := t.Elem().Underlying().(*types.Array) - return isArray - } - return false -} - -func encodeString(s string) string { - buffer := bytes.NewBuffer(nil) - for _, r := range []byte(s) { - switch r { - case '\b': - buffer.WriteString(`\b`) - case '\f': - buffer.WriteString(`\f`) - case '\n': - buffer.WriteString(`\n`) - case '\r': - buffer.WriteString(`\r`) - case '\t': - buffer.WriteString(`\t`) - case '\v': - buffer.WriteString(`\v`) - case '"': - buffer.WriteString(`\"`) - case '\\': - buffer.WriteString(`\\`) - default: - if r < 0x20 || r > 0x7E { - fmt.Fprintf(buffer, `\x%02X`, r) - continue - } - buffer.WriteByte(r) - } - } - return `"` + buffer.String() + `"` -} - -func getJsTag(tag string) string { - for tag != "" { - // skip leading space - i := 0 - for i < len(tag) && tag[i] == ' ' { - i++ - } - tag = tag[i:] - if tag == "" { - break - } - - // scan to colon. 
- // a space or a quote is a syntax error - i = 0 - for i < len(tag) && tag[i] != ' ' && tag[i] != ':' && tag[i] != '"' { - i++ - } - if i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { - break - } - name := string(tag[:i]) - tag = tag[i+1:] - - // scan quoted string to find value - i = 1 - for i < len(tag) && tag[i] != '"' { - if tag[i] == '\\' { - i++ - } - i++ - } - if i >= len(tag) { - break - } - qvalue := string(tag[:i+1]) - tag = tag[i+1:] - - if name == "js" { - value, _ := strconv.Unquote(qvalue) - return value - } - } - return "" -} - -func needsSpace(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '$' -} - -func removeWhitespace(b []byte, minify bool) []byte { - if !minify { - return b - } - - var out []byte - var previous byte - for len(b) > 0 { - switch b[0] { - case '\b': - out = append(out, b[:5]...) - b = b[5:] - continue - case ' ', '\t', '\n': - if (!needsSpace(previous) || !needsSpace(b[1])) && !(previous == '-' && b[1] == '-') { - b = b[1:] - continue - } - case '"': - out = append(out, '"') - b = b[1:] - for { - i := bytes.IndexAny(b, "\"\\") - out = append(out, b[:i]...) - b = b[i:] - if b[0] == '"' { - break - } - // backslash - out = append(out, b[:2]...) - b = b[2:] - } - case '/': - if b[1] == '*' { - i := bytes.Index(b[2:], []byte("*/")) - b = b[i+4:] - continue - } - } - out = append(out, b[0]) - previous = b[0] - b = b[1:] - } - return out -} - -func rangeCheck(pattern string, constantIndex, array bool) string { - if constantIndex && array { - return pattern - } - lengthProp := "$length" - if array { - lengthProp = "length" - } - check := "%2f >= %1e." + lengthProp - if !constantIndex { - check = "(%2f < 0 || " + check + ")" - } - return "(" + check + ` ? ($throwRuntimeError("index out of range"), undefined) : ` + pattern + ")" -} - -func endsWithReturn(stmts []ast.Stmt) bool { - if len(stmts) > 0 { - if _, ok := stmts[len(stmts)-1].(*ast.ReturnStmt); ok { - return true - } - } - return false -} - -func encodeIdent(name string) string { - return strings.Replace(url.QueryEscape(name), "%", "$", -1) -} - -// formatJSStructTagVal returns JavaScript code for accessing an object's property -// identified by jsTag. It prefers the dot notation over the bracket notation when -// possible, since the dot notation produces slightly smaller output. -// -// For example: -// -// "my_name" -> ".my_name" -// "my name" -> `["my name"]` -// -// For more information about JavaScript property accessors and identifiers, see -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Property_Accessors and -// https://developer.mozilla.org/en-US/docs/Glossary/Identifier. -// -func formatJSStructTagVal(jsTag string) string { - for i, r := range jsTag { - ok := unicode.IsLetter(r) || (i != 0 && unicode.IsNumber(r)) || r == '$' || r == '_' - if !ok { - // Saw an invalid JavaScript identifier character, - // so use bracket notation. - return `["` + template.JSEscapeString(jsTag) + `"]` - } - } - // Safe to use dot notation without any escaping. - return "." 
+ jsTag -} diff --git a/vendor/github.com/gopherjs/gopherjs/compiler/version_check.go b/vendor/github.com/gopherjs/gopherjs/compiler/version_check.go deleted file mode 100644 index ce16f081f..000000000 --- a/vendor/github.com/gopherjs/gopherjs/compiler/version_check.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.12 -// +build !go1.13 - -package compiler - -const ___GOPHERJS_REQUIRES_GO_VERSION_1_12___ = true - -// Version is the GopherJS compiler version string. -const Version = "1.12-1" diff --git a/vendor/github.com/gopherjs/gopherjs/internal/sysutil/sysutil.go b/vendor/github.com/gopherjs/gopherjs/internal/sysutil/sysutil.go deleted file mode 100644 index c3631f2b2..000000000 --- a/vendor/github.com/gopherjs/gopherjs/internal/sysutil/sysutil.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -// Package sysutil contains system-specific utilities. -package sysutil - -import "golang.org/x/sys/unix" - -// RlimitStack reports the current stack size limit in bytes. -func RlimitStack() (cur uint64, err error) { - var r unix.Rlimit - err = unix.Getrlimit(unix.RLIMIT_STACK, &r) - return uint64(r.Cur), err // Type conversion because Cur is one of uint64, int64 depending on unix flavor. -} diff --git a/vendor/github.com/gopherjs/gopherjs/internal/sysutil/sysutil_windows.go b/vendor/github.com/gopherjs/gopherjs/internal/sysutil/sysutil_windows.go deleted file mode 100644 index 5e959b2bf..000000000 --- a/vendor/github.com/gopherjs/gopherjs/internal/sysutil/sysutil_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package sysutil - -import "errors" - -func RlimitStack() (uint64, error) { - return 0, errors.New("RlimitStack is not implemented on Windows") -} diff --git a/vendor/github.com/gopherjs/gopherjs/nosync/map.go b/vendor/github.com/gopherjs/gopherjs/nosync/map.go deleted file mode 100644 index 6cd845a27..000000000 --- a/vendor/github.com/gopherjs/gopherjs/nosync/map.go +++ /dev/null @@ -1,67 +0,0 @@ -package nosync - -// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. -// It is safe for multiple goroutines to call a Map's methods concurrently. -// -// The zero Map is valid and empty. -// -// A Map must not be copied after first use. -type Map struct { - m map[interface{}]interface{} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *Map) Load(key interface{}) (value interface{}, ok bool) { - value, ok = m.m[key] - return value, ok -} - -// Store sets the value for a key. -func (m *Map) Store(key, value interface{}) { - if m.m == nil { - m.m = make(map[interface{}]interface{}) - } - m.m[key] = value -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { - if value, ok := m.m[key]; ok { - return value, true - } - if m.m == nil { - m.m = make(map[interface{}]interface{}) - } - m.m[key] = value - return value, false -} - -// Delete deletes the value for a key. -func (m *Map) Delete(key interface{}) { - if m.m == nil { - return - } - delete(m.m, key) -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. 
-// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *Map) Range(f func(key, value interface{}) bool) { - for k, v := range m.m { - if !f(k, v) { - break - } - } -} diff --git a/vendor/github.com/gopherjs/gopherjs/nosync/mutex.go b/vendor/github.com/gopherjs/gopherjs/nosync/mutex.go deleted file mode 100644 index 03f20dc40..000000000 --- a/vendor/github.com/gopherjs/gopherjs/nosync/mutex.go +++ /dev/null @@ -1,85 +0,0 @@ -package nosync - -// Mutex is a dummy which is non-blocking. -type Mutex struct { - locked bool -} - -// Lock locks m. It is a run-time error if m is already locked. -func (m *Mutex) Lock() { - if m.locked { - panic("nosync: mutex is already locked") - } - m.locked = true -} - -// Unlock unlocks m. It is a run-time error if m is not locked. -func (m *Mutex) Unlock() { - if !m.locked { - panic("nosync: unlock of unlocked mutex") - } - m.locked = false -} - -// RWMutex is a dummy which is non-blocking. -type RWMutex struct { - writeLocked bool - readLockCounter int -} - -// Lock locks rw for writing. It is a run-time error if rw is already locked for reading or writing. -func (rw *RWMutex) Lock() { - if rw.readLockCounter != 0 || rw.writeLocked { - panic("nosync: mutex is already locked") - } - rw.writeLocked = true -} - -// Unlock unlocks rw for writing. It is a run-time error if rw is not locked for writing. -func (rw *RWMutex) Unlock() { - if !rw.writeLocked { - panic("nosync: unlock of unlocked mutex") - } - rw.writeLocked = false -} - -// RLock locks rw for reading. It is a run-time error if rw is already locked for reading or writing. -func (rw *RWMutex) RLock() { - if rw.writeLocked { - panic("nosync: mutex is already locked") - } - rw.readLockCounter++ -} - -// RUnlock undoes a single RLock call; it does not affect other simultaneous readers. It is a run-time error if rw is not locked for reading. -func (rw *RWMutex) RUnlock() { - if rw.readLockCounter == 0 { - panic("nosync: unlock of unlocked mutex") - } - rw.readLockCounter-- -} - -// WaitGroup is a dummy which is non-blocking. -type WaitGroup struct { - counter int -} - -// Add adds delta, which may be negative, to the WaitGroup counter. If the counter goes negative, Add panics. -func (wg *WaitGroup) Add(delta int) { - wg.counter += delta - if wg.counter < 0 { - panic("sync: negative WaitGroup counter") - } -} - -// Done decrements the WaitGroup counter. -func (wg *WaitGroup) Done() { - wg.Add(-1) -} - -// Wait panics if the WaitGroup counter is not zero. -func (wg *WaitGroup) Wait() { - if wg.counter != 0 { - panic("sync: WaitGroup counter not zero") - } -} diff --git a/vendor/github.com/gopherjs/gopherjs/nosync/once.go b/vendor/github.com/gopherjs/gopherjs/nosync/once.go deleted file mode 100644 index f4cb69540..000000000 --- a/vendor/github.com/gopherjs/gopherjs/nosync/once.go +++ /dev/null @@ -1,39 +0,0 @@ -package nosync - -// Once is an object that will perform exactly one action. -type Once struct { - doing bool - done bool -} - -// Do calls the function f if and only if Do is being called for the -// first time for this instance of Once. 
In other words, given -// var once Once -// if once.Do(f) is called multiple times, only the first call will invoke f, -// even if f has a different value in each invocation. A new instance of -// Once is required for each function to execute. -// -// Do is intended for initialization that must be run exactly once. Since f -// is niladic, it may be necessary to use a function literal to capture the -// arguments to a function to be invoked by Do: -// config.once.Do(func() { config.init(filename) }) -// -// If f causes Do to be called, it will panic. -// -// If f panics, Do considers it to have returned; future calls of Do return -// without calling f. -// -func (o *Once) Do(f func()) { - if o.done { - return - } - if o.doing { - panic("nosync: Do called within f") - } - o.doing = true - defer func() { - o.doing = false - o.done = true - }() - f() -} diff --git a/vendor/github.com/gopherjs/gopherjs/nosync/pool.go b/vendor/github.com/gopherjs/gopherjs/nosync/pool.go deleted file mode 100644 index 3d448e0fc..000000000 --- a/vendor/github.com/gopherjs/gopherjs/nosync/pool.go +++ /dev/null @@ -1,63 +0,0 @@ -package nosync - -// A Pool is a set of temporary objects that may be individually saved and -// retrieved. -// -// Any item stored in the Pool may be removed automatically at any time without -// notification. If the Pool holds the only reference when this happens, the -// item might be deallocated. -// -// A Pool is safe for use by multiple goroutines simultaneously. -// -// Pool's purpose is to cache allocated but unused items for later reuse, -// relieving pressure on the garbage collector. That is, it makes it easy to -// build efficient, thread-safe free lists. However, it is not suitable for all -// free lists. -// -// An appropriate use of a Pool is to manage a group of temporary items -// silently shared among and potentially reused by concurrent independent -// clients of a package. Pool provides a way to amortize allocation overhead -// across many clients. -// -// An example of good use of a Pool is in the fmt package, which maintains a -// dynamically-sized store of temporary output buffers. The store scales under -// load (when many goroutines are actively printing) and shrinks when -// quiescent. -// -// On the other hand, a free list maintained as part of a short-lived object is -// not a suitable use for a Pool, since the overhead does not amortize well in -// that scenario. It is more efficient to have such objects implement their own -// free list. -// -type Pool struct { - store []interface{} - New func() interface{} -} - -// Get selects an arbitrary item from the Pool, removes it from the -// Pool, and returns it to the caller. -// Get may choose to ignore the pool and treat it as empty. -// Callers should not assume any relation between values passed to Put and -// the values returned by Get. -// -// If Get would otherwise return nil and p.New is non-nil, Get returns -// the result of calling p.New. -func (p *Pool) Get() interface{} { - if len(p.store) == 0 { - if p.New != nil { - return p.New() - } - return nil - } - x := p.store[len(p.store)-1] - p.store = p.store[:len(p.store)-1] - return x -} - -// Put adds x to the pool. 
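-// A minimal Get/Put usage sketch (illustrative only, assuming a pool of bytes.Buffer values):
-//
-//    var bufPool = Pool{New: func() interface{} { return new(bytes.Buffer) }}
-//    buf := bufPool.Get().(*bytes.Buffer)
-//    buf.Reset()
-//    // ... use buf ...
-//    bufPool.Put(buf)
-//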
-func (p *Pool) Put(x interface{}) { - if x == nil { - return - } - p.store = append(p.store, x) -} diff --git a/vendor/github.com/gopherjs/gopherjs/tool.go b/vendor/github.com/gopherjs/gopherjs/tool.go deleted file mode 100644 index 4c580a139..000000000 --- a/vendor/github.com/gopherjs/gopherjs/tool.go +++ /dev/null @@ -1,963 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "fmt" - "go/ast" - "go/build" - "go/doc" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "io" - "io/ioutil" - "net" - "net/http" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "syscall" - "text/template" - "time" - "unicode" - "unicode/utf8" - - gbuild "github.com/gopherjs/gopherjs/build" - "github.com/gopherjs/gopherjs/compiler" - "github.com/gopherjs/gopherjs/internal/sysutil" - "github.com/kisielk/gotool" - "github.com/neelance/sourcemap" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/tools/go/buildutil" -) - -var currentDirectory string - -func init() { - var err error - currentDirectory, err = os.Getwd() - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - currentDirectory, err = filepath.EvalSymlinks(currentDirectory) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - gopaths := filepath.SplitList(build.Default.GOPATH) - if len(gopaths) == 0 { - fmt.Fprintf(os.Stderr, "$GOPATH not set. For more details see: go help gopath\n") - os.Exit(1) - } -} - -func main() { - var ( - options = &gbuild.Options{CreateMapFile: true} - pkgObj string - tags string - ) - - flagVerbose := pflag.NewFlagSet("", 0) - flagVerbose.BoolVarP(&options.Verbose, "verbose", "v", false, "print the names of packages as they are compiled") - flagQuiet := pflag.NewFlagSet("", 0) - flagQuiet.BoolVarP(&options.Quiet, "quiet", "q", false, "suppress non-fatal warnings") - - compilerFlags := pflag.NewFlagSet("", 0) - compilerFlags.BoolVarP(&options.Minify, "minify", "m", false, "minify generated code") - compilerFlags.BoolVar(&options.Color, "color", terminal.IsTerminal(int(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb", "colored output") - compilerFlags.StringVar(&tags, "tags", "", "a list of build tags to consider satisfied during the build") - compilerFlags.BoolVar(&options.MapToLocalDisk, "localmap", false, "use local paths for sourcemap") - - flagWatch := pflag.NewFlagSet("", 0) - flagWatch.BoolVarP(&options.Watch, "watch", "w", false, "watch for changes to the source files") - - cmdBuild := &cobra.Command{ - Use: "build [packages]", - Short: "compile packages and dependencies", - } - cmdBuild.Flags().StringVarP(&pkgObj, "output", "o", "", "output file") - cmdBuild.Flags().AddFlagSet(flagVerbose) - cmdBuild.Flags().AddFlagSet(flagQuiet) - cmdBuild.Flags().AddFlagSet(compilerFlags) - cmdBuild.Flags().AddFlagSet(flagWatch) - cmdBuild.Run = func(cmd *cobra.Command, args []string) { - options.BuildTags = strings.Fields(tags) - for { - s := gbuild.NewSession(options) - - err := func() error { - // Handle "gopherjs build [files]" ad-hoc package mode. 
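-// An illustrative invocation (file names hypothetical):
-//
-//    gopherjs build main.go helpers.go -o app.js
-//
-// Only .go and .inc.js files may be named; when -o is omitted, the output name defaults to the first file's name with a .js extension.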
- if len(args) > 0 && (strings.HasSuffix(args[0], ".go") || strings.HasSuffix(args[0], ".inc.js")) { - for _, arg := range args { - if !strings.HasSuffix(arg, ".go") && !strings.HasSuffix(arg, ".inc.js") { - return fmt.Errorf("named files must be .go or .inc.js files") - } - } - if pkgObj == "" { - basename := filepath.Base(args[0]) - pkgObj = basename[:len(basename)-3] + ".js" - } - names := make([]string, len(args)) - for i, name := range args { - name = filepath.ToSlash(name) - names[i] = name - if s.Watcher != nil { - s.Watcher.Add(name) - } - } - err := s.BuildFiles(args, pkgObj, currentDirectory) - return err - } - - // Expand import path patterns. - patternContext := gbuild.NewBuildContext("", options.BuildTags) - pkgs := (&gotool.Context{BuildContext: *patternContext}).ImportPaths(args) - - for _, pkgPath := range pkgs { - if s.Watcher != nil { - pkg, err := gbuild.NewBuildContext(s.InstallSuffix(), options.BuildTags).Import(pkgPath, "", build.FindOnly) - if err != nil { - return err - } - s.Watcher.Add(pkg.Dir) - } - pkg, err := gbuild.Import(pkgPath, 0, s.InstallSuffix(), options.BuildTags) - if err != nil { - return err - } - archive, err := s.BuildPackage(pkg) - if err != nil { - return err - } - if len(pkgs) == 1 { // Only consider writing output if single package specified. - if pkgObj == "" { - pkgObj = filepath.Base(pkg.Dir) + ".js" - } - if pkg.IsCommand() && !pkg.UpToDate { - if err := s.WriteCommandPackage(archive, pkgObj); err != nil { - return err - } - } - } - } - return nil - }() - exitCode := handleError(err, options, nil) - - if s.Watcher == nil { - os.Exit(exitCode) - } - s.WaitForChange() - } - } - - cmdInstall := &cobra.Command{ - Use: "install [packages]", - Short: "compile and install packages and dependencies", - } - cmdInstall.Flags().AddFlagSet(flagVerbose) - cmdInstall.Flags().AddFlagSet(flagQuiet) - cmdInstall.Flags().AddFlagSet(compilerFlags) - cmdInstall.Flags().AddFlagSet(flagWatch) - cmdInstall.Run = func(cmd *cobra.Command, args []string) { - options.BuildTags = strings.Fields(tags) - for { - s := gbuild.NewSession(options) - - err := func() error { - // Expand import path patterns. - patternContext := gbuild.NewBuildContext("", options.BuildTags) - pkgs := (&gotool.Context{BuildContext: *patternContext}).ImportPaths(args) - - if cmd.Name() == "get" { - goGet := exec.Command("go", append([]string{"get", "-d", "-tags=js"}, pkgs...)...) - goGet.Stdout = os.Stdout - goGet.Stderr = os.Stderr - if err := goGet.Run(); err != nil { - return err - } - } - for _, pkgPath := range pkgs { - pkg, err := gbuild.Import(pkgPath, 0, s.InstallSuffix(), options.BuildTags) - if s.Watcher != nil && pkg != nil { // add watch even on error - s.Watcher.Add(pkg.Dir) - } - if err != nil { - return err - } - - archive, err := s.BuildPackage(pkg) - if err != nil { - return err - } - - if pkg.IsCommand() && !pkg.UpToDate { - if err := s.WriteCommandPackage(archive, pkg.PkgObj); err != nil { - return err - } - } - } - return nil - }() - exitCode := handleError(err, options, nil) - - if s.Watcher == nil { - os.Exit(exitCode) - } - s.WaitForChange() - } - } - - cmdDoc := &cobra.Command{ - Use: "doc [arguments]", - Short: "display documentation for the requested package, method or symbol", - } - cmdDoc.Run = func(cmd *cobra.Command, args []string) { - goDoc := exec.Command("go", append([]string{"doc"}, args...)...) 
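-// The doc command shells out to the standard "go doc" tool; GOARCH=js is set below so documentation is resolved for the js architecture.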
- goDoc.Stdout = os.Stdout - goDoc.Stderr = os.Stderr - goDoc.Env = append(os.Environ(), "GOARCH=js") - err := goDoc.Run() - exitCode := handleError(err, options, nil) - os.Exit(exitCode) - } - - cmdGet := &cobra.Command{ - Use: "get [packages]", - Short: "download and install packages and dependencies", - } - cmdGet.Flags().AddFlagSet(flagVerbose) - cmdGet.Flags().AddFlagSet(flagQuiet) - cmdGet.Flags().AddFlagSet(compilerFlags) - cmdGet.Run = cmdInstall.Run - - cmdRun := &cobra.Command{ - Use: "run [gofiles...] [arguments...]", - Short: "compile and run Go program", - } - cmdRun.Flags().AddFlagSet(flagVerbose) - cmdRun.Flags().AddFlagSet(flagQuiet) - cmdRun.Flags().AddFlagSet(compilerFlags) - cmdRun.Run = func(cmd *cobra.Command, args []string) { - err := func() error { - lastSourceArg := 0 - for { - if lastSourceArg == len(args) || !(strings.HasSuffix(args[lastSourceArg], ".go") || strings.HasSuffix(args[lastSourceArg], ".inc.js")) { - break - } - lastSourceArg++ - } - if lastSourceArg == 0 { - return fmt.Errorf("gopherjs run: no go files listed") - } - - tempfile, err := ioutil.TempFile(currentDirectory, filepath.Base(args[0])+".") - if err != nil && strings.HasPrefix(currentDirectory, runtime.GOROOT()) { - tempfile, err = ioutil.TempFile("", filepath.Base(args[0])+".") - } - if err != nil { - return err - } - defer func() { - tempfile.Close() - os.Remove(tempfile.Name()) - os.Remove(tempfile.Name() + ".map") - }() - s := gbuild.NewSession(options) - if err := s.BuildFiles(args[:lastSourceArg], tempfile.Name(), currentDirectory); err != nil { - return err - } - if err := runNode(tempfile.Name(), args[lastSourceArg:], "", options.Quiet); err != nil { - return err - } - return nil - }() - exitCode := handleError(err, options, nil) - - os.Exit(exitCode) - } - - cmdTest := &cobra.Command{ - Use: "test [packages]", - Short: "test packages", - } - bench := cmdTest.Flags().String("bench", "", "Run benchmarks matching the regular expression. By default, no benchmarks run. To run all benchmarks, use '--bench=.'.") - benchtime := cmdTest.Flags().String("benchtime", "", "Run enough iterations of each benchmark to take t, specified as a time.Duration (for example, -benchtime 1h30s). The default is 1 second (1s).") - count := cmdTest.Flags().String("count", "", "Run each test and benchmark n times (default 1). Examples are always run once.") - run := cmdTest.Flags().String("run", "", "Run only those tests and examples matching the regular expression.") - short := cmdTest.Flags().Bool("short", false, "Tell long-running tests to shorten their run time.") - verbose := cmdTest.Flags().BoolP("verbose", "v", false, "Log all tests as they are run. Also print all text from Log and Logf calls even if the test succeeds.") - compileOnly := cmdTest.Flags().BoolP("compileonly", "c", false, "Compile the test binary to pkg.test.js but do not run it (where pkg is the last element of the package's import path). The file name can be changed with the -o flag.") - outputFilename := cmdTest.Flags().StringP("output", "o", "", "Compile the test binary to the named file. The test still runs (unless -c is specified).") - cmdTest.Flags().AddFlagSet(compilerFlags) - cmdTest.Run = func(cmd *cobra.Command, args []string) { - options.BuildTags = strings.Fields(tags) - err := func() error { - // Expand import path patterns. 
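-// For example, an illustrative pattern such as "./..." expands to every package under the current directory before the tests are built.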
- patternContext := gbuild.NewBuildContext("", options.BuildTags) - args = (&gotool.Context{BuildContext: *patternContext}).ImportPaths(args) - - if *compileOnly && len(args) > 1 { - return errors.New("cannot use -c flag with multiple packages") - } - if *outputFilename != "" && len(args) > 1 { - return errors.New("cannot use -o flag with multiple packages") - } - - pkgs := make([]*gbuild.PackageData, len(args)) - for i, pkgPath := range args { - var err error - pkgs[i], err = gbuild.Import(pkgPath, 0, "", options.BuildTags) - if err != nil { - return err - } - } - - var exitErr error - for _, pkg := range pkgs { - if len(pkg.TestGoFiles) == 0 && len(pkg.XTestGoFiles) == 0 { - fmt.Printf("? \t%s\t[no test files]\n", pkg.ImportPath) - continue - } - s := gbuild.NewSession(options) - - tests := &testFuncs{BuildContext: s.BuildContext(), Package: pkg.Package} - collectTests := func(testPkg *gbuild.PackageData, testPkgName string, needVar *bool) error { - if testPkgName == "_test" { - for _, file := range pkg.TestGoFiles { - if err := tests.load(pkg.Package.Dir, file, testPkgName, &tests.ImportTest, &tests.NeedTest); err != nil { - return err - } - } - } else { - for _, file := range pkg.XTestGoFiles { - if err := tests.load(pkg.Package.Dir, file, "_xtest", &tests.ImportXtest, &tests.NeedXtest); err != nil { - return err - } - } - } - _, err := s.BuildPackage(testPkg) - return err - } - - if err := collectTests(&gbuild.PackageData{ - Package: &build.Package{ - ImportPath: pkg.ImportPath, - Dir: pkg.Dir, - GoFiles: append(pkg.GoFiles, pkg.TestGoFiles...), - Imports: append(pkg.Imports, pkg.TestImports...), - }, - IsTest: true, - JSFiles: pkg.JSFiles, - }, "_test", &tests.NeedTest); err != nil { - return err - } - - if err := collectTests(&gbuild.PackageData{ - Package: &build.Package{ - ImportPath: pkg.ImportPath + "_test", - Dir: pkg.Dir, - GoFiles: pkg.XTestGoFiles, - Imports: pkg.XTestImports, - }, - IsTest: true, - }, "_xtest", &tests.NeedXtest); err != nil { - return err - } - - buf := new(bytes.Buffer) - if err := testmainTmpl.Execute(buf, tests); err != nil { - return err - } - - fset := token.NewFileSet() - mainFile, err := parser.ParseFile(fset, "_testmain.go", buf, 0) - if err != nil { - return err - } - - importContext := &compiler.ImportContext{ - Packages: s.Types, - Import: func(path string) (*compiler.Archive, error) { - if path == pkg.ImportPath || path == pkg.ImportPath+"_test" { - return s.Archives[path], nil - } - return s.BuildImportPath(path) - }, - } - mainPkgArchive, err := compiler.Compile("main", []*ast.File{mainFile}, fset, importContext, options.Minify) - if err != nil { - return err - } - - if *compileOnly && *outputFilename == "" { - *outputFilename = pkg.Package.Name + "_test.js" - } - - var outfile *os.File - if *outputFilename != "" { - outfile, err = os.Create(*outputFilename) - if err != nil { - return err - } - } else { - outfile, err = ioutil.TempFile(currentDirectory, "test.") - if err != nil { - return err - } - } - defer func() { - outfile.Close() - if *outputFilename == "" { - os.Remove(outfile.Name()) - os.Remove(outfile.Name() + ".map") - } - }() - - if err := s.WriteCommandPackage(mainPkgArchive, outfile.Name()); err != nil { - return err - } - - if *compileOnly { - continue - } - - var args []string - if *bench != "" { - args = append(args, "-test.bench", *bench) - } - if *benchtime != "" { - args = append(args, "-test.benchtime", *benchtime) - } - if *count != "" { - args = append(args, "-test.count", *count) - } - if *run != "" { - args = 
append(args, "-test.run", *run) - } - if *short { - args = append(args, "-test.short") - } - if *verbose { - args = append(args, "-test.v") - } - status := "ok " - start := time.Now() - if err := runNode(outfile.Name(), args, runTestDir(pkg), options.Quiet); err != nil { - if _, ok := err.(*exec.ExitError); !ok { - return err - } - exitErr = err - status = "FAIL" - } - fmt.Printf("%s\t%s\t%.3fs\n", status, pkg.ImportPath, time.Since(start).Seconds()) - } - return exitErr - }() - exitCode := handleError(err, options, nil) - - os.Exit(exitCode) - } - - cmdServe := &cobra.Command{ - Use: "serve [root]", - Short: "compile on-the-fly and serve", - } - cmdServe.Flags().AddFlagSet(flagVerbose) - cmdServe.Flags().AddFlagSet(flagQuiet) - cmdServe.Flags().AddFlagSet(compilerFlags) - var addr string - cmdServe.Flags().StringVarP(&addr, "http", "", ":8080", "HTTP bind address to serve") - cmdServe.Run = func(cmd *cobra.Command, args []string) { - options.BuildTags = strings.Fields(tags) - dirs := append(filepath.SplitList(build.Default.GOPATH), build.Default.GOROOT) - var root string - - if len(args) > 1 { - cmdServe.HelpFunc()(cmd, args) - os.Exit(1) - } - - if len(args) == 1 { - root = args[0] - } - - sourceFiles := http.FileServer(serveCommandFileSystem{ - serveRoot: root, - options: options, - dirs: dirs, - sourceMaps: make(map[string][]byte), - }) - - ln, err := net.Listen("tcp", addr) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if tcpAddr := ln.Addr().(*net.TCPAddr); tcpAddr.IP.Equal(net.IPv4zero) || tcpAddr.IP.Equal(net.IPv6zero) { // Any available addresses. - fmt.Printf("serving at http://localhost:%d and on port %d of any available addresses\n", tcpAddr.Port, tcpAddr.Port) - } else { // Specific address. - fmt.Printf("serving at http://%s\n", tcpAddr) - } - fmt.Fprintln(os.Stderr, http.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}, sourceFiles)) - } - - cmdVersion := &cobra.Command{ - Use: "version", - Short: "print GopherJS compiler version", - } - cmdVersion.Run = func(cmd *cobra.Command, args []string) { - if len(args) > 0 { - cmdServe.HelpFunc()(cmd, args) - os.Exit(1) - } - - fmt.Printf("GopherJS %s\n", compiler.Version) - } - - rootCmd := &cobra.Command{ - Use: "gopherjs", - Long: "GopherJS is a tool for compiling Go source code to JavaScript.", - } - rootCmd.AddCommand(cmdBuild, cmdGet, cmdInstall, cmdRun, cmdTest, cmdServe, cmdVersion, cmdDoc) - err := rootCmd.Execute() - if err != nil { - os.Exit(2) - } -} - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -type serveCommandFileSystem struct { - serveRoot string - options *gbuild.Options - dirs []string - sourceMaps map[string][]byte -} - -func (fs serveCommandFileSystem) Open(requestName string) (http.File, error) { - name := path.Join(fs.serveRoot, requestName[1:]) // requestName[0] == '/' - - dir, file := path.Split(name) - base := path.Base(dir) // base is parent folder name, which becomes the output file name. 
- - isPkg := file == base+".js" - isMap := file == base+".js.map" - isIndex := file == "index.html" - - if isPkg || isMap || isIndex { - // If we're going to be serving our special files, make sure there's a Go command in this folder. - s := gbuild.NewSession(fs.options) - pkg, err := gbuild.Import(path.Dir(name), 0, s.InstallSuffix(), fs.options.BuildTags) - if err != nil || pkg.Name != "main" { - isPkg = false - isMap = false - isIndex = false - } - - switch { - case isPkg: - buf := new(bytes.Buffer) - browserErrors := new(bytes.Buffer) - err := func() error { - archive, err := s.BuildPackage(pkg) - if err != nil { - return err - } - - sourceMapFilter := &compiler.SourceMapFilter{Writer: buf} - m := &sourcemap.Map{File: base + ".js"} - sourceMapFilter.MappingCallback = gbuild.NewMappingCallback(m, fs.options.GOROOT, fs.options.GOPATH, fs.options.MapToLocalDisk) - - deps, err := compiler.ImportDependencies(archive, s.BuildImportPath) - if err != nil { - return err - } - if err := compiler.WriteProgramCode(deps, sourceMapFilter); err != nil { - return err - } - - mapBuf := new(bytes.Buffer) - m.WriteTo(mapBuf) - buf.WriteString("//# sourceMappingURL=" + base + ".js.map\n") - fs.sourceMaps[name+".map"] = mapBuf.Bytes() - - return nil - }() - handleError(err, fs.options, browserErrors) - if err != nil { - buf = browserErrors - } - return newFakeFile(base+".js", buf.Bytes()), nil - - case isMap: - if content, ok := fs.sourceMaps[name]; ok { - return newFakeFile(base+".js.map", content), nil - } - } - } - - for _, d := range fs.dirs { - dir := http.Dir(filepath.Join(d, "src")) - - f, err := dir.Open(name) - if err == nil { - return f, nil - } - - // source maps are served outside of serveRoot - f, err = dir.Open(requestName) - if err == nil { - return f, nil - } - } - - if isIndex { - // If there was no index.html file in any dirs, supply our own. - return newFakeFile("index.html", []byte(``)), nil - } - - return nil, os.ErrNotExist -} - -type fakeFile struct { - name string - size int - io.ReadSeeker -} - -func newFakeFile(name string, content []byte) *fakeFile { - return &fakeFile{name: name, size: len(content), ReadSeeker: bytes.NewReader(content)} -} - -func (f *fakeFile) Close() error { - return nil -} - -func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) { - return nil, os.ErrInvalid -} - -func (f *fakeFile) Stat() (os.FileInfo, error) { - return f, nil -} - -func (f *fakeFile) Name() string { - return f.name -} - -func (f *fakeFile) Size() int64 { - return int64(f.size) -} - -func (f *fakeFile) Mode() os.FileMode { - return 0 -} - -func (f *fakeFile) ModTime() time.Time { - return time.Time{} -} - -func (f *fakeFile) IsDir() bool { - return false -} - -func (f *fakeFile) Sys() interface{} { - return nil -} - -// handleError handles err and returns an appropriate exit code. -// If browserErrors is non-nil, errors are written for presentation in browser. -func handleError(err error, options *gbuild.Options, browserErrors *bytes.Buffer) int { - switch err := err.(type) { - case nil: - return 0 - case compiler.ErrorList: - for _, entry := range err { - printError(entry, options, browserErrors) - } - return 1 - case *exec.ExitError: - return err.Sys().(syscall.WaitStatus).ExitStatus() - default: - printError(err, options, browserErrors) - return 1 - } -} - -// printError prints err to Stderr with options. If browserErrors is non-nil, errors are also written for presentation in browser. 
-func printError(err error, options *gbuild.Options, browserErrors *bytes.Buffer) { - e := sprintError(err) - options.PrintError("%s\n", e) - if browserErrors != nil { - fmt.Fprintln(browserErrors, `console.error("`+template.JSEscapeString(e)+`");`) - } -} - -// sprintError returns an annotated error string without trailing newline. -func sprintError(err error) string { - makeRel := func(name string) string { - if relname, err := filepath.Rel(currentDirectory, name); err == nil { - return relname - } - return name - } - - switch e := err.(type) { - case *scanner.Error: - return fmt.Sprintf("%s:%d:%d: %s", makeRel(e.Pos.Filename), e.Pos.Line, e.Pos.Column, e.Msg) - case types.Error: - pos := e.Fset.Position(e.Pos) - return fmt.Sprintf("%s:%d:%d: %s", makeRel(pos.Filename), pos.Line, pos.Column, e.Msg) - default: - return fmt.Sprintf("%s", e) - } -} - -// runNode runs script with args using Node.js in directory dir. -// If dir is empty string, current directory is used. -func runNode(script string, args []string, dir string, quiet bool) error { - var allArgs []string - if b, _ := strconv.ParseBool(os.Getenv("SOURCE_MAP_SUPPORT")); os.Getenv("SOURCE_MAP_SUPPORT") == "" || b { - allArgs = []string{"--require", "source-map-support/register"} - if err := exec.Command("node", "--require", "source-map-support/register", "--eval", "").Run(); err != nil { - if !quiet { - fmt.Fprintln(os.Stderr, "gopherjs: Source maps disabled. Install source-map-support module for nice stack traces. See https://github.com/gopherjs/gopherjs#gopherjs-run-gopherjs-test.") - } - allArgs = []string{} - } - } - - if runtime.GOOS != "windows" { - // We've seen issues with stack space limits causing - // recursion-heavy standard library tests to fail (e.g., see - // https://github.com/gopherjs/gopherjs/pull/669#issuecomment-319319483). - // - // There are two separate limits in non-Windows environments: - // - // - OS process limit - // - Node.js (V8) limit - // - // GopherJS fetches the current OS process limit, and sets the - // Node.js limit to the same value. So both limits are kept in sync - // and can be controlled by setting OS process limit. E.g.: - // - // ulimit -s 10000 && gopherjs test - // - cur, err := sysutil.RlimitStack() - if err != nil { - return fmt.Errorf("failed to get stack size limit: %v", err) - } - allArgs = append(allArgs, fmt.Sprintf("--stack_size=%v", cur/1000)) // Convert from bytes to KB. - } - - allArgs = append(allArgs, script) - allArgs = append(allArgs, args...) - - node := exec.Command("node", allArgs...) - node.Dir = dir - node.Stdin = os.Stdin - node.Stdout = os.Stdout - node.Stderr = os.Stderr - err := node.Run() - if _, ok := err.(*exec.ExitError); err != nil && !ok { - err = fmt.Errorf("could not run Node.js: %s", err.Error()) - } - return err -} - -// runTestDir returns the directory for Node.js to use when running tests for package p. -// Empty string means current directory. -func runTestDir(p *gbuild.PackageData) string { - if p.IsVirtual { - // The package is virtual and doesn't have a physical directory. Use current directory. - return "" - } - // Run tests in the package directory. 
- return p.Dir -} - -type testFuncs struct { - BuildContext *build.Context - Tests []testFunc - Benchmarks []testFunc - Examples []testFunc - TestMain *testFunc - Package *build.Package - ImportTest bool - NeedTest bool - ImportXtest bool - NeedXtest bool -} - -type testFunc struct { - Package string // imported package name (_test or _xtest) - Name string // function name - Output string // output, for examples - Unordered bool // output is allowed to be unordered. -} - -var testFileSet = token.NewFileSet() - -func (t *testFuncs) load(dir, file, pkg string, doImport, seen *bool) error { - f, err := buildutil.ParseFile(testFileSet, t.BuildContext, nil, dir, file, parser.ParseComments) - if err != nil { - return err - } - for _, d := range f.Decls { - n, ok := d.(*ast.FuncDecl) - if !ok { - continue - } - if n.Recv != nil { - continue - } - name := n.Name.String() - switch { - case isTestMain(n): - if t.TestMain != nil { - return errors.New("multiple definitions of TestMain") - } - t.TestMain = &testFunc{pkg, name, "", false} - *doImport, *seen = true, true - case isTest(name, "Test"): - t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - case isTest(name, "Benchmark"): - t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - } - } - ex := doc.Examples(f) - sort.Sort(byOrder(ex)) - for _, e := range ex { - *doImport = true // import test file whether executed or not - if e.Output == "" && !e.EmptyOutput { - // Don't run examples with no output. - continue - } - t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered}) - *seen = true - } - - return nil -} - -type byOrder []*doc.Example - -func (x byOrder) Len() int { return len(x) } -func (x byOrder) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byOrder) Less(i, j int) bool { return x[i].Order < x[j].Order } - -// isTestMain tells whether fn is a TestMain(m *testing.M) function. -func isTestMain(fn *ast.FuncDecl) bool { - if fn.Name.String() != "TestMain" || - fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || - fn.Type.Params == nil || - len(fn.Type.Params.List) != 1 || - len(fn.Type.Params.List[0].Names) > 1 { - return false - } - ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) - if !ok { - return false - } - // We can't easily check that the type is *testing.M - // because we don't know how testing has been imported, - // but at least check that it's *M or *something.M. - if name, ok := ptr.X.(*ast.Ident); ok && name.Name == "M" { - return true - } - if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == "M" { - return true - } - return false -} - -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. 
-func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -var testmainTmpl = template.Must(template.New("main").Parse(` -package main - -import ( -{{if not .TestMain}} - "os" -{{end}} - "testing" - "testing/internal/testdeps" - -{{if .ImportTest}} - {{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}} -{{end}} -{{if .ImportXtest}} - {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}} -{{end}} -) - -var tests = []testing.InternalTest{ -{{range .Tests}} - {"{{.Name}}", {{.Package}}.{{.Name}}}, -{{end}} -} - -var benchmarks = []testing.InternalBenchmark{ -{{range .Benchmarks}} - {"{{.Name}}", {{.Package}}.{{.Name}}}, -{{end}} -} - -var examples = []testing.InternalExample{ -{{range .Examples}} - {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, -{{end}} -} - -func main() { - m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples) -{{with .TestMain}} - {{.Package}}.{{.Name}}(m) -{{else}} - os.Exit(m.Run()) -{{end}} -} - -`)) diff --git a/vendor/github.com/gopherjs/jquery/jquery.go b/vendor/github.com/gopherjs/jquery/jquery.go index 8cb6ee5f8..37c3c51e0 100644 --- a/vendor/github.com/gopherjs/jquery/jquery.go +++ b/vendor/github.com/gopherjs/jquery/jquery.go @@ -69,6 +69,8 @@ type Event struct { Which int `js:"which"` Namespace string `js:"namespace"` MetaKey bool `js:"metaKey"` + ShiftKey bool `js:"shiftKey"` + CtrlKey bool `js:"ctrlKey"` PageX int `js:"pageX"` PageY int `js:"pageY"` Type string `js:"type"` @@ -209,7 +211,7 @@ func (j JQuery) Each(fn func(int, interface{})) JQuery { } func (j JQuery) Call(name string, args ...interface{}) JQuery { - return NewJQuery( j.o.Call(name, args...) ) + return NewJQuery(j.o.Call(name, args...)) } func (j JQuery) Underlying() *js.Object { @@ -419,13 +421,13 @@ func (j JQuery) InsertBefore(i interface{}) JQuery { return j } -func (j JQuery) Show() JQuery { - j.o = j.o.Call("show") +func (j JQuery) Show(i ...interface{}) JQuery { + j.o = j.o.Call("show", i...) return j } -func (j JQuery) Hide() JQuery { - j.o.Call("hide") +func (j JQuery) Hide(i ...interface{}) JQuery { + j.o.Call("hide", i...) return j } @@ -500,6 +502,10 @@ func (j JQuery) SetWidth(i interface{}) JQuery { return j } +func (j JQuery) Index(i interface{}) int { + return j.o.Call("index", i).Int() +} + func (j JQuery) InnerHeight() int { return j.o.Call("innerHeight").Int() } diff --git a/vendor/github.com/gopherjs/jsbuiltin/LICENSE b/vendor/github.com/gopherjs/jsbuiltin/LICENSE deleted file mode 100644 index 1eebfb119..000000000 --- a/vendor/github.com/gopherjs/jsbuiltin/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2015 Richard Musiol. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gopherjs/jsbuiltin/README.md b/vendor/github.com/gopherjs/jsbuiltin/README.md deleted file mode 100644 index c107ae4f3..000000000 --- a/vendor/github.com/gopherjs/jsbuiltin/README.md +++ /dev/null @@ -1,103 +0,0 @@ -[![Build Status](https://api.travis-ci.org/gopherjs/jsbuiltin.svg?branch=master)](https://travis-ci.org/gopherjs/jsbuiltin) [![GoDoc](https://godoc.org/github.com/gopherjs/jsbuiltin?status.png)](http://godoc.org/github.com/gopherjs/jsbuiltin) - -jsbuiltin - Built-in JavaScript functions for GopherJS ------------------------------------------------------- - -JavaScript has a small number of [built-in -functions](https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects) -to handle some common day-to-day tasks. This package providers wrappers around -some of these functions for use in GopherJS. - -It is worth noting that in many cases, using Go's equivalent functionality -(such as that found in the [net/url](https://golang.org/pkg/net/url/) package) -may be preferable to using this package, and will be a necessity any time you -wish to share functionality between front-end and back-end code. - -### What is supported? -Not all JavaScript built-in functions make sense or are useful in a Go -environment. The table below shows each of the JavaScript built-in functions, -and its current state in this package. - -| Name | Supported | Comment | -|----------------------|-----------|-----------------------------| -| eval() | -- | | -| uneval() | -- | | -| isFinite() | yes | | -| isNaN() | yes | | -| parseFloat() | TODO? | See note below | -| parseInt() | TODO? | See note below | -| decodeURI() | yes | | -| decodeURIComponent() | yes | | -| encodeURI() | yes | | -| encodeURIComponent() | yes | | -| escape() | -- | deprecated circa 2000 | -| Number() | -- | See note below | -| String() | -- | Use js.Object.String() | -| unescape() | -- | deprecated circa 2000 | -| typeof operator | yes | | -| instanceof operator | yes | | - -#### Notes on unmplemented functions - -* **eval()**: Is there ever a need to eval JS code from within Go? -* **Number()**: This requires handling a bunch of corner cases which don't - normally exist in a strictly typed language such as Go. It seems that anyone - with a legitimate need for this function probably needs to write their own - wrapper to handle the cases that matter to them. -* **parseInt()** and **parseFloat()**: These could be added, but doing so - will require answering some questions about the interfce. JavaScript has - effectively two relevant data types (int and float) where Go has has 12. 
- Deciding how to map JS's `parseInt()` to Go's `(u?)int(8|16|32|64)` types, - and JS's `parseFloat()` Go's `float(32|64)` or `complex(64|128)` needs to - be considered, as well as how to handle error cases (Go doesn't have a `NaN` - type, so any `NaN` result probably needs to be converted to a proper Go - error). If this matters to you, comments and/or PRs are welcome. - -### Installation and Usage -Get or update this package and dependencies with: - -``` -go get -u -d -tags=js github.com/gopherjs/jsbuiltin -``` - -### Basic usage example - -This is a modified version of the Pet example in the main GopherJS documentation, -to accept and return URI-encoded pet names using the jsbuiltin package. - -```go -package main - -import ( - "github.com/gopherjs/gopherjs/js" - "github.com/gopherjs/jsbuiltin" -) - -func main() { - js.Global.Set("pet", map[string]interface{}{ - "New": New, - }) -} - -type Pet struct { - name string -} - -func New(name string) *js.Object { - return js.MakeWrapper(&Pet{name}) -} - -func (p *Pet) Name() string { - return jsbuiltin.EncodeURIComponent(p.name) -} - -func (p *Pet) SetName(uriComponent string) error { - name, err := jsbuiltin.DecodeURIComponent(uriComponent) - if err != nil { - // Malformed UTF8 in uriComponent - return err - } - p.name = name - return nil -} -``` diff --git a/vendor/github.com/gopherjs/jsbuiltin/jsbuiltin.go b/vendor/github.com/gopherjs/jsbuiltin/jsbuiltin.go deleted file mode 100755 index cddb4d8d3..000000000 --- a/vendor/github.com/gopherjs/jsbuiltin/jsbuiltin.go +++ /dev/null @@ -1,98 +0,0 @@ -// Package jsbuiltin provides minimal wrappers around some JavasScript -// built-in functions. -package jsbuiltin - -import ( - "errors" - - "github.com/gopherjs/gopherjs/js" -) - -// DecodeURI decodes a Uniform Resource Identifier (URI) previously created -// by EncodeURI() or by a similar routine. If the underlying JavaScript -// function throws an error, it is returned as an error. -func DecodeURI(uri string) (raw string, err error) { - defer func() { - if r := recover(); r != nil { - err = r.(*js.Error) - } - }() - raw = js.Global.Call("decodeURI", uri).String() - return -} - -// EncodeURI encodes a Uniform Resource Identifier (URI) by replacing each -// instance of certain characters by one, two, three, or four escape sequences -// representing the UTF-8 encoding of the character (will only be four escape -// sequences for characters composed of two "surrogate" characters). -func EncodeURI(uri string) string { - return js.Global.Call("encodeURI", uri).String() -} - -// EncodeURIComponent encodes a Uniform Resource Identifier (URI) component -// by replacing each instance of certain characters by one, two, three, or -// four escape sequences representing the UTF-8 encoding of the character -// (will only be four escape sequences for characters composed of two -// "surrogate" characters). -func EncodeURIComponent(uri string) string { - return js.Global.Call("encodeURIComponent", uri).String() -} - -// DecodeURIComponent decodes a Uniform Resource Identifier (URI) component -// previously created by EncodeURIComponent() or by a similar routine. If the -// underlying JavaScript function throws an error, it is returned as an error. -func DecodeURIComponent(uri string) (raw string, err error) { - defer func() { - if r := recover(); r != nil { - err = r.(*js.Error) - } - }() - raw = js.Global.Call("decodeURIComponent", uri).String() - return -} - -// IsFinite determines whether the passed value is a finite number, and returns -// true if it is. 
 If needed, the parameter is first converted to a number.
-func IsFinite(value interface{}) bool {
-	return js.Global.Call("isFinite", value).Bool()
-}
-
-// IsNaN determines whether a value is NaN (Not-a-Number) or not. A return
-// value of true indicates the input value is considered NaN by JavaScript.
-func IsNaN(value interface{}) bool {
-	return js.Global.Call("isNaN", value).Bool()
-}
-
-// Type constants represent the JavaScript builtin types, which may be returned
-// by TypeOf().
-const (
-	TypeUndefined = "undefined"
-	TypeNull      = "null"
-	TypeObject    = "object"
-	TypeBoolean   = "boolean"
-	TypeNumber    = "number"
-	TypeString    = "string"
-	TypeFunction  = "function"
-	TypeSymbol    = "symbol"
-)
-
-// TypeOf returns the JavaScript type of the passed value
-func TypeOf(value interface{}) string {
-	return js.Global.Get("$jsbuiltin$").Call("typeoffunc", value).String()
-}
-
-// InstanceOf returns true if value is an instance of object according to the
-// built-in 'instanceof' operator. `object` must be a *js.Object representing
-// a javascript constructor function.
-func InstanceOf(value interface{}, object *js.Object) bool {
-	return js.Global.Get("$jsbuiltin$").Call("instanceoffunc", value, object).Bool()
-}
-
-// In returns true if key is a member of obj. An error is returned if obj is not
-// a JavaScript object.
-func In(key string, obj *js.Object) (ok bool, err error) {
-	if obj == nil || obj == js.Undefined || TypeOf(obj) != TypeObject {
-		return false, errors.New("obj not a JavaScript object")
-	}
-	return js.Global.Get("$jsbuiltin$").Call("infunc", key, obj).Bool(), nil
-}
diff --git a/vendor/github.com/gopherjs/jsbuiltin/jsbuiltin.inc.js b/vendor/github.com/gopherjs/jsbuiltin/jsbuiltin.inc.js
deleted file mode 100755
index be5061994..000000000
--- a/vendor/github.com/gopherjs/jsbuiltin/jsbuiltin.inc.js
+++ /dev/null
@@ -1,5 +0,0 @@
-$global.$jsbuiltin$ = {
-  typeoffunc: function(x) { return typeof x },
-  instanceoffunc: function(x,y) { return x instanceof y },
-  infunc: function(x,y) { return x in y }
-}
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
index 187ef676d..110ad7999 100644
--- a/vendor/github.com/jmespath/go-jmespath/README.md
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -4,4 +4,84 @@
-See http://jmespath.org for more info.
+go-jmespath is a Go implementation of JMESPath,
+which is a query language for JSON. It will take a JSON
+document and transform it into another JSON document
+through a JMESPath expression.
+
+Using go-jmespath is easy. There's a single function
+you use, `jmespath.Search`:
+
+```go
+> import "github.com/jmespath/go-jmespath"
+>
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar.baz[2]", data)
+result = 2
+```
+
+In the example we gave the `Search` function input data of
+`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath
+expression `foo.bar.baz[2]`, and the `Search` function evaluated
+the expression against the input data to produce the result `2`.
+
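+The snippet above is interactive-style shorthand; it omits the
+`encoding/json` import and error handling. As a minimal, self-contained
+sketch of the same lookup (the `main` scaffolding and `panic` calls here
+are illustrative only, not prescribed by the library):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+func main() {
+	jsondata := []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`)
+	var data interface{}
+	if err := json.Unmarshal(jsondata, &data); err != nil {
+		panic(err)
+	}
+	// Search evaluates the JMESPath expression against the decoded document.
+	result, err := jmespath.Search("foo.bar.baz[2]", data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(result) // prints 2
+}
+```
+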
+The JMESPath language can do a lot more than select an element
+from a list. Here are a few more examples:
+
+```go
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar", data)
+result = { "baz": [ 0, 1, 2, 3, 4 ] }
+
+
+> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"},
+                          {"first": "c", "last": "d"}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[*].first", data)
+result = [ 'a', 'c' ]
+
+
+> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25},
+                          {"age": 30}, {"age": 35},
+                          {"age": 40}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[?age > `30`]", data)
+result = [ { age: 35 }, { age: 40 } ]
+```
+
+You can also pre-compile your query. This is useful if
+you are going to run multiple searches with it:
+
+```go
+ > var jsondata = []byte(`{"foo": "bar"}`)
+ > var data interface{}
+ > err := json.Unmarshal(jsondata, &data)
+ > precompiled, err := jmespath.Compile("foo")
+ > if err != nil {
+ >   // ... handle the error
+ > }
+ > result, err := precompiled.Search(data)
+ result = "bar"
+```
+
+## More Resources
+
+The examples above only show a small amount of what
+a JMESPath expression can do. If you want to take a
+tour of the language, the *best* place to go is the
+[JMESPath Tutorial](http://jmespath.org/tutorial.html).
+
+One of the best things about JMESPath is that it is
+implemented in many different programming languages including
+Python, Ruby, PHP, Lua, and more. To see a complete list of libraries,
+check out the [JMESPath libraries page](http://jmespath.org/libraries.html).
+
+And finally, the full JMESPath specification can be found
+on the [JMESPath site](http://jmespath.org/specification.html).
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
index 9cfa988bc..010efe9bf 100644
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -2,7 +2,7 @@ package jmespath
 
 import "strconv"
 
-// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
 // safe for concurrent use by multiple goroutines.
type JMESPath struct { ast ASTNode diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod new file mode 100644 index 000000000..aa1e3f1c9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.mod @@ -0,0 +1,5 @@ +module github.com/jmespath/go-jmespath + +go 1.14 + +require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum new file mode 100644 index 000000000..331fa6982 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index 1240a1755..4abc303ab 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -137,7 +137,7 @@ func (p *Parser) Parse(expression string) (ASTNode, error) { } if p.current() != tEOF { return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expresssion: %s", p.current())) + "Unexpected token at the end of the expression: %s", p.current())) } return parsed, nil } diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go deleted file mode 100644 index 7c634cdc5..000000000 --- a/vendor/github.com/lib/pq/oid/gen.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build ignore - -// Generate the table of OID values -// Run with 'go run gen.go'. -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - "os/exec" - "strings" - - _ "github.com/lib/pq" -) - -// OID represent a postgres Object Identifier Type. -type OID struct { - ID int - Type string -} - -// Name returns an upper case version of the oid type. 
-func (o OID) Name() string { - return strings.ToUpper(o.Type) -} - -func main() { - datname := os.Getenv("PGDATABASE") - sslmode := os.Getenv("PGSSLMODE") - - if datname == "" { - os.Setenv("PGDATABASE", "pqgotest") - } - - if sslmode == "" { - os.Setenv("PGSSLMODE", "disable") - } - - db, err := sql.Open("postgres", "") - if err != nil { - log.Fatal(err) - } - rows, err := db.Query(` - SELECT typname, oid - FROM pg_type WHERE oid < 10000 - ORDER BY oid; - `) - if err != nil { - log.Fatal(err) - } - oids := make([]*OID, 0) - for rows.Next() { - var oid OID - if err = rows.Scan(&oid.Type, &oid.ID); err != nil { - log.Fatal(err) - } - oids = append(oids, &oid) - } - if err = rows.Err(); err != nil { - log.Fatal(err) - } - cmd := exec.Command("gofmt") - cmd.Stderr = os.Stderr - w, err := cmd.StdinPipe() - if err != nil { - log.Fatal(err) - } - f, err := os.Create("types.go") - if err != nil { - log.Fatal(err) - } - cmd.Stdout = f - err = cmd.Start() - if err != nil { - log.Fatal(err) - } - fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.") - fmt.Fprintln(w, "\npackage oid") - fmt.Fprintln(w, "const (") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) - } - fmt.Fprintln(w, ")") - fmt.Fprintln(w, "var TypeName = map[Oid]string{") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) - } - fmt.Fprintln(w, "}") - w.Close() - cmd.Wait() -} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 35786f22c..000000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,340 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) - fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) -if err != nil { - return off, err -} -headerEnd := off -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - fallthrough - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3 - o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n") - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - - case st.Tag(i) == 
`dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - // We have packed everything, only now we know the rdlength of this RR - fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) - fmt.Fprintf(b, "rr := new(%s)\n", name) - fmt.Fprint(b, "rr.Hdr = h\n") - fmt.Fprint(b, `if noRdata(h) { -return rr, off, nil - } -var err error -rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. - if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = 
unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return rr, off, nil - } -`) - } - } - fmt.Fprintf(b, "return rr, off, err }\n\n") - } - // Generate typeToUnpack map - fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") - for _, name := range namedTypes { - if name == "RFC3597" { - continue - } - fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) - } - fmt.Fprintln(b, "}\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. -func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. -func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index bf80da329..000000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,271 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, -} - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -// Header() functions -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len()\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: - o("l += len(rr.%s) + 1\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case 
strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"a"`: - o("l += net.IPv4len // %s\n") - case st.Tag(i) == `dns:"aaaa"`: - o("l += net.IPv6len // %s\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l += 1 // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"*rr.Hdr.copyHeader()"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/neelance/astrewrite/LICENSE b/vendor/github.com/neelance/astrewrite/LICENSE deleted file mode 100644 index ce7f487d2..000000000 --- a/vendor/github.com/neelance/astrewrite/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2016 Richard Musiol. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/neelance/astrewrite/simplify.go b/vendor/github.com/neelance/astrewrite/simplify.go deleted file mode 100644 index a4fcc51ea..000000000 --- a/vendor/github.com/neelance/astrewrite/simplify.go +++ /dev/null @@ -1,835 +0,0 @@ -package astrewrite - -import ( - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" -) - -type simplifyContext struct { - info *types.Info - varCounter int - simplifyCalls bool -} - -func Simplify(file *ast.File, info *types.Info, simplifyCalls bool) *ast.File { - c := &simplifyContext{info: info, simplifyCalls: simplifyCalls} - - decls := make([]ast.Decl, len(file.Decls)) - for i, decl := range file.Decls { - c.varCounter = 0 - switch decl := decl.(type) { - case *ast.GenDecl: - decls[i] = c.simplifyGenDecl(nil, decl) - - case *ast.FuncDecl: - decls[i] = &ast.FuncDecl{ - Doc: decl.Doc, - Recv: decl.Recv, - Name: decl.Name, - Type: decl.Type, - Body: c.simplifyBlock(decl.Body), - } - } - } - - newFile := &ast.File{ - Doc: file.Doc, - Package: file.Package, - Name: file.Name, - Decls: decls, - Scope: file.Scope, - Imports: file.Imports, - Unresolved: file.Unresolved, - Comments: file.Comments, - } - c.info.Scopes[newFile] = c.info.Scopes[file] - return newFile -} - -func (c *simplifyContext) simplifyStmtList(stmts []ast.Stmt) []ast.Stmt { - var newStmts []ast.Stmt - for _, s := range stmts { - c.simplifyStmt(&newStmts, s) - } - return newStmts -} - -func (c *simplifyContext) simplifyGenDecl(stmts *[]ast.Stmt, decl *ast.GenDecl) *ast.GenDecl { - if decl.Tok != token.VAR { - return decl - } - - specs := make([]ast.Spec, len(decl.Specs)) - for j, spec := range decl.Specs { - switch spec := spec.(type) { - case *ast.ValueSpec: - var values []ast.Expr - if spec.Values != nil { - values = make([]ast.Expr, len(spec.Values)) - for i, v := range spec.Values { - v2 := c.simplifyExpr(stmts, v) - for _, initializer := range c.info.InitOrder { - if initializer.Rhs == v { - initializer.Rhs = v2 - } - } - values[i] = v2 - } - } - specs[j] = &ast.ValueSpec{ - Doc: spec.Doc, - Names: spec.Names, - Type: spec.Type, - Values: values, - Comment: spec.Comment, - } - default: - specs[j] = spec - } - } - - return &ast.GenDecl{ - Doc: decl.Doc, - TokPos: decl.TokPos, - Tok: token.VAR, - Lparen: decl.Lparen, - Specs: specs, - Rparen: decl.Rparen, - } -} - -func (c *simplifyContext) simplifyStmt(stmts *[]ast.Stmt, s ast.Stmt) { - if s == nil { - return - } - - switch s := s.(type) { - case *ast.ExprStmt: - *stmts = append(*stmts, &ast.ExprStmt{ - X: c.simplifyExpr2(stmts, s.X, true), - }) - - case *ast.BlockStmt: - *stmts = append(*stmts, c.simplifyBlock(s)) - - case *ast.LabeledStmt: - c.simplifyStmt(stmts, s.Stmt) - (*stmts)[len(*stmts)-1] = &ast.LabeledStmt{ - Label: s.Label, - Colon: s.Colon, - Stmt: (*stmts)[len(*stmts)-1], - } - - case *ast.AssignStmt: - lhs := make([]ast.Expr, len(s.Lhs)) - for i, x := range s.Lhs { - lhs[i] = c.simplifyExpr(stmts, x) - } - rhs := make([]ast.Expr, len(s.Rhs)) - for i, x := range 
s.Rhs { - rhs[i] = c.simplifyExpr2(stmts, x, true) - } - *stmts = append(*stmts, &ast.AssignStmt{ - Lhs: lhs, - Tok: s.Tok, - TokPos: s.TokPos, - Rhs: rhs, - }) - - case *ast.DeclStmt: - *stmts = append(*stmts, &ast.DeclStmt{ - Decl: c.simplifyGenDecl(stmts, s.Decl.(*ast.GenDecl)), - }) - - case *ast.IfStmt: - if s.Init != nil { - block := &ast.BlockStmt{} - *stmts = append(*stmts, block) - stmts = &block.List - c.simplifyStmt(stmts, s.Init) - } - newS := &ast.IfStmt{ - If: s.If, - Cond: c.simplifyExpr(stmts, s.Cond), - Body: c.simplifyBlock(s.Body), - Else: c.toElseBranch(c.simplifyToStmtList(s.Else), c.info.Scopes[s.Else]), - } - c.info.Scopes[newS] = c.info.Scopes[s] - *stmts = append(*stmts, newS) - - case *ast.SwitchStmt: - c.simplifySwitch(stmts, s) - - case *ast.TypeSwitchStmt: - if s.Init != nil { - block := &ast.BlockStmt{} - *stmts = append(*stmts, block) - stmts = &block.List - c.simplifyStmt(stmts, s.Init) - } - var assign ast.Stmt - switch a := s.Assign.(type) { - case *ast.ExprStmt: - ta := a.X.(*ast.TypeAssertExpr) - assign = &ast.ExprStmt{ - X: &ast.TypeAssertExpr{ - X: c.simplifyExpr(stmts, ta.X), - Lparen: ta.Lparen, - Type: ta.Type, - Rparen: ta.Rparen, - }, - } - case *ast.AssignStmt: - ta := a.Rhs[0].(*ast.TypeAssertExpr) - assign = &ast.AssignStmt{ - Lhs: a.Lhs, - Tok: a.Tok, - TokPos: a.TokPos, - Rhs: []ast.Expr{ - &ast.TypeAssertExpr{ - X: c.simplifyExpr(stmts, ta.X), - Lparen: ta.Lparen, - Type: ta.Type, - Rparen: ta.Rparen, - }, - }, - } - default: - panic("unexpected type switch assign") - } - clauses := make([]ast.Stmt, len(s.Body.List)) - for i, ccs := range s.Body.List { - cc := ccs.(*ast.CaseClause) - newClause := &ast.CaseClause{ - Case: cc.Case, - List: cc.List, - Colon: cc.Colon, - Body: c.simplifyStmtList(cc.Body), - } - if implicit, ok := c.info.Implicits[cc]; ok { - c.info.Implicits[newClause] = implicit - } - clauses[i] = newClause - } - newS := &ast.TypeSwitchStmt{ - Switch: s.Switch, - Assign: assign, - Body: &ast.BlockStmt{ - List: clauses, - }, - } - c.info.Scopes[newS] = c.info.Scopes[s] - *stmts = append(*stmts, newS) - - case *ast.ForStmt: - newS := &ast.ForStmt{ - For: s.For, - Init: s.Init, - Cond: s.Cond, - Post: s.Post, - Body: c.simplifyBlock(s.Body), - } - c.info.Scopes[newS] = c.info.Scopes[s] - *stmts = append(*stmts, newS) - - // case *ast.ForStmt: - // c.simplifyStmt(stmts, s.Init) - // var condStmts []ast.Stmt - // cond := c.newVar(&condStmts, s.Cond) - // bodyStmts := s.Body.List - // if len(condStmts) != 0 { - // bodyStmts = append(append(condStmts, &ast.IfStmt{ - // Cond: &ast.UnaryExpr{ - // Op: token.NOT, - // X: cond, - // }, - // Body: &ast.BlockStmt{ - // List: []ast.Stmt{&ast.BranchStmt{ - // Tok: token.BREAK, - // }}, - // }, - // }), bodyStmts...) 
- // cond = nil - // } - // *stmts = append(*stmts, &ast.ForStmt{ - // For: s.For, - // Cond: cond, - // Post: s.Post, - // Body: &ast.BlockStmt{ - // List: bodyStmts, - // }, - // }) - - case *ast.RangeStmt: - var newS ast.Stmt - switch t := c.info.TypeOf(s.X).Underlying().(type) { - case *types.Chan: - key := s.Key - tok := s.Tok - if key == nil { - key = ast.NewIdent("_") - tok = token.DEFINE - } - okVar := c.newIdent(types.Typ[types.Bool]) - if s.Tok == token.ASSIGN { - *stmts = append(*stmts, &ast.DeclStmt{ - Decl: &ast.GenDecl{ - Tok: token.VAR, - Specs: []ast.Spec{&ast.ValueSpec{ - Names: []*ast.Ident{okVar}, - Type: ast.NewIdent("bool"), - }}, - }, - }) - } - newS = &ast.ForStmt{ - For: s.For, - Body: &ast.BlockStmt{ - Lbrace: s.Body.Lbrace, - List: append([]ast.Stmt{ - &ast.AssignStmt{ - Lhs: []ast.Expr{key, okVar}, - TokPos: s.TokPos, - Tok: tok, - Rhs: []ast.Expr{c.setType(&ast.UnaryExpr{ - Op: token.ARROW, - X: c.newVar(stmts, s.X), - }, types.NewTuple( - types.NewVar(token.NoPos, nil, "", t.Elem()), - types.NewVar(token.NoPos, nil, "", types.Typ[types.Bool]), - ))}, - }, - &ast.IfStmt{ - Cond: c.setType(&ast.UnaryExpr{ - Op: token.NOT, - X: okVar, - }, types.Typ[types.Bool]), - Body: &ast.BlockStmt{ - List: []ast.Stmt{ - &ast.BranchStmt{Tok: token.BREAK}, - }, - }, - }, - }, c.simplifyStmtList(s.Body.List)...), - Rbrace: s.Body.Rbrace, - }, - } - - default: - newS = &ast.RangeStmt{ - For: s.For, - Key: s.Key, - Value: s.Value, - TokPos: s.TokPos, - Tok: s.Tok, - X: s.X, - Body: c.simplifyBlock(s.Body), - } - } - c.info.Scopes[newS] = c.info.Scopes[s] - *stmts = append(*stmts, newS) - - case *ast.IncDecStmt: - *stmts = append(*stmts, &ast.IncDecStmt{ - X: c.simplifyExpr(stmts, s.X), - TokPos: s.TokPos, - Tok: s.Tok, - }) - - case *ast.GoStmt: - *stmts = append(*stmts, &ast.GoStmt{ - Go: s.Go, - Call: c.simplifyCall(stmts, s.Call), - }) - - case *ast.SelectStmt: - clauses := make([]ast.Stmt, len(s.Body.List)) - for i, entry := range s.Body.List { - cc := entry.(*ast.CommClause) - var newComm ast.Stmt - var bodyPrefix []ast.Stmt - switch comm := cc.Comm.(type) { - case *ast.ExprStmt: - recv := comm.X.(*ast.UnaryExpr) - if recv.Op != token.ARROW { - panic("unexpected comm clause") - } - newComm = &ast.ExprStmt{ - X: &ast.UnaryExpr{ - Op: token.ARROW, - OpPos: recv.OpPos, - X: c.simplifyExpr(stmts, recv.X), - }, - } - case *ast.AssignStmt: - recv := comm.Rhs[0].(*ast.UnaryExpr) - if recv.Op != token.ARROW { - panic("unexpected comm clause") - } - simplifyLhs := false - for _, x := range comm.Lhs { - if c.simplifyCalls && ContainsCall(x) { - simplifyLhs = true - } - } - lhs := comm.Lhs - tok := comm.Tok - if simplifyLhs { - for i, x := range lhs { - id := c.newIdent(c.info.TypeOf(x)) - bodyPrefix = append(bodyPrefix, simpleAssign(c.simplifyExpr(&bodyPrefix, x), comm.Tok, id)) - lhs[i] = id - } - tok = token.DEFINE - } - newComm = &ast.AssignStmt{ - Lhs: lhs, - Tok: tok, - Rhs: []ast.Expr{c.simplifyExpr(stmts, recv)}, - } - case *ast.SendStmt: - newComm = &ast.SendStmt{ - Chan: c.simplifyExpr(stmts, comm.Chan), - Arrow: comm.Arrow, - Value: c.simplifyExpr(stmts, comm.Value), - } - case nil: - newComm = nil - default: - panic("unexpected comm clause") - } - newCC := &ast.CommClause{ - Case: cc.Case, - Comm: newComm, - Colon: cc.Colon, - Body: append(bodyPrefix, c.simplifyStmtList(cc.Body)...), - } - c.info.Scopes[newCC] = c.info.Scopes[cc] - clauses[i] = newCC - } - *stmts = append(*stmts, &ast.SelectStmt{ - Select: s.Select, - Body: &ast.BlockStmt{ - List: clauses, - }, - }) - - case 
*ast.DeferStmt: - *stmts = append(*stmts, &ast.DeferStmt{ - Defer: s.Defer, - Call: c.simplifyCall(stmts, s.Call), - }) - - case *ast.SendStmt: - *stmts = append(*stmts, &ast.SendStmt{ - Chan: c.simplifyExpr(stmts, s.Chan), - Arrow: s.Arrow, - Value: c.simplifyExpr(stmts, s.Value), - }) - - case *ast.ReturnStmt: - *stmts = append(*stmts, &ast.ReturnStmt{ - Return: s.Return, - Results: c.simplifyExprList(stmts, s.Results), - }) - - default: - *stmts = append(*stmts, s) - } -} - -func (c *simplifyContext) simplifyBlock(s *ast.BlockStmt) *ast.BlockStmt { - if s == nil { - return nil - } - newS := &ast.BlockStmt{ - Lbrace: s.Lbrace, - List: c.simplifyStmtList(s.List), - Rbrace: s.Rbrace, - } - c.info.Scopes[newS] = c.info.Scopes[s] - return newS -} - -func (c *simplifyContext) simplifySwitch(stmts *[]ast.Stmt, s *ast.SwitchStmt) { - wrapClause := &ast.CaseClause{} - newS := &ast.SwitchStmt{ - Switch: s.Switch, - Body: &ast.BlockStmt{List: []ast.Stmt{wrapClause}}, - } - c.info.Scopes[newS] = c.info.Scopes[s] - c.info.Scopes[wrapClause] = c.info.Scopes[s] - *stmts = append(*stmts, newS) - stmts = &wrapClause.Body - - c.simplifyStmt(stmts, s.Init) - - nonDefaultClauses, defaultClause := c.simplifyCaseClauses(s.Body.List) - tag := c.makeTag(stmts, s.Tag, len(nonDefaultClauses) != 0) - *stmts = append(*stmts, unwrapBlock(c.switchToIfElse(tag, nonDefaultClauses, defaultClause))...) -} - -func (c *simplifyContext) makeTag(stmts *[]ast.Stmt, tag ast.Expr, needsTag bool) ast.Expr { - if tag == nil { - id := ast.NewIdent("true") - c.info.Types[id] = types.TypeAndValue{Type: types.Typ[types.Bool], Value: constant.MakeBool(true)} - return id - } - if !needsTag { - *stmts = append(*stmts, simpleAssign(ast.NewIdent("_"), token.ASSIGN, tag)) - return nil - } - return c.newVar(stmts, tag) -} - -func (c *simplifyContext) simplifyCaseClauses(clauses []ast.Stmt) (nonDefaultClauses []*ast.CaseClause, defaultClause *ast.CaseClause) { - var openClauses []*ast.CaseClause - for _, cc := range clauses { - clause := cc.(*ast.CaseClause) - newClause := &ast.CaseClause{ - Case: clause.Case, - List: clause.List, - Colon: clause.Colon, - } - c.info.Scopes[newClause] = c.info.Scopes[clause] - - body := clause.Body - hasFallthrough := false - if len(body) != 0 { - if b, isBranchStmt := body[len(body)-1].(*ast.BranchStmt); isBranchStmt && b.Tok == token.FALLTHROUGH { - body = body[:len(body)-1] - hasFallthrough = true - } - } - openClauses = append(openClauses, newClause) - for _, openClause := range openClauses { - openClause.Body = append(openClause.Body, body...) 
- } - if !hasFallthrough { - openClauses = nil - } - - if len(clause.List) == 0 { - defaultClause = newClause - continue - } - nonDefaultClauses = append(nonDefaultClauses, newClause) - } - return -} - -func (c *simplifyContext) switchToIfElse(tag ast.Expr, nonDefaultClauses []*ast.CaseClause, defaultClause *ast.CaseClause) ast.Stmt { - if len(nonDefaultClauses) == 0 { - if defaultClause != nil { - return c.toElseBranch(c.simplifyStmtList(defaultClause.Body), c.info.Scopes[defaultClause]) - } - return nil - } - - clause := nonDefaultClauses[0] - conds := make([]ast.Expr, len(clause.List)) - for i, cond := range clause.List { - conds[i] = c.setType(&ast.BinaryExpr{ - X: tag, - Op: token.EQL, - Y: c.setType(&ast.ParenExpr{X: cond}, c.info.TypeOf(cond)), - }, types.Typ[types.Bool]) - } - - var stmts []ast.Stmt - ifStmt := &ast.IfStmt{ - If: clause.Case, - Cond: c.simplifyExpr(&stmts, c.disjunction(conds)), - Body: &ast.BlockStmt{List: c.simplifyStmtList(clause.Body)}, - Else: c.switchToIfElse(tag, nonDefaultClauses[1:], defaultClause), - } - c.info.Scopes[ifStmt] = c.info.Scopes[clause] - stmts = append(stmts, ifStmt) - return c.toElseBranch(stmts, c.info.Scopes[clause]) -} - -func (c *simplifyContext) disjunction(conds []ast.Expr) ast.Expr { - if len(conds) == 1 { - return conds[0] - } - return c.setType(&ast.BinaryExpr{ - X: conds[0], - Op: token.LOR, - Y: c.disjunction(conds[1:]), - }, types.Typ[types.Bool]) -} - -func (c *simplifyContext) simplifyToStmtList(s ast.Stmt) (stmts []ast.Stmt) { - c.simplifyStmt(&stmts, s) - return -} - -func (c *simplifyContext) toElseBranch(stmts []ast.Stmt, scope *types.Scope) ast.Stmt { - if len(stmts) == 0 { - return nil - } - if len(stmts) == 1 { - switch stmt := stmts[0].(type) { - case *ast.IfStmt, *ast.BlockStmt: - c.info.Scopes[stmt] = scope - return stmt - } - } - block := &ast.BlockStmt{ - List: stmts, - } - c.info.Scopes[block] = scope - return block -} - -func unwrapBlock(s ast.Stmt) []ast.Stmt { - if s == nil { - return nil - } - if block, ok := s.(*ast.BlockStmt); ok { - return block.List - } - return []ast.Stmt{s} -} - -func (c *simplifyContext) simplifyExpr(stmts *[]ast.Stmt, x ast.Expr) ast.Expr { - return c.simplifyExpr2(stmts, x, false) -} - -func (c *simplifyContext) simplifyExpr2(stmts *[]ast.Stmt, x ast.Expr, callOK bool) ast.Expr { - x2 := c.simplifyExpr3(stmts, x, callOK) - if t, ok := c.info.Types[x]; ok { - c.info.Types[x2] = t - } - return x2 -} - -func (c *simplifyContext) simplifyExpr3(stmts *[]ast.Stmt, x ast.Expr, callOK bool) ast.Expr { - switch x := x.(type) { - case *ast.FuncLit: - return &ast.FuncLit{ - Type: x.Type, - Body: &ast.BlockStmt{ - List: c.simplifyStmtList(x.Body.List), - }, - } - - case *ast.CompositeLit: - elts := make([]ast.Expr, len(x.Elts)) - for i, elt := range x.Elts { - if kv, ok := elt.(*ast.KeyValueExpr); ok { - elts[i] = &ast.KeyValueExpr{ - Key: kv.Key, - Colon: kv.Colon, - Value: c.simplifyExpr(stmts, kv.Value), - } - continue - } - elts[i] = c.simplifyExpr(stmts, elt) - } - return &ast.CompositeLit{ - Type: x.Type, - Lbrace: x.Lbrace, - Elts: elts, - Rbrace: x.Rbrace, - } - - case *ast.ParenExpr: - return &ast.ParenExpr{ - Lparen: x.Lparen, - X: c.simplifyExpr(stmts, x.X), - Rparen: x.Rparen, - } - - case *ast.SelectorExpr: - selExpr := &ast.SelectorExpr{ - X: c.simplifyExpr(stmts, x.X), - Sel: x.Sel, - } - if sel, ok := c.info.Selections[x]; ok { - c.info.Selections[selExpr] = sel - } - return selExpr - - case *ast.IndexExpr: - return &ast.IndexExpr{ - X: c.simplifyExpr(stmts, x.X), - Lbrack: 
x.Lbrack, - Index: c.simplifyExpr(stmts, x.Index), - Rbrack: x.Rbrack, - } - - case *ast.SliceExpr: - return &ast.SliceExpr{ - X: c.simplifyExpr(stmts, x.X), - Lbrack: x.Lbrack, - Low: c.simplifyExpr(stmts, x.Low), - High: c.simplifyExpr(stmts, x.High), - Max: c.simplifyExpr(stmts, x.Max), - Slice3: x.Slice3, - Rbrack: x.Rbrack, - } - - case *ast.TypeAssertExpr: - return &ast.TypeAssertExpr{ - X: c.simplifyExpr(stmts, x.X), - Lparen: x.Lparen, - Type: x.Type, - Rparen: x.Rparen, - } - - case *ast.CallExpr: - call := c.simplifyCall(stmts, x) - if callOK || !c.simplifyCalls { - return call - } - return c.newVar(stmts, call) - - case *ast.StarExpr: - return &ast.StarExpr{ - Star: x.Star, - X: c.simplifyExpr(stmts, x.X), - } - - case *ast.UnaryExpr: - return &ast.UnaryExpr{ - OpPos: x.OpPos, - Op: x.Op, - X: c.simplifyExpr(stmts, x.X), - } - - case *ast.BinaryExpr: - if (x.Op == token.LAND || x.Op == token.LOR) && c.simplifyCalls && ContainsCall(x.Y) { - v := c.newVar(stmts, x.X) - cond := v - if x.Op == token.LOR { - cond = &ast.UnaryExpr{ - Op: token.NOT, - X: cond, - } - } - var ifBody []ast.Stmt - ifBody = append(ifBody, simpleAssign(v, token.ASSIGN, c.simplifyExpr2(&ifBody, x.Y, true))) - *stmts = append(*stmts, &ast.IfStmt{ - Cond: cond, - Body: &ast.BlockStmt{ - List: ifBody, - }, - }) - return v - } - return &ast.BinaryExpr{ - X: c.simplifyExpr(stmts, x.X), - OpPos: x.OpPos, - Op: x.Op, - Y: c.simplifyExpr(stmts, x.Y), - } - - default: - return x - } -} - -func (c *simplifyContext) simplifyCall(stmts *[]ast.Stmt, x *ast.CallExpr) *ast.CallExpr { - return &ast.CallExpr{ - Fun: c.simplifyExpr(stmts, x.Fun), - Lparen: x.Lparen, - Args: c.simplifyArgs(stmts, x.Args), - Ellipsis: x.Ellipsis, - Rparen: x.Rparen, - } -} - -func (c *simplifyContext) simplifyArgs(stmts *[]ast.Stmt, args []ast.Expr) []ast.Expr { - if len(args) == 1 { - if tuple, ok := c.info.TypeOf(args[0]).(*types.Tuple); ok && c.simplifyCalls { - call := c.simplifyExpr2(stmts, args[0], true) - vars := make([]ast.Expr, tuple.Len()) - for i := range vars { - vars[i] = c.newIdent(tuple.At(i).Type()) - } - *stmts = append(*stmts, &ast.AssignStmt{ - Lhs: vars, - Tok: token.DEFINE, - Rhs: []ast.Expr{call}, - }) - return vars - } - } - return c.simplifyExprList(stmts, args) -} - -func (c *simplifyContext) simplifyExprList(stmts *[]ast.Stmt, exprs []ast.Expr) []ast.Expr { - if exprs == nil { - return nil - } - simplifiedExprs := make([]ast.Expr, len(exprs)) - for i, expr := range exprs { - simplifiedExprs[i] = c.simplifyExpr(stmts, expr) - } - return simplifiedExprs -} - -func (c *simplifyContext) newVar(stmts *[]ast.Stmt, x ast.Expr) ast.Expr { - id := c.newIdent(c.info.TypeOf(x)) - *stmts = append(*stmts, simpleAssign(id, token.DEFINE, x)) - return id -} - -func (c *simplifyContext) newIdent(t types.Type) *ast.Ident { - c.varCounter++ - id := ast.NewIdent(fmt.Sprintf("_%d", c.varCounter)) - c.info.Types[id] = types.TypeAndValue{Type: t} // TODO remove? 
- c.info.Uses[id] = types.NewVar(token.NoPos, nil, id.Name, t) - return id -} - -func (c *simplifyContext) setType(x ast.Expr, t types.Type) ast.Expr { - c.info.Types[x] = types.TypeAndValue{Type: t} - return x -} - -func simpleAssign(lhs ast.Expr, tok token.Token, rhs ast.Expr) *ast.AssignStmt { - return &ast.AssignStmt{ - Lhs: []ast.Expr{lhs}, - Tok: tok, - Rhs: []ast.Expr{rhs}, - } -} - -func ContainsCall(x ast.Expr) bool { - switch x := x.(type) { - case *ast.CallExpr: - return true - case *ast.CompositeLit: - for _, elt := range x.Elts { - if ContainsCall(elt) { - return true - } - } - return false - case *ast.KeyValueExpr: - return ContainsCall(x.Key) || ContainsCall(x.Value) - case *ast.ParenExpr: - return ContainsCall(x.X) - case *ast.SelectorExpr: - return ContainsCall(x.X) - case *ast.IndexExpr: - return ContainsCall(x.X) || ContainsCall(x.Index) - case *ast.SliceExpr: - return ContainsCall(x.X) || ContainsCall(x.Low) || ContainsCall(x.High) || ContainsCall(x.Max) - case *ast.TypeAssertExpr: - return ContainsCall(x.X) - case *ast.StarExpr: - return ContainsCall(x.X) - case *ast.UnaryExpr: - return ContainsCall(x.X) - case *ast.BinaryExpr: - return ContainsCall(x.X) || ContainsCall(x.Y) - default: - return false - } -} diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 000000000..ce9d7cded --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... 
+ +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 273db3c98..54dfdcb12 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -1,4 +1,4 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) Package errors provides simple error handling primitives. @@ -41,12 +41,19 @@ default: [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. -## Licence +## License BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 842ee8045..161aea258 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -6,7 +6,7 @@ // return err // } // -// which applied recursively up the call stack results in error reports +// which when applied recursively up the call stack results in error reports // without context or debugging information. The errors package allows // programmers to add context to the failure path in their code in a way // that does not destroy the original value of the error. @@ -15,16 +15,17 @@ // // The errors.Wrap function returns a new error that adds context to the // original error by recording a stack trace at the point Wrap is called, -// and the supplied message. For example +// together with the supplied message. 
For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } // -// If additional control is required the errors.WithStack and errors.WithMessage -// functions destructure errors.Wrap into its component operations of annotating -// an error with a stack trace and an a message, respectively. +// If additional control is required, the errors.WithStack and +// errors.WithMessage functions destructure errors.Wrap into its component +// operations: annotating an error with a stack trace and with a message, +// respectively. // // Retrieving the cause of an error // @@ -38,7 +39,7 @@ // } // // can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error which does not implement causer, which is assumed to be +// the topmost error that does not implement causer, which is assumed to be // the original cause. For example: // // switch err := errors.Cause(err).(type) { @@ -48,16 +49,16 @@ // // unknown error // } // -// causer interface is not exported by this package, but is considered a part -// of stable public API. +// Although the causer interface is not exported by this package, it is +// considered a part of its stable public interface. // // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported +// be formatted by the fmt package. The following verbs are supported: // // %s print the error. If the error has a Cause it will be -// printed recursively +// printed recursively. // %v see %s // %+v extended format. Each Frame of the error's StackTrace will // be printed in detail. @@ -65,13 +66,13 @@ // Retrieving the stack trace of an error or wrapper // // New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface. +// invoked. This information can be retrieved with the following interface: // // type stackTracer interface { // StackTrace() errors.StackTrace // } // -// Where errors.StackTrace is defined as +// The returned errors.StackTrace type is defined as // // type StackTrace []Frame // @@ -81,12 +82,12 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // -// stackTracer interface is not exported by this package, but is considered a part -// of stable public API. +// Although the stackTracer interface is not exported by this package, it is +// considered a part of its stable public interface. // // See the documentation for Frame.Format for more details. package errors @@ -158,6 +159,9 @@ type withStack struct { func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -192,7 +196,7 @@ func Wrap(err error, message string) error { } // Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is call, and the format specifier. +// at the point Wrapf is called, and the format specifier. // If err is nil, Wrapf returns nil. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { @@ -220,6 +224,18 @@ func WithMessage(err error, message string) error { } } +// WithMessagef annotates err with the format specifier. 
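Taken together, Wrap, Cause, and the Unwrap methods added above let one wrapped error be inspected through both the pkg/errors causer chain and the standard library's Go 1.13 chain. A minimal sketch; the errNotFound sentinel and lookup helper are hypothetical, not part of this change:

	package main

	import (
		stderrors "errors"
		"fmt"

		"github.com/pkg/errors"
	)

	// errNotFound is a hypothetical sentinel used only for this demo.
	var errNotFound = errors.New("not found")

	func lookup() error {
		// Wrap records a stack trace and a message around the cause.
		return errors.Wrap(errNotFound, "lookup failed")
	}

	func main() {
		err := lookup()
		fmt.Println(err)                              // lookup failed: not found
		fmt.Println(errors.Cause(err) == errNotFound) // true, via the causer chain
		fmt.Println(stderrors.Is(err, errNotFound))   // true, via the new Unwrap methods
		fmt.Printf("%+v\n", err)                      // message plus the recorded stack trace
	}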
+// If err is nil, WithMessagef returns nil. +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } +} + type withMessage struct { cause error msg string @@ -228,6 +244,9 @@ type withMessage struct { func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 000000000..be0d10d0c --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 6b1f2891a..779a8348f 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -5,10 +5,13 @@ import ( "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ func (f Frame) line() int { return line } +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. 
// // %s source file @@ -46,29 +58,24 @@ func (f Frame) line() int { // // Format accepts flags that alter the printing of some verbs, as follows: // -// %+s path of source file relative to the compile time GOPATH +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) // %+v equivalent to %+s:%d func (f Frame) Format(s fmt.State, verb rune) { switch verb { case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -76,25 +83,57 @@ func (f Frame) Format(s fmt.State, verb rune) { } } +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. func (st StackTrace) Format(s fmt.State, verb rune) { switch verb { case 'v': switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) } + io.WriteString(s, "]") } // stack represents a stack of program counters. @@ -136,43 +175,3 @@ func funcname(name string) string { i = strings.Index(name, ".") return name[i+1:] } - -func trimGOPATH(name, file string) string { - // Here we want to get the source file path relative to the compile time - // GOPATH. As of Go 1.6.x there is no direct way to know the compiled - // GOPATH at runtime, but we can infer the number of path segments in the - // GOPATH. We note that fn.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. 
For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired output. We count separators from the end of the file - // path until it finds two more than in the function name and then move - // one character forward to preserve the initial path segment without a - // leading separator. - const sep = "/" - goal := strings.Count(name, sep) + 2 - i := len(file) - for n := 0; n < goal; n++ { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - // not enough separators found, set i so that the slice expression - // below leaves file unmodified - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - file = file[i+len(sep):] - return file -} diff --git a/vendor/golang.org/x/image/draw/gen.go b/vendor/golang.org/x/image/draw/gen.go deleted file mode 100644 index 822bb6a50..000000000 --- a/vendor/golang.org/x/image/draw/gen.go +++ /dev/null @@ -1,1404 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "strings" -) - -var debug = flag.Bool("debug", false, "") - -func main() { - flag.Parse() - - w := new(bytes.Buffer) - w.WriteString("// generated by \"go run gen.go\". DO NOT EDIT.\n\n" + - "package draw\n\nimport (\n" + - "\"image\"\n" + - "\"image/color\"\n" + - "\"math\"\n" + - "\n" + - "\"golang.org/x/image/math/f64\"\n" + - ")\n") - - gen(w, "nnInterpolator", codeNNScaleLeaf, codeNNTransformLeaf) - gen(w, "ablInterpolator", codeABLScaleLeaf, codeABLTransformLeaf) - genKernel(w) - - if *debug { - os.Stdout.Write(w.Bytes()) - return - } - out, err := format.Source(w.Bytes()) - if err != nil { - log.Fatal(err) - } - if err := ioutil.WriteFile("impl.go", out, 0660); err != nil { - log.Fatal(err) - } -} - -var ( - // dsTypes are the (dst image type, src image type) pairs to generate - // scale_DType_SType implementations for. The last element in the slice - // should be the fallback pair ("Image", "image.Image"). - // - // TODO: add *image.CMYK src type after Go 1.5 is released. - // An *image.CMYK is also alwaysOpaque. - dsTypes = []struct{ dType, sType string }{ - {"*image.RGBA", "*image.Gray"}, - {"*image.RGBA", "*image.NRGBA"}, - {"*image.RGBA", "*image.RGBA"}, - {"*image.RGBA", "*image.YCbCr"}, - {"*image.RGBA", "image.Image"}, - {"Image", "image.Image"}, - } - dTypes, sTypes []string - sTypesForDType = map[string][]string{} - subsampleRatios = []string{ - "444", - "422", - "420", - "440", - } - ops = []string{"Over", "Src"} - // alwaysOpaque are those image.Image implementations that are always - // opaque. For these types, Over is equivalent to the faster Src, in the - // absence of a source mask. 
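The Over-to-Src rewrite that alwaysOpaque (declared just below) enables is plain compositing arithmetic: for a fully opaque source, the destination term of Porter-Duff Over vanishes, so the cheaper Src path gives the same pixels. A quick check of that identity on one 16-bit channel, as a sketch:

	package main

	import "fmt"

	func main() {
		const sa = 0xffff // fully opaque source alpha, as for *image.Gray or *image.YCbCr
		dst, src := uint32(0x1234), uint32(0xabcd)
		// Porter-Duff Over on one 16-bit channel: dst*(0xffff-sa)/0xffff + src.
		over := dst*(0xffff-sa)/0xffff + src
		fmt.Println(over == src) // true: Over degenerates to the cheaper Src
	}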
- alwaysOpaque = map[string]bool{ - "*image.Gray": true, - "*image.YCbCr": true, - } -) - -func init() { - dTypesSeen := map[string]bool{} - sTypesSeen := map[string]bool{} - for _, t := range dsTypes { - if !sTypesSeen[t.sType] { - sTypesSeen[t.sType] = true - sTypes = append(sTypes, t.sType) - } - if !dTypesSeen[t.dType] { - dTypesSeen[t.dType] = true - dTypes = append(dTypes, t.dType) - } - sTypesForDType[t.dType] = append(sTypesForDType[t.dType], t.sType) - } - sTypesForDType["anyDType"] = sTypes -} - -type data struct { - dType string - sType string - sratio string - receiver string - op string -} - -func gen(w *bytes.Buffer, receiver string, codes ...string) { - expn(w, codeRoot, &data{receiver: receiver}) - for _, code := range codes { - for _, t := range dsTypes { - for _, op := range ops { - if op == "Over" && alwaysOpaque[t.sType] { - continue - } - expn(w, code, &data{ - dType: t.dType, - sType: t.sType, - receiver: receiver, - op: op, - }) - } - } - } -} - -func genKernel(w *bytes.Buffer) { - expn(w, codeKernelRoot, &data{}) - for _, sType := range sTypes { - expn(w, codeKernelScaleLeafX, &data{ - sType: sType, - }) - } - for _, dType := range dTypes { - for _, op := range ops { - expn(w, codeKernelScaleLeafY, &data{ - dType: dType, - op: op, - }) - } - } - for _, t := range dsTypes { - for _, op := range ops { - if op == "Over" && alwaysOpaque[t.sType] { - continue - } - expn(w, codeKernelTransformLeaf, &data{ - dType: t.dType, - sType: t.sType, - op: op, - }) - } - } -} - -func expn(w *bytes.Buffer, code string, d *data) { - if d.sType == "*image.YCbCr" && d.sratio == "" { - for _, sratio := range subsampleRatios { - e := *d - e.sratio = sratio - expn(w, code, &e) - } - return - } - - for _, line := range strings.Split(code, "\n") { - line = expnLine(line, d) - if line == ";" { - continue - } - fmt.Fprintln(w, line) - } -} - -func expnLine(line string, d *data) string { - for { - i := strings.IndexByte(line, '$') - if i < 0 { - break - } - prefix, s := line[:i], line[i+1:] - - i = len(s) - for j, c := range s { - if !('A' <= c && c <= 'Z' || 'a' <= c && c <= 'z') { - i = j - break - } - } - dollar, suffix := s[:i], s[i:] - - e := expnDollar(prefix, dollar, suffix, d) - if e == "" { - log.Fatalf("couldn't expand %q", line) - } - line = e - } - return line -} - -// expnDollar expands a "$foo" fragment in a line of generated code. It returns -// the empty string if there was a problem. It returns ";" if the generated -// code is a no-op. 
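The expnLine/expnDollar pair documented above is a string-substitution micro-templater: scan a line for $name fragments and splice in values derived from the current (dType, sType, op) tuple. A stripped-down sketch of the same mechanism; the expand helper and its vars map are illustrative only (the real generator derives values from a *data and aborts on unknown names):

	package main

	import (
		"fmt"
		"strings"
	)

	// expand is a simplified stand-in for expnLine/expnDollar: it scans a
	// template line for $name fragments and splices in values from vars.
	func expand(line string, vars map[string]string) string {
		for {
			i := strings.IndexByte(line, '$')
			if i < 0 {
				return line
			}
			j := i + 1
			for j < len(line) && ('a' <= line[j] && line[j] <= 'z' || 'A' <= line[j] && line[j] <= 'Z') {
				j++
			}
			line = line[:i] + vars[line[i+1:j]] + line[j:]
		}
	}

	func main() {
		vars := map[string]string{"dTypeRN": "RGBA", "sTypeRN": "YCbCr", "sratio": "444", "op": "Src"}
		fmt.Println(expand("z.scale_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, src, sr, &o)", vars))
		// Output: z.scale_RGBA_YCbCr444_Src(dst, dr, adr, src, sr, &o)
	}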
-func expnDollar(prefix, dollar, suffix string, d *data) string { - switch dollar { - case "dType": - return prefix + d.dType + suffix - case "dTypeRN": - return prefix + relName(d.dType) + suffix - case "sratio": - return prefix + d.sratio + suffix - case "sType": - return prefix + d.sType + suffix - case "sTypeRN": - return prefix + relName(d.sType) + suffix - case "receiver": - return prefix + d.receiver + suffix - case "op": - return prefix + d.op + suffix - - case "switch": - return expnSwitch("", "", true, suffix) - case "switchD": - return expnSwitch("", "", false, suffix) - case "switchS": - return expnSwitch("", "anyDType", false, suffix) - - case "preOuter": - switch d.dType { - default: - return ";" - case "Image": - s := "" - if d.sType == "image.Image" { - s = "srcMask, smp := opts.SrcMask, opts.SrcMaskP\n" - } - return s + - "dstMask, dmp := opts.DstMask, opts.DstMaskP\n" + - "dstColorRGBA64 := &color.RGBA64{}\n" + - "dstColor := color.Color(dstColorRGBA64)" - } - - case "preInner": - switch d.dType { - default: - return ";" - case "*image.RGBA": - return "d := " + pixOffset("dst", "dr.Min.X+adr.Min.X", "dr.Min.Y+int(dy)", "*4", "*dst.Stride") - } - - case "preKernelOuter": - switch d.sType { - default: - return ";" - case "image.Image": - return "srcMask, smp := opts.SrcMask, opts.SrcMaskP" - } - - case "preKernelInner": - switch d.dType { - default: - return ";" - case "*image.RGBA": - return "d := " + pixOffset("dst", "dr.Min.X+int(dx)", "dr.Min.Y+adr.Min.Y", "*4", "*dst.Stride") - } - - case "blend": - args, _ := splitArgs(suffix) - if len(args) != 4 { - return "" - } - switch d.sType { - default: - return argf(args, ""+ - "$3r = $0*$1r + $2*$3r\n"+ - "$3g = $0*$1g + $2*$3g\n"+ - "$3b = $0*$1b + $2*$3b\n"+ - "$3a = $0*$1a + $2*$3a", - ) - case "*image.Gray": - return argf(args, ""+ - "$3r = $0*$1r + $2*$3r", - ) - case "*image.YCbCr": - return argf(args, ""+ - "$3r = $0*$1r + $2*$3r\n"+ - "$3g = $0*$1g + $2*$3g\n"+ - "$3b = $0*$1b + $2*$3b", - ) - } - - case "clampToAlpha": - if alwaysOpaque[d.sType] { - return ";" - } - // Go uses alpha-premultiplied color. The naive computation can lead to - // invalid colors, e.g. red > alpha, when some weights are negative. 
- return ` - if pr > pa { - pr = pa - } - if pg > pa { - pg = pa - } - if pb > pa { - pb = pa - } - ` - - case "convFtou": - args, _ := splitArgs(suffix) - if len(args) != 2 { - return "" - } - - switch d.sType { - default: - return argf(args, ""+ - "$0r := uint32($1r)\n"+ - "$0g := uint32($1g)\n"+ - "$0b := uint32($1b)\n"+ - "$0a := uint32($1a)", - ) - case "*image.Gray": - return argf(args, ""+ - "$0r := uint32($1r)", - ) - case "*image.YCbCr": - return argf(args, ""+ - "$0r := uint32($1r)\n"+ - "$0g := uint32($1g)\n"+ - "$0b := uint32($1b)", - ) - } - - case "outputu": - args, _ := splitArgs(suffix) - if len(args) != 3 { - return "" - } - - switch d.op { - case "Over": - switch d.dType { - default: - log.Fatalf("bad dType %q", d.dType) - case "Image": - return argf(args, ""+ - "qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ - "if dstMask != nil {\n"+ - " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ - " $2r = $2r * ma / 0xffff\n"+ - " $2g = $2g * ma / 0xffff\n"+ - " $2b = $2b * ma / 0xffff\n"+ - " $2a = $2a * ma / 0xffff\n"+ - "}\n"+ - "$2a1 := 0xffff - $2a\n"+ - "dstColorRGBA64.R = uint16(qr*$2a1/0xffff + $2r)\n"+ - "dstColorRGBA64.G = uint16(qg*$2a1/0xffff + $2g)\n"+ - "dstColorRGBA64.B = uint16(qb*$2a1/0xffff + $2b)\n"+ - "dstColorRGBA64.A = uint16(qa*$2a1/0xffff + $2a)\n"+ - "dst.Set($0, $1, dstColor)", - ) - case "*image.RGBA": - return argf(args, ""+ - "$2a1 := (0xffff - $2a) * 0x101\n"+ - "dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*$2a1/0xffff + $2r) >> 8)\n"+ - "dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*$2a1/0xffff + $2g) >> 8)\n"+ - "dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*$2a1/0xffff + $2b) >> 8)\n"+ - "dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*$2a1/0xffff + $2a) >> 8)", - ) - } - - case "Src": - switch d.dType { - default: - log.Fatalf("bad dType %q", d.dType) - case "Image": - return argf(args, ""+ - "if dstMask != nil {\n"+ - " qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ - " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ - " pr = pr * ma / 0xffff\n"+ - " pg = pg * ma / 0xffff\n"+ - " pb = pb * ma / 0xffff\n"+ - " pa = pa * ma / 0xffff\n"+ - " $2a1 := 0xffff - ma\n"+ // Note that this is ma, not $2a. 
- " dstColorRGBA64.R = uint16(qr*$2a1/0xffff + $2r)\n"+ - " dstColorRGBA64.G = uint16(qg*$2a1/0xffff + $2g)\n"+ - " dstColorRGBA64.B = uint16(qb*$2a1/0xffff + $2b)\n"+ - " dstColorRGBA64.A = uint16(qa*$2a1/0xffff + $2a)\n"+ - " dst.Set($0, $1, dstColor)\n"+ - "} else {\n"+ - " dstColorRGBA64.R = uint16($2r)\n"+ - " dstColorRGBA64.G = uint16($2g)\n"+ - " dstColorRGBA64.B = uint16($2b)\n"+ - " dstColorRGBA64.A = uint16($2a)\n"+ - " dst.Set($0, $1, dstColor)\n"+ - "}", - ) - case "*image.RGBA": - switch d.sType { - default: - return argf(args, ""+ - "dst.Pix[d+0] = uint8($2r >> 8)\n"+ - "dst.Pix[d+1] = uint8($2g >> 8)\n"+ - "dst.Pix[d+2] = uint8($2b >> 8)\n"+ - "dst.Pix[d+3] = uint8($2a >> 8)", - ) - case "*image.Gray": - return argf(args, ""+ - "out := uint8($2r >> 8)\n"+ - "dst.Pix[d+0] = out\n"+ - "dst.Pix[d+1] = out\n"+ - "dst.Pix[d+2] = out\n"+ - "dst.Pix[d+3] = 0xff", - ) - case "*image.YCbCr": - return argf(args, ""+ - "dst.Pix[d+0] = uint8($2r >> 8)\n"+ - "dst.Pix[d+1] = uint8($2g >> 8)\n"+ - "dst.Pix[d+2] = uint8($2b >> 8)\n"+ - "dst.Pix[d+3] = 0xff", - ) - } - } - } - - case "outputf": - args, _ := splitArgs(suffix) - if len(args) != 5 { - return "" - } - ret := "" - - switch d.op { - case "Over": - switch d.dType { - default: - log.Fatalf("bad dType %q", d.dType) - case "Image": - ret = argf(args, ""+ - "qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ - "$3r0 := uint32($2($3r * $4))\n"+ - "$3g0 := uint32($2($3g * $4))\n"+ - "$3b0 := uint32($2($3b * $4))\n"+ - "$3a0 := uint32($2($3a * $4))\n"+ - "if dstMask != nil {\n"+ - " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ - " $3r0 = $3r0 * ma / 0xffff\n"+ - " $3g0 = $3g0 * ma / 0xffff\n"+ - " $3b0 = $3b0 * ma / 0xffff\n"+ - " $3a0 = $3a0 * ma / 0xffff\n"+ - "}\n"+ - "$3a1 := 0xffff - $3a0\n"+ - "dstColorRGBA64.R = uint16(qr*$3a1/0xffff + $3r0)\n"+ - "dstColorRGBA64.G = uint16(qg*$3a1/0xffff + $3g0)\n"+ - "dstColorRGBA64.B = uint16(qb*$3a1/0xffff + $3b0)\n"+ - "dstColorRGBA64.A = uint16(qa*$3a1/0xffff + $3a0)\n"+ - "dst.Set($0, $1, dstColor)", - ) - case "*image.RGBA": - ret = argf(args, ""+ - "$3r0 := uint32($2($3r * $4))\n"+ - "$3g0 := uint32($2($3g * $4))\n"+ - "$3b0 := uint32($2($3b * $4))\n"+ - "$3a0 := uint32($2($3a * $4))\n"+ - "$3a1 := (0xffff - uint32($3a0)) * 0x101\n"+ - "dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*$3a1/0xffff + $3r0) >> 8)\n"+ - "dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*$3a1/0xffff + $3g0) >> 8)\n"+ - "dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*$3a1/0xffff + $3b0) >> 8)\n"+ - "dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*$3a1/0xffff + $3a0) >> 8)", - ) - } - - case "Src": - switch d.dType { - default: - log.Fatalf("bad dType %q", d.dType) - case "Image": - ret = argf(args, ""+ - "if dstMask != nil {\n"+ - " qr, qg, qb, qa := dst.At($0, $1).RGBA()\n"+ - " _, _, _, ma := dstMask.At(dmp.X + $0, dmp.Y + $1).RGBA()\n"+ - " pr := uint32($2($3r * $4)) * ma / 0xffff\n"+ - " pg := uint32($2($3g * $4)) * ma / 0xffff\n"+ - " pb := uint32($2($3b * $4)) * ma / 0xffff\n"+ - " pa := uint32($2($3a * $4)) * ma / 0xffff\n"+ - " pa1 := 0xffff - ma\n"+ // Note that this is ma, not pa. 
- " dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr)\n"+ - " dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg)\n"+ - " dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb)\n"+ - " dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa)\n"+ - " dst.Set($0, $1, dstColor)\n"+ - "} else {\n"+ - " dstColorRGBA64.R = $2($3r * $4)\n"+ - " dstColorRGBA64.G = $2($3g * $4)\n"+ - " dstColorRGBA64.B = $2($3b * $4)\n"+ - " dstColorRGBA64.A = $2($3a * $4)\n"+ - " dst.Set($0, $1, dstColor)\n"+ - "}", - ) - case "*image.RGBA": - switch d.sType { - default: - ret = argf(args, ""+ - "dst.Pix[d+0] = uint8($2($3r * $4) >> 8)\n"+ - "dst.Pix[d+1] = uint8($2($3g * $4) >> 8)\n"+ - "dst.Pix[d+2] = uint8($2($3b * $4) >> 8)\n"+ - "dst.Pix[d+3] = uint8($2($3a * $4) >> 8)", - ) - case "*image.Gray": - ret = argf(args, ""+ - "out := uint8($2($3r * $4) >> 8)\n"+ - "dst.Pix[d+0] = out\n"+ - "dst.Pix[d+1] = out\n"+ - "dst.Pix[d+2] = out\n"+ - "dst.Pix[d+3] = 0xff", - ) - case "*image.YCbCr": - ret = argf(args, ""+ - "dst.Pix[d+0] = uint8($2($3r * $4) >> 8)\n"+ - "dst.Pix[d+1] = uint8($2($3g * $4) >> 8)\n"+ - "dst.Pix[d+2] = uint8($2($3b * $4) >> 8)\n"+ - "dst.Pix[d+3] = 0xff", - ) - } - } - } - - return strings.Replace(ret, " * 1)", ")", -1) - - case "srcf", "srcu": - lhs, eqOp := splitEq(prefix) - if lhs == "" { - return "" - } - args, extra := splitArgs(suffix) - if len(args) != 2 { - return "" - } - - tmp := "" - if dollar == "srcf" { - tmp = "u" - } - - // TODO: there's no need to multiply by 0x101 in the switch below if - // the next thing we're going to do is shift right by 8. - - buf := new(bytes.Buffer) - switch d.sType { - default: - log.Fatalf("bad sType %q", d.sType) - case "image.Image": - fmt.Fprintf(buf, ""+ - "%sr%s, %sg%s, %sb%s, %sa%s := src.At(%s, %s).RGBA()\n", - lhs, tmp, lhs, tmp, lhs, tmp, lhs, tmp, args[0], args[1], - ) - if d.dType == "" || d.dType == "Image" { - fmt.Fprintf(buf, ""+ - "if srcMask != nil {\n"+ - " _, _, _, ma := srcMask.At(smp.X+%s, smp.Y+%s).RGBA()\n"+ - " %sr%s = %sr%s * ma / 0xffff\n"+ - " %sg%s = %sg%s * ma / 0xffff\n"+ - " %sb%s = %sb%s * ma / 0xffff\n"+ - " %sa%s = %sa%s * ma / 0xffff\n"+ - "}\n", - args[0], args[1], - lhs, tmp, lhs, tmp, - lhs, tmp, lhs, tmp, - lhs, tmp, lhs, tmp, - lhs, tmp, lhs, tmp, - ) - } - case "*image.Gray": - fmt.Fprintf(buf, ""+ - "%si := %s\n"+ - "%sr%s := uint32(src.Pix[%si]) * 0x101\n", - lhs, pixOffset("src", args[0], args[1], "", "*src.Stride"), - lhs, tmp, lhs, - ) - case "*image.NRGBA": - fmt.Fprintf(buf, ""+ - "%si := %s\n"+ - "%sa%s := uint32(src.Pix[%si+3]) * 0x101\n"+ - "%sr%s := uint32(src.Pix[%si+0]) * %sa%s / 0xff\n"+ - "%sg%s := uint32(src.Pix[%si+1]) * %sa%s / 0xff\n"+ - "%sb%s := uint32(src.Pix[%si+2]) * %sa%s / 0xff\n", - lhs, pixOffset("src", args[0], args[1], "*4", "*src.Stride"), - lhs, tmp, lhs, - lhs, tmp, lhs, lhs, tmp, - lhs, tmp, lhs, lhs, tmp, - lhs, tmp, lhs, lhs, tmp, - ) - case "*image.RGBA": - fmt.Fprintf(buf, ""+ - "%si := %s\n"+ - "%sr%s := uint32(src.Pix[%si+0]) * 0x101\n"+ - "%sg%s := uint32(src.Pix[%si+1]) * 0x101\n"+ - "%sb%s := uint32(src.Pix[%si+2]) * 0x101\n"+ - "%sa%s := uint32(src.Pix[%si+3]) * 0x101\n", - lhs, pixOffset("src", args[0], args[1], "*4", "*src.Stride"), - lhs, tmp, lhs, - lhs, tmp, lhs, - lhs, tmp, lhs, - lhs, tmp, lhs, - ) - case "*image.YCbCr": - fmt.Fprintf(buf, ""+ - "%si := %s\n"+ - "%sj := %s\n"+ - "%s\n", - lhs, pixOffset("src", args[0], args[1], "", "*src.YStride"), - lhs, cOffset(args[0], args[1], d.sratio), - ycbcrToRGB(lhs, tmp), - ) - } - - if dollar == "srcf" { - switch d.sType { - default: - 
fmt.Fprintf(buf, ""+ - "%sr %s float64(%sru)%s\n"+ - "%sg %s float64(%sgu)%s\n"+ - "%sb %s float64(%sbu)%s\n"+ - "%sa %s float64(%sau)%s\n", - lhs, eqOp, lhs, extra, - lhs, eqOp, lhs, extra, - lhs, eqOp, lhs, extra, - lhs, eqOp, lhs, extra, - ) - case "*image.Gray": - fmt.Fprintf(buf, ""+ - "%sr %s float64(%sru)%s\n", - lhs, eqOp, lhs, extra, - ) - case "*image.YCbCr": - fmt.Fprintf(buf, ""+ - "%sr %s float64(%sru)%s\n"+ - "%sg %s float64(%sgu)%s\n"+ - "%sb %s float64(%sbu)%s\n", - lhs, eqOp, lhs, extra, - lhs, eqOp, lhs, extra, - lhs, eqOp, lhs, extra, - ) - } - } - - return strings.TrimSpace(buf.String()) - - case "tweakD": - if d.dType == "*image.RGBA" { - return "d += dst.Stride" - } - return ";" - - case "tweakDx": - if d.dType == "*image.RGBA" { - return strings.Replace(prefix, "dx++", "dx, d = dx+1, d+4", 1) - } - return prefix - - case "tweakDy": - if d.dType == "*image.RGBA" { - return strings.Replace(prefix, "for dy, s", "for _, s", 1) - } - return prefix - - case "tweakP": - switch d.sType { - case "*image.Gray": - if strings.HasPrefix(strings.TrimSpace(prefix), "pa * ") { - return "1," - } - return "pr," - case "*image.YCbCr": - if strings.HasPrefix(strings.TrimSpace(prefix), "pa * ") { - return "1," - } - } - return prefix - - case "tweakPr": - if d.sType == "*image.Gray" { - return "pr *= s.invTotalWeightFFFF" - } - return ";" - - case "tweakVarP": - switch d.sType { - case "*image.Gray": - return strings.Replace(prefix, "var pr, pg, pb, pa", "var pr", 1) - case "*image.YCbCr": - return strings.Replace(prefix, "var pr, pg, pb, pa", "var pr, pg, pb", 1) - } - return prefix - } - return "" -} - -func expnSwitch(op, dType string, expandBoth bool, template string) string { - if op == "" && dType != "anyDType" { - lines := []string{"switch op {"} - for _, op = range ops { - lines = append(lines, - fmt.Sprintf("case %s:", op), - expnSwitch(op, dType, expandBoth, template), - ) - } - lines = append(lines, "}") - return strings.Join(lines, "\n") - } - - switchVar := "dst" - if dType != "" { - switchVar = "src" - } - lines := []string{fmt.Sprintf("switch %s := %s.(type) {", switchVar, switchVar)} - - fallback, values := "Image", dTypes - if dType != "" { - fallback, values = "image.Image", sTypesForDType[dType] - } - for _, v := range values { - if dType != "" { - // v is the sType. Skip those always-opaque sTypes, where Over is - // equivalent to Src. 
- if op == "Over" && alwaysOpaque[v] { - continue - } - } - - if v == fallback { - lines = append(lines, "default:") - } else { - lines = append(lines, fmt.Sprintf("case %s:", v)) - } - - if dType != "" { - if v == "*image.YCbCr" { - lines = append(lines, expnSwitchYCbCr(op, dType, template)) - } else { - lines = append(lines, expnLine(template, &data{dType: dType, sType: v, op: op})) - } - } else if !expandBoth { - lines = append(lines, expnLine(template, &data{dType: v, op: op})) - } else { - lines = append(lines, expnSwitch(op, v, false, template)) - } - } - - lines = append(lines, "}") - return strings.Join(lines, "\n") -} - -func expnSwitchYCbCr(op, dType, template string) string { - lines := []string{ - "switch src.SubsampleRatio {", - "default:", - expnLine(template, &data{dType: dType, sType: "image.Image", op: op}), - } - for _, sratio := range subsampleRatios { - lines = append(lines, - fmt.Sprintf("case image.YCbCrSubsampleRatio%s:", sratio), - expnLine(template, &data{dType: dType, sType: "*image.YCbCr", sratio: sratio, op: op}), - ) - } - lines = append(lines, "}") - return strings.Join(lines, "\n") -} - -func argf(args []string, s string) string { - if len(args) > 9 { - panic("too many args") - } - for i, a := range args { - old := fmt.Sprintf("$%d", i) - s = strings.Replace(s, old, a, -1) - } - return s -} - -func pixOffset(m, x, y, xstride, ystride string) string { - return fmt.Sprintf("(%s-%s.Rect.Min.Y)%s + (%s-%s.Rect.Min.X)%s", y, m, ystride, x, m, xstride) -} - -func cOffset(x, y, sratio string) string { - switch sratio { - case "444": - return fmt.Sprintf("( %s - src.Rect.Min.Y )*src.CStride + ( %s - src.Rect.Min.X )", y, x) - case "422": - return fmt.Sprintf("( %s - src.Rect.Min.Y )*src.CStride + ((%s)/2 - src.Rect.Min.X/2)", y, x) - case "420": - return fmt.Sprintf("((%s)/2 - src.Rect.Min.Y/2)*src.CStride + ((%s)/2 - src.Rect.Min.X/2)", y, x) - case "440": - return fmt.Sprintf("((%s)/2 - src.Rect.Min.Y/2)*src.CStride + ( %s - src.Rect.Min.X )", y, x) - } - return fmt.Sprintf("unsupported sratio %q", sratio) -} - -func ycbcrToRGB(lhs, tmp string) string { - s := ` - // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
- $yy1 := int(src.Y[$i]) * 0x10101 - $cb1 := int(src.Cb[$j]) - 128 - $cr1 := int(src.Cr[$j]) - 128 - $r@ := ($yy1 + 91881*$cr1) >> 8 - $g@ := ($yy1 - 22554*$cb1 - 46802*$cr1) >> 8 - $b@ := ($yy1 + 116130*$cb1) >> 8 - if $r@ < 0 { - $r@ = 0 - } else if $r@ > 0xffff { - $r@ = 0xffff - } - if $g@ < 0 { - $g@ = 0 - } else if $g@ > 0xffff { - $g@ = 0xffff - } - if $b@ < 0 { - $b@ = 0 - } else if $b@ > 0xffff { - $b@ = 0xffff - } - ` - s = strings.Replace(s, "$", lhs, -1) - s = strings.Replace(s, "@", tmp, -1) - return s -} - -func split(s, sep string) (string, string) { - if i := strings.Index(s, sep); i >= 0 { - return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+len(sep):]) - } - return "", "" -} - -func splitEq(s string) (lhs, eqOp string) { - s = strings.TrimSpace(s) - if lhs, _ = split(s, ":="); lhs != "" { - return lhs, ":=" - } - if lhs, _ = split(s, "+="); lhs != "" { - return lhs, "+=" - } - return "", "" -} - -func splitArgs(s string) (args []string, extra string) { - s = strings.TrimSpace(s) - if s == "" || s[0] != '[' { - return nil, "" - } - s = s[1:] - - i := strings.IndexByte(s, ']') - if i < 0 { - return nil, "" - } - args, extra = strings.Split(s[:i], ","), s[i+1:] - for i := range args { - args[i] = strings.TrimSpace(args[i]) - } - return args, extra -} - -func relName(s string) string { - if i := strings.LastIndex(s, "."); i >= 0 { - return s[i+1:] - } - return s -} - -const ( - codeRoot = ` - func (z $receiver) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { - // Try to simplify a Scale to a Copy when DstMask is not specified. - // If DstMask is not nil, Copy will call Scale back with same dr and sr, and cause stack overflow. - if dr.Size() == sr.Size() && (opts == nil || opts.DstMask == nil) { - Copy(dst, dr.Min, src, sr, op, opts) - return - } - - var o Options - if opts != nil { - o = *opts - } - - // adr is the affected destination pixels. - adr := dst.Bounds().Intersect(dr) - adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) - if adr.Empty() || sr.Empty() { - return - } - // Make adr relative to dr.Min. - adr = adr.Sub(dr.Min) - if op == Over && o.SrcMask == nil && opaque(src) { - op = Src - } - - // sr is the source pixels. If it extends beyond the src bounds, - // we cannot use the type-specific fast paths, as they access - // the Pix fields directly without bounds checking. - // - // Similarly, the fast paths assume that the masks are nil. - if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { - switch op { - case Over: - z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) - case Src: - z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) - } - } else if _, ok := src.(*image.Uniform); ok { - Draw(dst, dr, src, src.Bounds().Min, op) - } else { - $switch z.scale_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, src, sr, &o) - } - } - - func (z $receiver) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) { - // Try to simplify a Transform to a Copy. - if s2d[0] == 1 && s2d[1] == 0 && s2d[3] == 0 && s2d[4] == 1 { - dx := int(s2d[2]) - dy := int(s2d[5]) - if float64(dx) == s2d[2] && float64(dy) == s2d[5] { - Copy(dst, image.Point{X: sr.Min.X + dx, Y: sr.Min.X + dy}, src, sr, op, opts) - return - } - } - - var o Options - if opts != nil { - o = *opts - } - - dr := transformRect(&s2d, &sr) - // adr is the affected destination pixels. 
- adr := dst.Bounds().Intersect(dr) - adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) - if adr.Empty() || sr.Empty() { - return - } - if op == Over && o.SrcMask == nil && opaque(src) { - op = Src - } - - d2s := invert(&s2d) - // bias is a translation of the mapping from dst coordinates to src - // coordinates such that the latter temporarily have non-negative X - // and Y coordinates. This allows us to write int(f) instead of - // int(math.Floor(f)), since "round to zero" and "round down" are - // equivalent when f >= 0, but the former is much cheaper. The X-- - // and Y-- are because the TransformLeaf methods have a "sx -= 0.5" - // adjustment. - bias := transformRect(&d2s, &adr).Min - bias.X-- - bias.Y-- - d2s[2] -= float64(bias.X) - d2s[5] -= float64(bias.Y) - // Make adr relative to dr.Min. - adr = adr.Sub(dr.Min) - // sr is the source pixels. If it extends beyond the src bounds, - // we cannot use the type-specific fast paths, as they access - // the Pix fields directly without bounds checking. - // - // Similarly, the fast paths assume that the masks are nil. - if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { - switch op { - case Over: - z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) - case Src: - z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) - } - } else if u, ok := src.(*image.Uniform); ok { - transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op) - } else { - $switch z.transform_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, &d2s, src, sr, bias, &o) - } - } - ` - - codeNNScaleLeaf = ` - func (nnInterpolator) scale_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, src $sType, sr image.Rectangle, opts *Options) { - dw2 := uint64(dr.Dx()) * 2 - dh2 := uint64(dr.Dy()) * 2 - sw := uint64(sr.Dx()) - sh := uint64(sr.Dy()) - $preOuter - for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { - sy := (2*uint64(dy) + 1) * sh / dh2 - $preInner - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx - sx := (2*uint64(dx) + 1) * sw / dw2 - p := $srcu[sr.Min.X + int(sx), sr.Min.Y + int(sy)] - $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] - } - } - } - ` - - codeNNTransformLeaf = ` - func (nnInterpolator) transform_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, d2s *f64.Aff3, src $sType, sr image.Rectangle, bias image.Point, opts *Options) { - $preOuter - for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { - dyf := float64(dr.Min.Y + int(dy)) + 0.5 - $preInner - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx - dxf := float64(dr.Min.X + int(dx)) + 0.5 - sx0 := int(d2s[0]*dxf + d2s[1]*dyf + d2s[2]) + bias.X - sy0 := int(d2s[3]*dxf + d2s[4]*dyf + d2s[5]) + bias.Y - if !(image.Point{sx0, sy0}).In(sr) { - continue - } - p := $srcu[sx0, sy0] - $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] - } - } - } - ` - - codeABLScaleLeaf = ` - func (ablInterpolator) scale_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, src $sType, sr image.Rectangle, opts *Options) { - sw := int32(sr.Dx()) - sh := int32(sr.Dy()) - yscale := float64(sh) / float64(dr.Dy()) - xscale := float64(sw) / float64(dr.Dx()) - swMinus1, shMinus1 := sw - 1, sh - 1 - $preOuter - - for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { - sy := (float64(dy)+0.5)*yscale - 0.5 - // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if - // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for - // sx, below. 
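The bias trick described in the comments above rests on one numeric fact: truncation with int(f) and math.Floor(f) agree for f >= 0 and diverge below zero, so shifting source coordinates to be non-negative buys the cheaper conversion. A quick demonstration:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// For non-negative f, "round to zero" and "round down" coincide,
		// which is why the bias makes source coordinates non-negative
		// before the hot loops convert with a plain int(f).
		for _, f := range []float64{2.7, 0.3, -0.3, -2.7} {
			fmt.Printf("f=%5.1f  int(f)=%2d  floor(f)=%2d\n", f, int(f), int(math.Floor(f)))
		}
	}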
- sy0 := int32(sy) - yFrac0 := sy - float64(sy0) - yFrac1 := 1 - yFrac0 - sy1 := sy0 + 1 - if sy < 0 { - sy0, sy1 = 0, 0 - yFrac0, yFrac1 = 0, 1 - } else if sy1 > shMinus1 { - sy0, sy1 = shMinus1, shMinus1 - yFrac0, yFrac1 = 1, 0 - } - $preInner - - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx - sx := (float64(dx)+0.5)*xscale - 0.5 - sx0 := int32(sx) - xFrac0 := sx - float64(sx0) - xFrac1 := 1 - xFrac0 - sx1 := sx0 + 1 - if sx < 0 { - sx0, sx1 = 0, 0 - xFrac0, xFrac1 = 0, 1 - } else if sx1 > swMinus1 { - sx0, sx1 = swMinus1, swMinus1 - xFrac0, xFrac1 = 1, 0 - } - - s00 := $srcf[sr.Min.X + int(sx0), sr.Min.Y + int(sy0)] - s10 := $srcf[sr.Min.X + int(sx1), sr.Min.Y + int(sy0)] - $blend[xFrac1, s00, xFrac0, s10] - s01 := $srcf[sr.Min.X + int(sx0), sr.Min.Y + int(sy1)] - s11 := $srcf[sr.Min.X + int(sx1), sr.Min.Y + int(sy1)] - $blend[xFrac1, s01, xFrac0, s11] - $blend[yFrac1, s10, yFrac0, s11] - $convFtou[p, s11] - $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] - } - } - } - ` - - codeABLTransformLeaf = ` - func (ablInterpolator) transform_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, d2s *f64.Aff3, src $sType, sr image.Rectangle, bias image.Point, opts *Options) { - $preOuter - for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { - dyf := float64(dr.Min.Y + int(dy)) + 0.5 - $preInner - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx - dxf := float64(dr.Min.X + int(dx)) + 0.5 - sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] - sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] - if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { - continue - } - - sx -= 0.5 - sx0 := int(sx) - xFrac0 := sx - float64(sx0) - xFrac1 := 1 - xFrac0 - sx0 += bias.X - sx1 := sx0 + 1 - if sx0 < sr.Min.X { - sx0, sx1 = sr.Min.X, sr.Min.X - xFrac0, xFrac1 = 0, 1 - } else if sx1 >= sr.Max.X { - sx0, sx1 = sr.Max.X-1, sr.Max.X-1 - xFrac0, xFrac1 = 1, 0 - } - - sy -= 0.5 - sy0 := int(sy) - yFrac0 := sy - float64(sy0) - yFrac1 := 1 - yFrac0 - sy0 += bias.Y - sy1 := sy0 + 1 - if sy0 < sr.Min.Y { - sy0, sy1 = sr.Min.Y, sr.Min.Y - yFrac0, yFrac1 = 0, 1 - } else if sy1 >= sr.Max.Y { - sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 - yFrac0, yFrac1 = 1, 0 - } - - s00 := $srcf[sx0, sy0] - s10 := $srcf[sx1, sy0] - $blend[xFrac1, s00, xFrac0, s10] - s01 := $srcf[sx0, sy1] - s11 := $srcf[sx1, sy1] - $blend[xFrac1, s01, xFrac0, s11] - $blend[yFrac1, s10, yFrac0, s11] - $convFtou[p, s11] - $outputu[dr.Min.X + int(dx), dr.Min.Y + int(dy), p] - } - } - } - ` - - codeKernelRoot = ` - func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { - if z.dw != int32(dr.Dx()) || z.dh != int32(dr.Dy()) || z.sw != int32(sr.Dx()) || z.sh != int32(sr.Dy()) { - z.kernel.Scale(dst, dr, src, sr, op, opts) - return - } - - var o Options - if opts != nil { - o = *opts - } - - // adr is the affected destination pixels. - adr := dst.Bounds().Intersect(dr) - adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) - if adr.Empty() || sr.Empty() { - return - } - // Make adr relative to dr.Min. - adr = adr.Sub(dr.Min) - if op == Over && o.SrcMask == nil && opaque(src) { - op = Src - } - - if _, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) { - Draw(dst, dr, src, src.Bounds().Min, op) - return - } - - // Create a temporary buffer: - // scaleX distributes the source image's columns over the temporary image. - // scaleY distributes the temporary image's rows over the destination image. 
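The temporary buffer allocated just below is what makes the kernel scaler separable: the horizontal pass distributes source columns into tmp, the vertical pass distributes tmp's rows into the destination, costing m+n taps per pixel rather than m*n. A toy grayscale halving in that two-pass shape, with box weights assumed for brevity:

	package main

	import "fmt"

	func main() {
		src := [4][4]float64{
			{0, 2, 4, 6},
			{1, 3, 5, 7},
			{8, 10, 12, 14},
			{9, 11, 13, 15},
		}
		var tmp [4][2]float64 // horizontal pass: average column pairs into tmp
		for y := 0; y < 4; y++ {
			for x := 0; x < 2; x++ {
				tmp[y][x] = (src[y][2*x] + src[y][2*x+1]) / 2
			}
		}
		var dst [2][2]float64 // vertical pass: average tmp's row pairs into dst
		for y := 0; y < 2; y++ {
			for x := 0; x < 2; x++ {
				dst[y][x] = (tmp[2*y][x] + tmp[2*y+1][x]) / 2
			}
		}
		fmt.Println(dst) // [[1.5 5.5] [9.5 13.5]]
	}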
-	var tmp [][4]float64
-	if z.pool.New != nil {
-		tmpp := z.pool.Get().(*[][4]float64)
-		defer z.pool.Put(tmpp)
-		tmp = *tmpp
-	} else {
-		tmp = z.makeTmpBuf()
-	}
-
-	// sr is the source pixels. If it extends beyond the src bounds,
-	// we cannot use the type-specific fast paths, as they access
-	// the Pix fields directly without bounds checking.
-	//
-	// Similarly, the fast paths assume that the masks are nil.
-	if o.SrcMask != nil || !sr.In(src.Bounds()) {
-		z.scaleX_Image(tmp, src, sr, &o)
-	} else {
-		$switchS z.scaleX_$sTypeRN$sratio(tmp, src, sr, &o)
-	}
-
-	if o.DstMask != nil {
-		switch op {
-		case Over:
-			z.scaleY_Image_Over(dst, dr, adr, tmp, &o)
-		case Src:
-			z.scaleY_Image_Src(dst, dr, adr, tmp, &o)
-		}
-	} else {
-		$switchD z.scaleY_$dTypeRN_$op(dst, dr, adr, tmp, &o)
-	}
-}
-
-func (q *Kernel) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) {
-	var o Options
-	if opts != nil {
-		o = *opts
-	}
-
-	dr := transformRect(&s2d, &sr)
-	// adr is the affected destination pixels.
-	adr := dst.Bounds().Intersect(dr)
-	adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP)
-	if adr.Empty() || sr.Empty() {
-		return
-	}
-	if op == Over && o.SrcMask == nil && opaque(src) {
-		op = Src
-	}
-	d2s := invert(&s2d)
-	// bias is a translation of the mapping from dst coordinates to src
-	// coordinates such that the latter temporarily have non-negative X
-	// and Y coordinates. This allows us to write int(f) instead of
-	// int(math.Floor(f)), since "round to zero" and "round down" are
-	// equivalent when f >= 0, but the former is much cheaper. The X--
-	// and Y-- are because the TransformLeaf methods have a "sx -= 0.5"
-	// adjustment.
-	bias := transformRect(&d2s, &adr).Min
-	bias.X--
-	bias.Y--
-	d2s[2] -= float64(bias.X)
-	d2s[5] -= float64(bias.Y)
-	// Make adr relative to dr.Min.
-	adr = adr.Sub(dr.Min)
-
-	if u, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) {
-		transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op)
-		return
-	}
-
-	xscale := abs(d2s[0])
-	if s := abs(d2s[1]); xscale < s {
-		xscale = s
-	}
-	yscale := abs(d2s[3])
-	if s := abs(d2s[4]); yscale < s {
-		yscale = s
-	}
-
-	// sr is the source pixels. If it extends beyond the src bounds,
-	// we cannot use the type-specific fast paths, as they access
-	// the Pix fields directly without bounds checking.
-	//
-	// Similarly, the fast paths assume that the masks are nil.
- if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { - switch op { - case Over: - q.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) - case Src: - q.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) - } - } else { - $switch q.transform_$dTypeRN_$sTypeRN$sratio_$op(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) - } - } - ` - - codeKernelScaleLeafX = ` - func (z *kernelScaler) scaleX_$sTypeRN$sratio(tmp [][4]float64, src $sType, sr image.Rectangle, opts *Options) { - t := 0 - $preKernelOuter - for y := int32(0); y < z.sh; y++ { - for _, s := range z.horizontal.sources { - var pr, pg, pb, pa float64 $tweakVarP - for _, c := range z.horizontal.contribs[s.i:s.j] { - p += $srcf[sr.Min.X + int(c.coord), sr.Min.Y + int(y)] * c.weight - } - $tweakPr - tmp[t] = [4]float64{ - pr * s.invTotalWeightFFFF, $tweakP - pg * s.invTotalWeightFFFF, $tweakP - pb * s.invTotalWeightFFFF, $tweakP - pa * s.invTotalWeightFFFF, $tweakP - } - t++ - } - } - } - ` - - codeKernelScaleLeafY = ` - func (z *kernelScaler) scaleY_$dTypeRN_$op(dst $dType, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { - $preOuter - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { - $preKernelInner - for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { $tweakDy - var pr, pg, pb, pa float64 - for _, c := range z.vertical.contribs[s.i:s.j] { - p := &tmp[c.coord*z.dw+dx] - pr += p[0] * c.weight - pg += p[1] * c.weight - pb += p[2] * c.weight - pa += p[3] * c.weight - } - $clampToAlpha - $outputf[dr.Min.X + int(dx), dr.Min.Y + int(adr.Min.Y + dy), ftou, p, s.invTotalWeight] - $tweakD - } - } - } - ` - - codeKernelTransformLeaf = ` - func (q *Kernel) transform_$dTypeRN_$sTypeRN$sratio_$op(dst $dType, dr, adr image.Rectangle, d2s *f64.Aff3, src $sType, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { - // When shrinking, broaden the effective kernel support so that we still - // visit every source pixel. - xHalfWidth, xKernelArgScale := q.Support, 1.0 - if xscale > 1 { - xHalfWidth *= xscale - xKernelArgScale = 1 / xscale - } - yHalfWidth, yKernelArgScale := q.Support, 1.0 - if yscale > 1 { - yHalfWidth *= yscale - yKernelArgScale = 1 / yscale - } - - xWeights := make([]float64, 1 + 2*int(math.Ceil(xHalfWidth))) - yWeights := make([]float64, 1 + 2*int(math.Ceil(yHalfWidth))) - - $preOuter - for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { - dyf := float64(dr.Min.Y + int(dy)) + 0.5 - $preInner - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { $tweakDx - dxf := float64(dr.Min.X + int(dx)) + 0.5 - sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] - sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] - if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { - continue - } - - // TODO: adjust the bias so that we can use int(f) instead - // of math.Floor(f) and math.Ceil(f). 
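The support-broadening at the top of transform_* keeps a minifying transform from skipping source pixels: past 1:1, the kernel's half-width grows with the scale factor while its argument is compressed by the inverse, so every covered source pixel still receives a weight. A toy computation of the resulting tap counts, assuming a support of 2 (roughly a Catmull-Rom kernel):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		const support = 2.0 // assumed kernel support; the real value comes from q.Support
		for _, scale := range []float64{0.5, 1, 3} {
			halfWidth, argScale := support, 1.0
			if scale > 1 { // shrinking: each dst pixel covers `scale` src pixels
				halfWidth *= scale
				argScale = 1 / scale
			}
			// Number of source taps consulted per destination pixel,
			// matching the xWeights/yWeights sizing above.
			taps := 1 + 2*int(math.Ceil(halfWidth))
			fmt.Printf("scale=%.1f halfWidth=%.1f argScale=%.2f taps=%d\n",
				scale, halfWidth, argScale, taps)
		}
	}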
- sx += float64(bias.X) - sx -= 0.5 - ix := int(math.Floor(sx - xHalfWidth)) - if ix < sr.Min.X { - ix = sr.Min.X - } - jx := int(math.Ceil(sx + xHalfWidth)) - if jx > sr.Max.X { - jx = sr.Max.X - } - - totalXWeight := 0.0 - for kx := ix; kx < jx; kx++ { - xWeight := 0.0 - if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { - xWeight = q.At(t) - } - xWeights[kx - ix] = xWeight - totalXWeight += xWeight - } - for x := range xWeights[:jx-ix] { - xWeights[x] /= totalXWeight - } - - sy += float64(bias.Y) - sy -= 0.5 - iy := int(math.Floor(sy - yHalfWidth)) - if iy < sr.Min.Y { - iy = sr.Min.Y - } - jy := int(math.Ceil(sy + yHalfWidth)) - if jy > sr.Max.Y { - jy = sr.Max.Y - } - - totalYWeight := 0.0 - for ky := iy; ky < jy; ky++ { - yWeight := 0.0 - if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { - yWeight = q.At(t) - } - yWeights[ky - iy] = yWeight - totalYWeight += yWeight - } - for y := range yWeights[:jy-iy] { - yWeights[y] /= totalYWeight - } - - var pr, pg, pb, pa float64 $tweakVarP - for ky := iy; ky < jy; ky++ { - if yWeight := yWeights[ky - iy]; yWeight != 0 { - for kx := ix; kx < jx; kx++ { - if w := xWeights[kx - ix] * yWeight; w != 0 { - p += $srcf[kx, ky] * w - } - } - } - } - $clampToAlpha - $outputf[dr.Min.X + int(dx), dr.Min.Y + int(dy), fffftou, p, 1] - } - } - } - ` -) diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 5d052781b..000000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go -//go:generate go run gen.go -test - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". -func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func genFile(name string, buf *bytes.Buffer) { - b, err := format.Source(buf.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile(name, b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) - sort.Strings(all) - - // uniq - lists have dups - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - all[w] = s - w++ - } - } - all = all[:w] - - if *test { - var buf bytes.Buffer - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") - fmt.Fprintln(&buf, "package atom\n") - fmt.Fprintln(&buf, "var testAtomList = []string{") - for _, s := range all { - fmt.Fprintf(&buf, "\t%q,\n", s) - } - fmt.Fprintln(&buf, "}") - - genFile("table_test.go", &buf) - return - } - - // Find hash that minimizes table size. 
- var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - var buf bytes.Buffer - // Generate the Go code. - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go\n") - fmt.Fprintln(&buf, "package atom\n\nconst (") - - // compute max len - maxLen := 0 - for _, s := range all { - if maxLen < len(s) { - maxLen = len(s) - } - fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Fprintln(&buf, ")\n") - - fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) - fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - - fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Fprintf(&buf, "}\n") - datasize := (1 << best.k) * 4 - - fmt.Fprintln(&buf, "const atomText =") - textsize := len(text) - for len(text) > 60 { - fmt.Fprintf(&buf, "\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Fprintf(&buf, "\t%q\n\n", text) - - genFile("table.go", &buf) - - fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. -type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. 
-func (t *table) hash(s string) (h1, h2 uint32) {
-	h := fnv(t.h0, s)
-	h1 = h & t.mask
-	h2 = (h >> 16) & t.mask
-	return
-}
-
-// init initializes the table with the given parameters.
-// h0 is the initial hash value,
-// k is the number of bits of hash value to use, and
-// x is the list of strings to store in the table.
-// init returns false if the table cannot be constructed.
-func (t *table) init(h0 uint32, k uint, x []string) bool {
-	t.h0 = h0
-	t.k = k
-	t.tab = make([]string, 1<<k)
-	t.mask = 1<<k - 1
-	for _, s := range x {
-		if !t.insert(s) {
-			return false
-		}
-	}
-	return true
-}
-
-// insert inserts s in the table.
-func (t *table) insert(s string) bool {
-	h1, h2 := t.hash(s)
-	if t.tab[h1] == "" {
-		t.tab[h1] = s
-		return true
-	}
-	if t.tab[h2] == "" {
-		t.tab[h2] = s
-		return true
-	}
-	if t.push(h1, 0) {
-		t.tab[h1] = s
-		return true
-	}
-	if t.push(h2, 0) {
-		t.tab[h2] = s
-		return true
-	}
-	return false
-}
-
-// push attempts to push aside the entry at index i.
-func (t *table) push(i uint32, depth int) bool {
-	if depth > len(t.tab) {
-		return false
-	}
-	s := t.tab[i]
-	h1, h2 := t.hash(s)
-	j := h1 + h2 - i
-	if t.tab[j] != "" && !t.push(j, depth+1) {
-		return false
-	}
-	t.tab[j] = s
-	return true
-}
-
-// The lists of element names and attribute keys were taken from
-// https://html.spec.whatwg.org/multipage/indices.html#index
-// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
-
-// "command", "keygen" and "menuitem" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var elements = []string{
-	"a",
-	"abbr",
-	"address",
-	"area",
-	"article",
-	"aside",
-	"audio",
-	"b",
-	"base",
-	"bdi",
-	"bdo",
-	"blockquote",
-	"body",
-	"br",
-	"button",
-	"canvas",
-	"caption",
-	"cite",
-	"code",
-	"col",
-	"colgroup",
-	"command",
-	"data",
-	"datalist",
-	"dd",
-	"del",
-	"details",
-	"dfn",
-	"dialog",
-	"div",
-	"dl",
-	"dt",
-	"em",
-	"embed",
-	"fieldset",
-	"figcaption",
-	"figure",
-	"footer",
-	"form",
-	"h1",
-	"h2",
-	"h3",
-	"h4",
-	"h5",
-	"h6",
-	"head",
-	"header",
-	"hgroup",
-	"hr",
-	"html",
-	"i",
-	"iframe",
-	"img",
-	"input",
-	"ins",
-	"kbd",
-	"keygen",
-	"label",
-	"legend",
-	"li",
-	"link",
-	"main",
-	"map",
-	"mark",
-	"menu",
-	"menuitem",
-	"meta",
-	"meter",
-	"nav",
-	"noscript",
-	"object",
-	"ol",
-	"optgroup",
-	"option",
-	"output",
-	"p",
-	"param",
-	"picture",
-	"pre",
-	"progress",
-	"q",
-	"rp",
-	"rt",
-	"ruby",
-	"s",
-	"samp",
-	"script",
-	"section",
-	"select",
-	"slot",
-	"small",
-	"source",
-	"span",
-	"strong",
-	"style",
-	"sub",
-	"summary",
-	"sup",
-	"table",
-	"tbody",
-	"td",
-	"template",
-	"textarea",
-	"tfoot",
-	"th",
-	"thead",
-	"time",
-	"title",
-	"tr",
-	"track",
-	"u",
-	"ul",
-	"var",
-	"video",
-	"wbr",
-}
-
-// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
-//
-// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
-// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "allowfullscreen", - "allowpaymentrequest", - "allowusermedia", - "alt", - "as", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "color", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "integrity", - "is", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "nomodule", - "nonce", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "playsinline", - "poster", - "preload", - "radiogroup", - "readonly", - "referrerpolicy", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "slot", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "srcset", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "updateviacache", - "usemap", - "value", - "width", - "workertype", - "wrap", -} - -// "onautocomplete", "onautocompleteerror", "onmousewheel", -// "onshow" and "onsort" have been removed from the spec, -// but are kept here for backwards compatibility. -var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onauxclick", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncopy", - "oncuechange", - "oncut", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragexit", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadend", - "onloadstart", - "onmessage", - "onmessageerror", - "onmousedown", - "onmouseenter", - "onmouseleave", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onwheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpaste", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onrejectionhandled", - "onscroll", - "onsecuritypolicyviolation", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunhandledrejection", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. 
-var extra = []string{
-	"acronym",
-	"align",
-	"annotation",
-	"annotation-xml",
-	"applet",
-	"basefont",
-	"bgsound",
-	"big",
-	"blink",
-	"center",
-	"color",
-	"desc",
-	"face",
-	"font",
-	"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
-	"foreignobject",
-	"frame",
-	"frameset",
-	"image",
-	"isindex",
-	"listing",
-	"malignmark",
-	"marquee",
-	"math",
-	"mglyph",
-	"mi",
-	"mn",
-	"mo",
-	"ms",
-	"mtext",
-	"nobr",
-	"noembed",
-	"noframes",
-	"plaintext",
-	"prompt",
-	"public",
-	"rb",
-	"rtc",
-	"spacer",
-	"strike",
-	"svg",
-	"system",
-	"tt",
-	"xmp",
-}
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
index a3a918f0b..73804d347 100644
--- a/vendor/golang.org/x/net/html/const.go
+++ b/vendor/golang.org/x/net/html/const.go
@@ -52,7 +52,6 @@ var isSpecialElementMap = map[string]bool{
 	"iframe":     true,
 	"img":        true,
 	"input":      true,
-	"isindex":    true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
 	"keygen":     true,
 	"li":         true,
 	"link":       true,
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
index 01477a963..74774c458 100644
--- a/vendor/golang.org/x/net/html/foreign.go
+++ b/vendor/golang.org/x/net/html/foreign.go
@@ -172,7 +172,6 @@ var svgAttributeAdjustments = map[string]string{
 	"diffuseconstant":           "diffuseConstant",
 	"edgemode":                  "edgeMode",
 	"externalresourcesrequired": "externalResourcesRequired",
-	"filterres":                 "filterRes",
 	"filterunits":               "filterUnits",
 	"glyphref":                  "glyphRef",
 	"gradienttransform":         "gradientTransform",
diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
index 633ee15dc..1350eef22 100644
--- a/vendor/golang.org/x/net/html/node.go
+++ b/vendor/golang.org/x/net/html/node.go
@@ -18,6 +18,11 @@ const (
 	ElementNode
 	CommentNode
 	DoctypeNode
+	// RawNode nodes are not returned by the parser, but can be part of the
+	// Node tree passed to func Render to insert raw HTML (without escaping).
+	// If so, this package makes no guarantee that the rendered HTML is secure
+	// (from e.g. Cross Site Scripting attacks) or well-formed.
+	RawNode
 	scopeMarkerNode
 )
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 992cff2a3..2cd12fc81 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -184,6 +184,17 @@ func (p *parser) clearStackToContext(s scope) {
 	}
 }
 
+// parseGenericRawTextElement implements the generic raw text element parsing
+// algorithm defined in 12.2.6.2.
+// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
+// TODO: Since both the RAWTEXT and RCDATA states officially belong to the
+// tokenizer, the tokenizer should eventually handle both states itself.
+func (p *parser) parseGenericRawTextElement() {
+	p.addElement()
+	p.originalIM = p.im
+	p.im = textIM
+}
+
 // generateImpliedEndTags pops nodes off the stack of open elements as long as
 // the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc.
 // If exceptions are specified, nodes with that name will not be popped off.
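The parse.go hunks around this point make <noscript> handling depend on the parser's scripting flag: with scripting enabled the parser now uses parseGenericRawTextElement, and with scripting disabled it keeps the tokenizer out of raw text mode via NextIsNotRawText. A minimal sketch of the observable difference through the package's public API (the helper functions here are illustrative and not part of this diff):

	package main

	import (
		"fmt"
		"strings"

		"golang.org/x/net/html"
	)

	// findNoscript walks the parsed tree and returns the first <noscript> element.
	func findNoscript(n *html.Node) *html.Node {
		if n.Type == html.ElementNode && n.Data == "noscript" {
			return n
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if r := findNoscript(c); r != nil {
				return r
			}
		}
		return nil
	}

	// describe reports whether the <noscript> element's first child is raw
	// text or a parsed element.
	func describe(doc *html.Node) string {
		ns := findNoscript(doc)
		if ns == nil || ns.FirstChild == nil {
			return "no <noscript> child"
		}
		if c := ns.FirstChild; c.Type == html.TextNode {
			return fmt.Sprintf("text child %q", c.Data)
		} else {
			return fmt.Sprintf("element child <%s>", c.Data)
		}
	}

	func main() {
		const src = `<html><head><noscript><style>p{}</style></noscript></head><body></body></html>`

		// Scripting enabled (the default): <noscript> is parsed as raw text,
		// so its contents remain a single text node.
		doc, err := html.Parse(strings.NewReader(src))
		if err != nil {
			panic(err)
		}
		fmt.Println("scripting on: ", describe(doc)) // text child "<style>p{}</style>"

		// Scripting disabled: the tokenizer stays out of raw text mode, so
		// <style> is parsed as a real child element of <noscript>.
		doc, err = html.ParseWithOptions(strings.NewReader(src),
			html.ParseOptionEnableScripting(false))
		if err != nil {
			panic(err)
		}
		fmt.Println("scripting off:", describe(doc)) // element child <style>
	}

With scripting on, the element ends up holding the unparsed markup as text; with scripting off, the same input yields a real <style> child, which is the behavior difference these hunks implement.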
@@ -192,16 +203,17 @@ func (p *parser) generateImpliedEndTags(exceptions ...string) { loop: for i = len(p.oe) - 1; i >= 0; i-- { n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: - for _, except := range exceptions { - if n.Data == except { - break loop - } + if n.Type != ElementNode { + break + } + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: + for _, except := range exceptions { + if n.Data == except { + break loop } - continue } + continue } break } @@ -369,8 +381,7 @@ findIdenticalElements: // Section 12.2.4.3. func (p *parser) clearActiveFormattingElements() { for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { + if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode { return } } @@ -625,25 +636,29 @@ func inHeadIM(p *parser) bool { switch p.tok.DataAtom { case a.Html: return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta: p.addElement() p.oe.pop() p.acknowledgeSelfClosingTag() return true case a.Noscript: - p.addElement() if p.scripting { - p.setOriginalIM() - p.im = textIM - } else { - p.im = inHeadNoscriptIM + p.parseGenericRawTextElement() + return true } + p.addElement() + p.im = inHeadNoscriptIM + // Don't let the tokenizer go into raw text mode when scripting is disabled. + p.tokenizer.NextIsNotRawText() return true - case a.Script, a.Title, a.Noframes, a.Style: + case a.Script, a.Title: p.addElement() p.setOriginalIM() p.im = textIM return true + case a.Noframes, a.Style: + p.parseGenericRawTextElement() + return true case a.Head: // Ignore the token. return true @@ -855,7 +870,7 @@ func inBodyIM(p *parser) bool { return true } copyAttributes(p.oe[0], p.tok) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: return inHeadIM(p) case a.Body: if p.oe.contains(a.Template) { @@ -881,7 +896,7 @@ func inBodyIM(p *parser) bool { p.addElement() p.im = inFramesetIM return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul: p.popUntil(buttonScope, a.P) p.addElement() case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: @@ -1014,53 +1029,6 @@ func inBodyIM(p *parser) bool { p.tok.DataAtom = a.Img p.tok.Data = a.Img.String() return false - case a.Isindex: - if p.form != nil { - // Ignore the token. - return true - } - action := "" - prompt := "This is a searchable index. Enter search keywords: " - attr := []Attribute{{Key: "name", Val: "isindex"}} - for _, t := range p.tok.Attr { - switch t.Key { - case "action": - action = t.Val - case "name": - // Ignore the attribute. 
- case "prompt": - prompt = t.Val - default: - attr = append(attr, t) - } - } - p.acknowledgeSelfClosingTag() - p.popUntil(buttonScope, a.P) - p.parseImpliedToken(StartTagToken, a.Form, a.Form.String()) - if p.form == nil { - // NOTE: The 'isindex' element has been removed, - // and the 'template' element has not been designed to be - // collaborative with the index element. - // - // Ignore the token. - return true - } - if action != "" { - p.form.Attr = []Attribute{{Key: "action", Val: action}} - } - p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String()) - p.parseImpliedToken(StartTagToken, a.Label, a.Label.String()) - p.addText(prompt) - p.addChild(&Node{ - Type: ElementNode, - DataAtom: a.Input, - Data: a.Input.String(), - Attr: attr, - }) - p.oe.pop() - p.parseImpliedToken(EndTagToken, a.Label, a.Label.String()) - p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String()) - p.parseImpliedToken(EndTagToken, a.Form, a.Form.String()) case a.Textarea: p.addElement() p.setOriginalIM() @@ -1070,18 +1038,21 @@ func inBodyIM(p *parser) bool { p.popUntil(buttonScope, a.P) p.reconstructActiveFormattingElements() p.framesetOK = false - p.addElement() - p.setOriginalIM() - p.im = textIM + p.parseGenericRawTextElement() case a.Iframe: p.framesetOK = false + p.parseGenericRawTextElement() + case a.Noembed: + p.parseGenericRawTextElement() + case a.Noscript: + if p.scripting { + p.parseGenericRawTextElement() + return true + } + p.reconstructActiveFormattingElements() p.addElement() - p.setOriginalIM() - p.im = textIM - case a.Noembed, a.Noscript: - p.addElement() - p.setOriginalIM() - p.im = textIM + // Don't let the tokenizer go into raw text mode when scripting is disabled. + p.tokenizer.NextIsNotRawText() case a.Select: p.reconstructActiveFormattingElements() p.addElement() @@ -1137,7 +1108,7 @@ func inBodyIM(p *parser) bool { return false } return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: p.popUntil(defaultScope, p.tok.DataAtom) case a.Form: if p.oe.contains(a.Template) { @@ -1198,14 +1169,13 @@ func inBodyIM(p *parser) bool { if len(p.templateStack) > 0 { p.im = inTemplateIM return false - } else { - for _, e := range p.oe { - switch e.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th, - a.Thead, a.Tr, a.Body, a.Html: - default: - return true - } + } + for _, e := range p.oe { + switch e.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th, + a.Thead, a.Tr, a.Body, a.Html: + default: + return true } } } @@ -1221,9 +1191,15 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { // Once the code successfully parses the comprehensive test suite, we should // refactor this code to be more idiomatic. - // Steps 1-4. The outer loop. + // Steps 1-2 + if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 { + p.oe.pop() + return + } + + // Steps 3-5. The outer loop. for i := 0; i < 8; i++ { - // Step 5. Find the formatting element. + // Step 6. Find the formatting element. 
 		var formattingElement *Node
 		for j := len(p.afe) - 1; j >= 0; j-- {
 			if p.afe[j].Type == scopeMarkerNode {
 				break
 			}
@@ -1238,17 +1214,22 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
 			p.inBodyEndTagOther(tagAtom, tagName)
 			return
 		}
+
+		// Step 7. Ignore the tag if the formatting element is not in the stack of open elements.
 		feIndex := p.oe.index(formattingElement)
 		if feIndex == -1 {
 			p.afe.remove(formattingElement)
 			return
 		}
+		// Step 8. Ignore the tag if the formatting element is not in scope.
 		if !p.elementInScope(defaultScope, tagAtom) {
 			// Ignore the tag.
 			return
 		}
 
-		// Steps 9-10. Find the furthest block.
+		// Step 9. This step is omitted because it only records a parse error;
+		// there is no need to return.
+
+		// Steps 10-11. Find the furthest block.
 		var furthestBlock *Node
 		for _, e := range p.oe[feIndex:] {
 			if isSpecialElement(e) {
@@ -1265,47 +1246,65 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
 			return
 		}
 
-		// Steps 11-12. Find the common ancestor and bookmark node.
+		// Steps 12-13. Find the common ancestor and bookmark node.
 		commonAncestor := p.oe[feIndex-1]
 		bookmark := p.afe.index(formattingElement)
 
-		// Step 13. The inner loop. Find the lastNode to reparent.
+		// Step 14. The inner loop. Find the lastNode to reparent.
 		lastNode := furthestBlock
 		node := furthestBlock
 		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
+		// Step 14.1.
+		j := 0
+		for {
+			// Step 14.2.
+			j++
+			// Step 14.3.
 			x--
 			node = p.oe[x]
-			// Step 13.4 - 13.5.
+			// Step 14.4. Go to the next step if node is the formatting element.
+			if node == formattingElement {
+				break
+			}
+			// Step 14.5. Remove node from the list of active formatting elements if
+			// the inner loop counter is greater than three and node is in the list of
+			// active formatting elements.
+			if ni := p.afe.index(node); j > 3 && ni > -1 {
+				p.afe.remove(node)
+				// If any element of the list of active formatting elements is removed,
+				// we need to take care whether bookmark should be decremented or not.
+				// This is because the value of bookmark may exceed the size of the
+				// list by removing elements from the list.
+				if ni <= bookmark {
+					bookmark--
+				}
+				continue
+			}
+			// Step 14.6. Continue to the next inner loop iteration if node is not in
+			// the list of active formatting elements.
 			if p.afe.index(node) == -1 {
 				p.oe.remove(node)
 				continue
 			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
+			// Step 14.7.
 			clone := node.clone()
 			p.afe[p.afe.index(node)] = clone
 			p.oe[p.oe.index(node)] = clone
 			node = clone
-			// Step 13.8.
+			// Step 14.8.
 			if lastNode == furthestBlock {
 				bookmark = p.afe.index(node) + 1
 			}
-			// Step 13.9.
+			// Step 14.9.
 			if lastNode.Parent != nil {
 				lastNode.Parent.RemoveChild(lastNode)
 			}
 			node.AppendChild(lastNode)
-			// Step 13.10.
+			// Step 14.10.
 			lastNode = node
 		}
 
-		// Step 14. Reparent lastNode to the common ancestor,
+		// Step 15. Reparent lastNode to the common ancestor,
 		// or for misnested table nodes, to the foster parent.
 		if lastNode.Parent != nil {
 			lastNode.Parent.RemoveChild(lastNode)
@@ -1317,13 +1316,13 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
 			commonAncestor.AppendChild(lastNode)
 		}
 
-		// Steps 15-17. Reparent nodes from the furthest block's children
+		// Steps 16-18. Reparent nodes from the furthest block's children
 		// to a clone of the formatting element.
 		clone := formattingElement.clone()
 		reparentChildren(clone, furthestBlock)
 		furthestBlock.AppendChild(clone)
 
-		// Step 18. Fix up the list of active formatting elements.
+ // Step 19. Fix up the list of active formatting elements. if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark { // Move the bookmark with the rest of the list. bookmark-- @@ -1331,7 +1330,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { p.afe.remove(formattingElement) p.afe.insert(bookmark, clone) - // Step 19. Fix up the stack of open elements. + // Step 20. Fix up the stack of open elements. p.oe.remove(formattingElement) p.oe.insert(p.oe.index(furthestBlock)+1, clone) } @@ -1502,14 +1501,13 @@ func inCaptionIM(p *parser) bool { case StartTagToken: switch p.tok.DataAtom { case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr: - if p.popUntil(tableScope, a.Caption) { - p.clearActiveFormattingElements() - p.im = inTableIM - return false - } else { + if !p.popUntil(tableScope, a.Caption) { // Ignore the token. return true } + p.clearActiveFormattingElements() + p.im = inTableIM + return false case a.Select: p.reconstructActiveFormattingElements() p.addElement() @@ -1526,14 +1524,13 @@ func inCaptionIM(p *parser) bool { } return true case a.Table: - if p.popUntil(tableScope, a.Caption) { - p.clearActiveFormattingElements() - p.im = inTableIM - return false - } else { + if !p.popUntil(tableScope, a.Caption) { // Ignore the token. return true } + p.clearActiveFormattingElements() + p.im = inTableIM + return false case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr: // Ignore the token. return true @@ -1777,12 +1774,11 @@ func inSelectIM(p *parser) bool { } p.addElement() case a.Select: - if p.popUntil(selectScope, a.Select) { - p.resetInsertionMode() - } else { + if !p.popUntil(selectScope, a.Select) { // Ignore the token. return true } + p.resetInsertionMode() case a.Input, a.Keygen, a.Textarea: if p.elementInScope(selectScope, a.Select) { p.parseImpliedToken(EndTagToken, a.Select, a.Select.String()) @@ -1810,12 +1806,11 @@ func inSelectIM(p *parser) bool { p.oe = p.oe[:i] } case a.Select: - if p.popUntil(selectScope, a.Select) { - p.resetInsertionMode() - } else { + if !p.popUntil(selectScope, a.Select) { // Ignore the token. 
return true } + p.resetInsertionMode() case a.Template: return inHeadIM(p) } @@ -2136,28 +2131,31 @@ func parseForeignContent(p *parser) bool { Data: p.tok.Data, }) case StartTagToken: - b := breakout[p.tok.Data] - if p.tok.DataAtom == a.Font { - loop: - for _, attr := range p.tok.Attr { - switch attr.Key { - case "color", "face", "size": - b = true - break loop + if !p.fragment { + b := breakout[p.tok.Data] + if p.tok.DataAtom == a.Font { + loop: + for _, attr := range p.tok.Attr { + switch attr.Key { + case "color", "face", "size": + b = true + break loop + } } } - } - if b { - for i := len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) { - p.oe = p.oe[:i+1] - break + if b { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) { + p.oe = p.oe[:i+1] + break + } } + return false } - return false } - switch p.top().Namespace { + current := p.adjustedCurrentNode() + switch current.Namespace { case "math": adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments) case "svg": @@ -2172,7 +2170,7 @@ func parseForeignContent(p *parser) bool { panic("html: bad parser state: unexpected namespace") } adjustForeignAttributes(p.tok.Attr) - namespace := p.top().Namespace + namespace := current.Namespace p.addElement() p.top().Namespace = namespace if namespace != "" { @@ -2201,12 +2199,20 @@ func parseForeignContent(p *parser) bool { return true } +// Section 12.2.4.2. +func (p *parser) adjustedCurrentNode() *Node { + if len(p.oe) == 1 && p.fragment && p.context != nil { + return p.context + } + return p.oe.top() +} + // Section 12.2.6. func (p *parser) inForeignContent() bool { if len(p.oe) == 0 { return false } - n := p.oe[len(p.oe)-1] + n := p.adjustedCurrentNode() if n.Namespace == "" { return false } @@ -2341,8 +2347,7 @@ func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) { f(p) } - err := p.parse() - if err != nil { + if err := p.parse(); err != nil { return nil, err } return p.doc, nil @@ -2364,7 +2369,6 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ( contextTag = context.DataAtom.String() } p := &parser{ - tokenizer: NewTokenizerFragment(r, contextTag), doc: &Node{ Type: DocumentNode, }, @@ -2372,6 +2376,11 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ( fragment: true, context: context, } + if context != nil && context.Namespace != "" { + p.tokenizer = NewTokenizer(r) + } else { + p.tokenizer = NewTokenizerFragment(r, contextTag) + } for _, f := range opts { f(p) @@ -2396,8 +2405,7 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ( } } - err := p.parse() - if err != nil { + if err := p.parse(); err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go index d34564f49..46879c083 100644 --- a/vendor/golang.org/x/net/html/render.go +++ b/vendor/golang.org/x/net/html/render.go @@ -134,6 +134,9 @@ func render1(w writer, n *Node) error { } } return w.WriteByte('>') + case RawNode: + _, err := w.WriteString(n.Data) + return err default: return errors.New("html: unknown node type") } @@ -252,20 +255,19 @@ func writeQuoted(w writer, s string) error { // Section 12.1.2, "Elements", gives this list of void elements. Void elements // are those that can't have any contents. 
var voidElements = map[string]bool{ - "area": true, - "base": true, - "br": true, - "col": true, - "command": true, - "embed": true, - "hr": true, - "img": true, - "input": true, - "keygen": true, - "link": true, - "meta": true, - "param": true, - "source": true, - "track": true, - "wbr": true, + "area": true, + "base": true, + "br": true, + "col": true, + "embed": true, + "hr": true, + "img": true, + "input": true, + "keygen": true, + "link": true, + "meta": true, + "param": true, + "source": true, + "track": true, + "wbr": true, } diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index e3c01d7c9..877709f99 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -296,8 +296,7 @@ func (z *Tokenizer) Buffered() []byte { // too many times in succession. func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { for i := 0; i < 100; i++ { - n, err := r.Read(b) - if n != 0 || err != nil { + if n, err := r.Read(b); n != 0 || err != nil { return n, err } } @@ -347,6 +346,7 @@ loop: break loop } if c != '/' { + z.raw.end-- continue loop } if z.readRawEndTag() || z.err != nil { @@ -1067,6 +1067,11 @@ loop: // Raw returns the unmodified text of the current token. Calling Next, Token, // Text, TagName or TagAttr may change the contents of the returned slice. +// +// The token stream's raw bytes partition the byte stream (up until an +// ErrorToken). There are no overlaps or gaps between two consecutive token's +// raw bytes. One implication is that the byte offset of the current token is +// the sum of the lengths of all previous tokens' raw bytes. func (z *Tokenizer) Raw() []byte { return z.buf[z.raw.start:z.raw.end] } diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 1565cf270..97f17831f 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -150,7 +150,7 @@ func appendIndexed(dst []byte, i uint64) []byte { // extended buffer. // // If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Inremental Indexing" +// f.Sensitive is false and indexing is true, "Incremental Indexing" // representation is used. func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index a6140099c..2a5399ec4 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -17,6 +17,7 @@ type pipe struct { mu sync.Mutex c sync.Cond // c.L lazily initialized to &p.mu b pipeBuffer // nil when done reading + unread int // bytes unread when done err error // read error once empty. non-nil means closed. 
breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error @@ -33,7 +34,7 @@ func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() if p.b == nil { - return 0 + return p.unread } return p.b.Len() } @@ -80,6 +81,7 @@ func (p *pipe) Write(d []byte) (n int, err error) { return 0, errClosedPipeWrite } if p.breakErr != nil { + p.unread += len(d) return len(d), nil // discard when there is no reader } return p.b.Write(d) @@ -117,6 +119,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { } p.readFn = fn if dst == &p.breakErr { + if p.b != nil { + p.unread += p.b.Len() + } p.b = nil } *dst = err diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index d4abeb2b9..de31d72b2 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -52,10 +52,11 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 ) var ( @@ -163,6 +164,15 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +// maxQueuedControlFrames is the maximum number of control frames like +// SETTINGS, PING and RST_STREAM that will be queued for writing before +// the connection is closed to prevent memory exhaustion attacks. +func (s *Server) maxQueuedControlFrames() int { + // TODO: if anybody asks, add a Server field, and remember to define the + // behavior of negative values. + return maxQueuedControlFrames +} + type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -242,7 +252,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).") } } @@ -273,7 +283,20 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, Handler: h, BaseConfig: hs, }) @@ -284,6 +307,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { // ServeConnOpts are options for the Server.ServeConn method. type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. + Context context.Context + // BaseConfig optionally sets the base configuration // for values. If nil, defaults are used. 
BaseConfig *http.Server @@ -294,6 +321,13 @@ type ServeConnOpts struct { Handler http.Handler } +func (o *ServeConnOpts) context() context.Context { + if o != nil && o.Context != nil { + return o.Context + } + return context.Background() +} + func (o *ServeConnOpts) baseConfig() *http.Server { if o != nil && o.BaseConfig != nil { return o.BaseConfig @@ -439,7 +473,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(opts.context()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) if hs := opts.baseConfig(); hs != nil { ctx = context.WithValue(ctx, http.ServerContextKey, hs) @@ -482,6 +516,7 @@ type serverConn struct { sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? + queuedControlFrames int // control frames in the writeSched queue clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client @@ -870,6 +905,14 @@ func (sc *serverConn) serve() { } } + // If the peer is causing us to generate a lot of control frames, + // but not reading them from us, assume they are trying to make us + // run out of memory. + if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + sc.vlogf("http2: too many control frames in send queue, closing connection") + return + } + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY // with no error code (graceful shutdown), don't start the timer until // all open streams have been completed. @@ -1069,6 +1112,14 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) { } if !ignoreWrite { + if wr.isControl() { + sc.queuedControlFrames++ + // For extra safety, detect wraparounds, which should not happen, + // and pull the plug. + if sc.queuedControlFrames < 0 { + sc.conn.Close() + } + } sc.writeSched.Push(wr) } sc.scheduleFrameWrite() @@ -1186,10 +1237,8 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // If a frame is already being written, nothing happens. This will be called again // when the frame is done being written. // -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. +// If a frame isn't being written and we need to send one, the best frame +// to send is selected by writeSched. // // If a frame isn't being written and there's nothing else to send, we // flush the write buffer. @@ -1217,6 +1266,9 @@ func (sc *serverConn) scheduleFrameWrite() { } if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { if wr, ok := sc.writeSched.Pop(); ok { + if wr.isControl() { + sc.queuedControlFrames-- + } sc.startFrameWrite(wr) continue } @@ -1509,6 +1561,8 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { if err := f.ForeachSetting(sc.processSetting); err != nil { return err } + // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be + // acknowledged individually, even if multiple are received before the ACK. 
sc.needToSendSettingsAck = true sc.scheduleFrameWrite() return nil @@ -2361,7 +2415,11 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + // If the Content-Encoding is non-blank, we shouldn't + // sniff the body. See Issue golang.org/issue/31753. + ce := rws.snapHeader.Get("Content-Encoding") + hasCE := len(ce) > 0 + if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { ctype = http.DetectContentType(p) } var date string @@ -2470,7 +2528,7 @@ const TrailerPrefix = "Trailer:" // trailers. That worked for a while, until we found the first major // user of Trailers in the wild: gRPC (using them only over http2), // and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old +// predeclaring them. So: change of plans. We still permit the old // way, but we also permit this hack: if a Header() key begins with // "Trailer:", the suffix of that key is a Trailer. Because ':' is an // invalid token byte anyway, there is no ambiguity. (And it's already @@ -2770,7 +2828,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that // is in either the "open" or "half-closed (remote)" state. if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { - // responseWriter.Push checks that the stream is peer-initiaed. + // responseWriter.Push checks that the stream is peer-initiated. msg.done <- errStreamClosed return } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c0c80d893..d948e96ee 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -93,7 +93,7 @@ type Transport struct { // send in the initial settings frame. It is how many bytes // of response headers are allowed. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport + // want to advertise an unlimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) // to mean no limit. 
 	MaxHeaderListSize uint32
@@ -227,6 +227,7 @@ type ClientConn struct {
 	br              *bufio.Reader
 	fr              *Framer
 	lastActive      time.Time
+	lastIdle        time.Time // time last idle
 	// Settings from peer: (also guarded by mu)
 	maxFrameSize           uint32
 	maxConcurrentStreams   uint32
@@ -603,7 +604,7 @@ func (t *Transport) expectContinueTimeout() time.Duration {
 }
 
 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
-	return t.newClientConn(c, false)
+	return t.newClientConn(c, t.disableKeepAlives())
 }
 
 func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
@@ -736,7 +737,8 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
 	}
 	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
-		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+		!cc.tooIdleLocked()
 	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
 	return
 }
@@ -746,6 +748,16 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
 	return st.canTakeNewRequest
 }
 
+// tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *ClientConn) tooIdleLocked() bool {
+	// The Round(0) strips the monotonic clock reading so the
+	// times are compared based on their wall time. We don't want
+	// to reuse a connection that's been sitting idle during
+	// VM/laptop suspend if monotonic time was also frozen.
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
 // only be called when we're idle, but because we're coming from a new
 // goroutine, there could be a new request coming in at the same time,
@@ -992,7 +1004,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
 		req.Method != "HEAD" {
 		// Request gzip only, not deflate. Deflate is ambiguous and
 		// not as universally supported anyway.
-		// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+		// See: https://zlib.net/zlib_faq.html#faq39
 		//
 		// Note that we don't request this for HEAD requests,
 		// due to a bug in nginx:
@@ -1150,6 +1162,7 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
 		}
 		return errClientConnUnusable
 	}
+	cc.lastIdle = time.Time{}
 	if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
 		if waitingForConn != nil {
 			close(waitingForConn)
@@ -1216,6 +1229,8 @@ var (
 	// abort request body write, but send stream reset of cancel.
 	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
 )
 
 func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
@@ -1238,10 +1253,32 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
 	req := cs.req
 	hasTrailers := req.Trailer != nil
+	remainLen := actualContentLength(req)
+	hasContentLen := remainLen != -1
 
 	var sawEOF bool
 	for !sawEOF {
-		n, err := body.Read(buf)
+		n, err := body.Read(buf[:len(buf)-1])
+		if hasContentLen {
+			remainLen -= int64(n)
+			if remainLen == 0 && err == nil {
+				// The request body's Content-Length was predeclared and
+				// we just finished reading it all, but the underlying io.Reader
+				// returned the final chunk with a nil error (which is one of
+				// the two valid things a Reader can do at EOF).
Because we'd prefer + // to send the END_STREAM bit early, double-check that we're actually + // at EOF. Subsequent reads should return (0, EOF) at this point. + // If either value is different, we return an error in one of two ways below. + var n1 int + n1, err = body.Read(buf[n:]) + remainLen -= int64(n1) + } + if remainLen < 0 { + err = errReqBodyTooLong + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + return err + } + } if err == io.EOF { sawEOF = true err = nil @@ -1454,7 +1491,29 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if vv[0] == "" { continue } - + } else if strings.EqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue } for _, v := range vv { @@ -1592,6 +1651,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { delete(cc.streams, id) if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) + cc.lastIdle = time.Now() } close(cs.done) // Wake up checkResetOrDone via clientStream.awaitFlowControl and diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 4fe307307..f24d2b1e7 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -32,7 +32,7 @@ type WriteScheduler interface { // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. + // order they are Push'd. No frames should be discarded except by CloseStream. Pop() (wr FrameWriteRequest, ok bool) } @@ -76,6 +76,12 @@ func (wr FrameWriteRequest) StreamID() uint32 { return wr.stream.id } +// isControl reports whether wr is a control frame for MaxQueuedControlFrames +// purposes. That includes non-stream frames and RST_STREAM frames. +func (wr FrameWriteRequest) isControl() bool { + return wr.stream == nil +} + // DataSize returns the number of flow control bytes that must be consumed // to write this entire frame. This is 0 for non-DATA frames. func (wr FrameWriteRequest) DataSize() int { diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 848fed6ec..2618b2c11 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -149,7 +149,7 @@ func (n *priorityNode) addBytes(b int64) { } // walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this funcion returns true and the +// with a non-empty write queue. When f returns true, this function returns true and the // walk halts. tmp is used as scratch space for sorting. 
// // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index 36d7919f1..9a7b9e581 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -19,7 +19,8 @@ type randomWriteScheduler struct { zero writeQueue // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. + // When a stream is idle, closed, or emptied, it's deleted + // from the map. sq map[uint32]*writeQueue // pool of empty queues for reuse. @@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { return ws.zero.shift(), true } // Iterate over all non-idle streams until finding one that can be consumed. - for _, q := range ws.sq { + for streamID, q := range ws.sq { if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } return wr, true } } diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index c515d7ad2..8ce0811fd 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13 +// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.00.go b/vendor/golang.org/x/net/idna/tables12.00.go new file mode 100644 index 000000000..f4b8ea363 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables12.00.go @@ -0,0 +1,4733 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.14 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +var mappings string = "" + // Size: 8178 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + 
"\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" 
+ + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多" + + "\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販" + + "\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打" + + "\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕" + + "\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你" + + "\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內" + + "\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉" + + "\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟" + + "\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙" + + "\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型" + + "\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮" + + "\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍" + + "\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰" + + "\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹" + + "\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞" + + "\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢" + + "\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙" + + "\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓" + + "\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛" + + "\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派" + + "\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆" + + "\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀" + + "\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾" + + "\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌" + + "\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒" + + "\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺" + + "\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋" + + "\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著" + + "\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜" + + "\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠" + + "\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁" + + "\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘" + + "\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲" + + 
"\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭" + + "\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4862 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" + + "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" + + "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" + + "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" + + "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" + + "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" + + "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" + + "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" + + "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" + + "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" + + "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" + + "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" + + "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" + + "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" + + "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" + + "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" + + "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" + + "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" + + "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" + + "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" + + "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" + + "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" + + "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" + + "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" + + "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" + + "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" + + "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" + + "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" + + "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" 
+ + "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" + + "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" + + "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" + + "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" + + "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" + + "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" + + "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" + + "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" + + "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" + + "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" + + "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" + + "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" + + "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" + + "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" + + "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" + + "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" + + "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" + + "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" + + "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" + + "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." + + "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" + + "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" + + "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" + + "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" 
+ + "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" + + "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" + + "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" + + "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" + + "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" + + "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" + + "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" + + "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" + + "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" + + ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" + + "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" + + "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" + + "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" + + "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" + + "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" + + "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" + + "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" + + "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" + + "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" + + "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" + + "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" + + ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + 
"\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + 
"\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29708 bytes (29.01 KiB). Checksum: c3ecc76d8fffa6e6. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
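+// Block indices below 125 address idnaValues directly: block n starts at
+// offset n<<6, so the value for byte b is idnaValues[n<<6+uint32(b)].
+// Indices of 125 and above are rebased by 125 and resolved through the
+// sparse table via idnaSparse.lookup, as the switch below shows.
+//
+// An illustrative sketch (not part of the generated file) of how the
+// lookup API above is typically driven, relying only on the documented
+// contract that sz == 0 means the input ends with an incomplete encoding;
+// trie and s are assumed to be a *idnaTrie and an input string:
+//
+//	for i := 0; i < len(s); {
+//		v, sz := trie.lookupString(s[i:])
+//		if sz == 0 {
+//			break // s ends mid-rune; wait for more input
+//		}
+//		_ = v // interpret the trie value for this encoding
+//		i += sz
+//	}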
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. +var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 
0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 
0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 
0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 
0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 
0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 
0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 
0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 
0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 
0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 
0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 
0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 
0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 
0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 
0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, + 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e, + 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe, + 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e, + 
0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e, + 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde, + 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e, + 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 
0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5, + 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875, + 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935, + 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5, + 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15, + 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75, + 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95, + 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75, + 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5, + 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55, + 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15, + 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95, + 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5, + 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5, + 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5, + 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5, + 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275, + 0xf1e: 0x3295, 
0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5, + 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, + 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475, + 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535, + 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5, + 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795, + 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855, + 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915, + 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5, + 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95, + 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55, + 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5, + 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95, + 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, + 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d, + 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05, + 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95, + 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd, + 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55, + 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5, + 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015, + 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0040, + // Block 
0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x4045, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x4065, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x40a5, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40c5, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x4105, + 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x41a5, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6419, 0x10a2: 0x4205, 0x10a3: 0x4225, + 0x10a4: 0x4245, 0x10a5: 0x6431, 0x10a6: 0x4265, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x4305, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x4325, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x4345, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 
0x43a5, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x4425, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 
0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008, + 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008, + // Block 0x49, offset 0x1240 + 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, + 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, + 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, + 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, + 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, + 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, + 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, + 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 
0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, + 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, + 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, + 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 
0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 
0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 
0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d8d, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7dad, + 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, + 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + // Block 0x56, offset 0x1580 + 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, + 0x158c: 0x7fae, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7fcd, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7ecd, + 0x159e: 0x7f2d, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x800e, 0x15b1: 0xb009, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, + 0x15b6: 0x806e, 0x15b7: 0xb031, 0x15b8: 0x808e, 0x15b9: 0xb059, 0x15ba: 0x80ae, 0x15bb: 0xb081, + 0x15bc: 0x80ce, 0x15bd: 0xb0a9, 0x15be: 0x80ee, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 
0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 
0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d, + 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, + 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, + 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, + 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, + 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, + 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, + 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, + 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, + 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, + 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, + 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, + 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, + 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed, + 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, + 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d, + 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, + 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x8a0e, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, + 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 
0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 
0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 
0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 
0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 
0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 
0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 
0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b3d, 0x1c5f: 0x8b3d, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 
0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0xc1c1, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1f1, 0x1dc1: 0xc229, 0x1dc2: 0xc261, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc281, 0x1dd1: 0xc2a1, + 0x1dd2: 0xc2c1, 0x1dd3: 0xc2e1, 0x1dd4: 0xc301, 0x1dd5: 0xc321, 0x1dd6: 0xc341, 0x1dd7: 0xc361, + 0x1dd8: 0xc381, 0x1dd9: 0xc3a1, 0x1dda: 0xc3c1, 0x1ddb: 0xc3e1, 0x1ddc: 0xc401, 0x1ddd: 0xc421, + 0x1dde: 0xc441, 0x1ddf: 0xc461, 0x1de0: 0xc481, 0x1de1: 0xc4a1, 0x1de2: 0xc4c1, 0x1de3: 0xc4e1, + 0x1de4: 0xc501, 0x1de5: 0xc521, 0x1de6: 0xc541, 0x1de7: 0xc561, 0x1de8: 0xc581, 0x1de9: 0xc5a1, + 0x1dea: 0xc5c1, 0x1deb: 0xc5e1, 0x1dec: 0xc601, 0x1ded: 0xc621, 0x1dee: 0xc641, 0x1def: 0xc661, + 0x1df0: 0xc681, 0x1df1: 0xc6a1, 0x1df2: 0xc6c1, 0x1df3: 0xc6e1, 0x1df4: 0xc701, 0x1df5: 0xc721, + 0x1df6: 0xc741, 0x1df7: 0xc761, 0x1df8: 0xc781, 0x1df9: 0xc7a1, 0x1dfa: 0xc7c1, 0x1dfb: 0xc7e1, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 
0xcb11, 0x1e01: 0xcb31, 0x1e02: 0xcb51, 0x1e03: 0x8b55, 0x1e04: 0xcb71, 0x1e05: 0xcb91, + 0x1e06: 0xcbb1, 0x1e07: 0xcbd1, 0x1e08: 0xcbf1, 0x1e09: 0xcc11, 0x1e0a: 0xcc31, 0x1e0b: 0xcc51, + 0x1e0c: 0xcc71, 0x1e0d: 0x8b75, 0x1e0e: 0xcc91, 0x1e0f: 0xccb1, 0x1e10: 0xccd1, 0x1e11: 0xccf1, + 0x1e12: 0x8b95, 0x1e13: 0xcd11, 0x1e14: 0xcd31, 0x1e15: 0xc441, 0x1e16: 0x8bb5, 0x1e17: 0xcd51, + 0x1e18: 0xcd71, 0x1e19: 0xcd91, 0x1e1a: 0xcdb1, 0x1e1b: 0xcdd1, 0x1e1c: 0x8bd5, 0x1e1d: 0xcdf1, + 0x1e1e: 0xce11, 0x1e1f: 0xce31, 0x1e20: 0xce51, 0x1e21: 0xce71, 0x1e22: 0xc7a1, 0x1e23: 0xce91, + 0x1e24: 0xceb1, 0x1e25: 0xced1, 0x1e26: 0xcef1, 0x1e27: 0xcf11, 0x1e28: 0xcf31, 0x1e29: 0xcf51, + 0x1e2a: 0xcf71, 0x1e2b: 0xcf91, 0x1e2c: 0xcfb1, 0x1e2d: 0xcfd1, 0x1e2e: 0xcff1, 0x1e2f: 0xd011, + 0x1e30: 0xd031, 0x1e31: 0xd051, 0x1e32: 0xd051, 0x1e33: 0xd051, 0x1e34: 0x8bf5, 0x1e35: 0xd071, + 0x1e36: 0xd091, 0x1e37: 0xd0b1, 0x1e38: 0x8c15, 0x1e39: 0xd0d1, 0x1e3a: 0xd0f1, 0x1e3b: 0xd111, + 0x1e3c: 0xd131, 0x1e3d: 0xd151, 0x1e3e: 0xd171, 0x1e3f: 0xd191, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd1b1, 0x1e41: 0xd1d1, 0x1e42: 0xd1f1, 0x1e43: 0xd211, 0x1e44: 0xd231, 0x1e45: 0xd251, + 0x1e46: 0xd251, 0x1e47: 0xd271, 0x1e48: 0xd291, 0x1e49: 0xd2b1, 0x1e4a: 0xd2d1, 0x1e4b: 0xd2f1, + 0x1e4c: 0xd311, 0x1e4d: 0xd331, 0x1e4e: 0xd351, 0x1e4f: 0xd371, 0x1e50: 0xd391, 0x1e51: 0xd3b1, + 0x1e52: 0xd3d1, 0x1e53: 0xd3f1, 0x1e54: 0xd411, 0x1e55: 0xd431, 0x1e56: 0xd451, 0x1e57: 0xd471, + 0x1e58: 0xd491, 0x1e59: 0x8c35, 0x1e5a: 0xd4b1, 0x1e5b: 0xd4d1, 0x1e5c: 0xd4f1, 0x1e5d: 0xc321, + 0x1e5e: 0xd511, 0x1e5f: 0xd531, 0x1e60: 0x8c55, 0x1e61: 0x8c75, 0x1e62: 0xd551, 0x1e63: 0xd571, + 0x1e64: 0xd591, 0x1e65: 0xd5b1, 0x1e66: 0xd5d1, 0x1e67: 0xd5f1, 0x1e68: 0x2040, 0x1e69: 0xd611, + 0x1e6a: 0xd631, 0x1e6b: 0xd631, 0x1e6c: 0x8c95, 0x1e6d: 0xd651, 0x1e6e: 0xd671, 0x1e6f: 0xd691, + 0x1e70: 0xd6b1, 0x1e71: 0x8cb5, 0x1e72: 0xd6d1, 0x1e73: 0xd6f1, 0x1e74: 0x2040, 0x1e75: 0xd711, + 0x1e76: 0xd731, 0x1e77: 0xd751, 0x1e78: 0xd771, 0x1e79: 0xd791, 0x1e7a: 0xd7b1, 0x1e7b: 0x8cd5, + 0x1e7c: 0xd7d1, 0x1e7d: 0x8cf5, 0x1e7e: 0xd7f1, 0x1e7f: 0xd811, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd831, 0x1e81: 0xd851, 0x1e82: 0xd871, 0x1e83: 0xd891, 0x1e84: 0xd8b1, 0x1e85: 0xd8d1, + 0x1e86: 0xd8f1, 0x1e87: 0xd911, 0x1e88: 0xd931, 0x1e89: 0x8d15, 0x1e8a: 0xd951, 0x1e8b: 0xd971, + 0x1e8c: 0xd991, 0x1e8d: 0xd9b1, 0x1e8e: 0xd9d1, 0x1e8f: 0x8d35, 0x1e90: 0xd9f1, 0x1e91: 0x8d55, + 0x1e92: 0x8d75, 0x1e93: 0xda11, 0x1e94: 0xda31, 0x1e95: 0xda31, 0x1e96: 0xda51, 0x1e97: 0x8d95, + 0x1e98: 0x8db5, 0x1e99: 0xda71, 0x1e9a: 0xda91, 0x1e9b: 0xdab1, 0x1e9c: 0xdad1, 0x1e9d: 0xdaf1, + 0x1e9e: 0xdb11, 0x1e9f: 0xdb31, 0x1ea0: 0xdb51, 0x1ea1: 0xdb71, 0x1ea2: 0xdb91, 0x1ea3: 0xdbb1, + 0x1ea4: 0x8dd5, 0x1ea5: 0xdbd1, 0x1ea6: 0xdbf1, 0x1ea7: 0xdc11, 0x1ea8: 0xdc31, 0x1ea9: 0xdc11, + 0x1eaa: 0xdc51, 0x1eab: 0xdc71, 0x1eac: 0xdc91, 0x1ead: 0xdcb1, 0x1eae: 0xdcd1, 0x1eaf: 0xdcf1, + 0x1eb0: 0xdd11, 0x1eb1: 0xdd31, 0x1eb2: 0xdd51, 0x1eb3: 0xdd71, 0x1eb4: 0xdd91, 0x1eb5: 0xddb1, + 0x1eb6: 0xddd1, 0x1eb7: 0xddf1, 0x1eb8: 0x8df5, 0x1eb9: 0xde11, 0x1eba: 0xde31, 0x1ebb: 0xde51, + 0x1ebc: 0xde71, 0x1ebd: 0xde91, 0x1ebe: 0x8e15, 0x1ebf: 0xdeb1, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe5b1, 0x1ec1: 0xe5d1, 0x1ec2: 0xe5f1, 0x1ec3: 0xe611, 0x1ec4: 0xe631, 0x1ec5: 0xe651, + 0x1ec6: 0x8f35, 0x1ec7: 0xe671, 0x1ec8: 0xe691, 0x1ec9: 0xe6b1, 0x1eca: 0xe6d1, 0x1ecb: 0xe6f1, + 0x1ecc: 0xe711, 0x1ecd: 0x8f55, 0x1ece: 0xe731, 0x1ecf: 0xe751, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95, + 0x1ed2: 0xe771, 0x1ed3: 0xe791, 
0x1ed4: 0xe7b1, 0x1ed5: 0xe7d1, 0x1ed6: 0xe7f1, 0x1ed7: 0xe811, + 0x1ed8: 0xe831, 0x1ed9: 0xe851, 0x1eda: 0xe871, 0x1edb: 0x8fb5, 0x1edc: 0xe891, 0x1edd: 0x8fd5, + 0x1ede: 0xe8b1, 0x1edf: 0x2040, 0x1ee0: 0xe8d1, 0x1ee1: 0xe8f1, 0x1ee2: 0xe911, 0x1ee3: 0x8ff5, + 0x1ee4: 0xe931, 0x1ee5: 0xe951, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0xe971, 0x1ee9: 0xe991, + 0x1eea: 0xe9b1, 0x1eeb: 0xe9d1, 0x1eec: 0xe9f1, 0x1eed: 0xe9f1, 0x1eee: 0xea11, 0x1eef: 0xea31, + 0x1ef0: 0xea51, 0x1ef1: 0xea71, 0x1ef2: 0xea91, 0x1ef3: 0xeab1, 0x1ef4: 0xead1, 0x1ef5: 0x9055, + 0x1ef6: 0xeaf1, 0x1ef7: 0x9075, 0x1ef8: 0xeb11, 0x1ef9: 0x9095, 0x1efa: 0xeb31, 0x1efb: 0x90b5, + 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0xeb51, 0x1eff: 0xeb71, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb91, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0xebb1, + 0x1f06: 0xebd1, 0x1f07: 0xebd1, 0x1f08: 0xebf1, 0x1f09: 0xec11, 0x1f0a: 0xec31, 0x1f0b: 0xec51, + 0x1f0c: 0xec71, 0x1f0d: 0x9195, 0x1f0e: 0xec91, 0x1f0f: 0xecb1, 0x1f10: 0xecd1, 0x1f11: 0xecf1, + 0x1f12: 0x91b5, 0x1f13: 0xed11, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0xed31, 0x1f17: 0xed51, + 0x1f18: 0xed71, 0x1f19: 0xed91, 0x1f1a: 0xedb1, 0x1f1b: 0xedd1, 0x1f1c: 0x9215, 0x1f1d: 0x9235, + 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0xedf1, 0x1f21: 0x9275, 0x1f22: 0xee11, 0x1f23: 0xee31, + 0x1f24: 0xee51, 0x1f25: 0x9295, 0x1f26: 0xee71, 0x1f27: 0xee91, 0x1f28: 0xeeb1, 0x1f29: 0xeed1, + 0x1f2a: 0xeef1, 0x1f2b: 0x92b5, 0x1f2c: 0xef11, 0x1f2d: 0xef31, 0x1f2e: 0xef51, 0x1f2f: 0xef71, + 0x1f30: 0xef91, 0x1f31: 0xefb1, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0xefd1, 0x1f35: 0x9315, + 0x1f36: 0xeff1, 0x1f37: 0x9335, 0x1f38: 0xf011, 0x1f39: 0xf031, 0x1f3a: 0xf051, 0x1f3b: 0x9355, + 0x1f3c: 0x9375, 0x1f3d: 0xf071, 0x1f3e: 0x9395, 0x1f3f: 0xf091, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6d1, 0x1f41: 0xf6f1, 0x1f42: 0xf711, 0x1f43: 0xf731, 0x1f44: 0xf751, 0x1f45: 0x9555, + 0x1f46: 0xf771, 0x1f47: 0xf791, 0x1f48: 0xf7b1, 0x1f49: 0xf7d1, 0x1f4a: 0xf7f1, 0x1f4b: 0x9575, + 0x1f4c: 0x9595, 0x1f4d: 0xf811, 0x1f4e: 0xf831, 0x1f4f: 0xf851, 0x1f50: 0xf871, 0x1f51: 0xf891, + 0x1f52: 0xf8b1, 0x1f53: 0x95b5, 0x1f54: 0xf8d1, 0x1f55: 0xf8f1, 0x1f56: 0xf911, 0x1f57: 0xf931, + 0x1f58: 0x95d5, 0x1f59: 0x95f5, 0x1f5a: 0xf951, 0x1f5b: 0xf971, 0x1f5c: 0xf991, 0x1f5d: 0x9615, + 0x1f5e: 0xf9b1, 0x1f5f: 0xf9d1, 0x1f60: 0x684d, 0x1f61: 0x9635, 0x1f62: 0xf9f1, 0x1f63: 0xfa11, + 0x1f64: 0xfa31, 0x1f65: 0x9655, 0x1f66: 0xfa51, 0x1f67: 0xfa71, 0x1f68: 0xfa91, 0x1f69: 0xfab1, + 0x1f6a: 0xfad1, 0x1f6b: 0xfaf1, 0x1f6c: 0xfb11, 0x1f6d: 0x9675, 0x1f6e: 0xfb31, 0x1f6f: 0xfb51, + 0x1f70: 0xfb71, 0x1f71: 0x9695, 0x1f72: 0xfb91, 0x1f73: 0xfbb1, 0x1f74: 0xfbd1, 0x1f75: 0xfbf1, + 0x1f76: 0x7b6d, 0x1f77: 0x96b5, 0x1f78: 0xfc11, 0x1f79: 0xfc31, 0x1f7a: 0xfc51, 0x1f7b: 0x96d5, + 0x1f7c: 0xfc71, 0x1f7d: 0x96f5, 0x1f7e: 0xfc91, 0x1f7f: 0xfc91, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfcb1, 0x1f81: 0x9715, 0x1f82: 0xfcd1, 0x1f83: 0xfcf1, 0x1f84: 0xfd11, 0x1f85: 0xfd31, + 0x1f86: 0xfd51, 0x1f87: 0xfd71, 0x1f88: 0xfd91, 0x1f89: 0x9735, 0x1f8a: 0xfdb1, 0x1f8b: 0xfdd1, + 0x1f8c: 0xfdf1, 0x1f8d: 0xfe11, 0x1f8e: 0xfe31, 0x1f8f: 0xfe51, 0x1f90: 0x9755, 0x1f91: 0xfe71, + 0x1f92: 0x9775, 0x1f93: 0x9795, 0x1f94: 0x97b5, 0x1f95: 0xfe91, 0x1f96: 0xfeb1, 0x1f97: 0xfed1, + 0x1f98: 0xfef1, 0x1f99: 0xff11, 0x1f9a: 0xff31, 0x1f9b: 0xff51, 0x1f9c: 0xff71, 0x1f9d: 0x97d5, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 
0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1f, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0x9b, + 0x1b0: 0xd0, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, + 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4:
0xdf, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe0, + 0x1c8: 0xe1, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe2, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 
0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe5, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe6, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe7, + 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, + 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf8, 0x31f: 0xf9, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd, + 0x3a8: 0x47, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x100, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x101, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9f, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d, + 0x3d0: 0x10e, 0x3d1: 0x9f, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xba, 0x3e6: 0x11a, 0x3e7: 0x11b, + 0x3e8: 0x11c, 
0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x5b, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x121, 0x3f1: 0x122, 0x3f2: 0x123, 0x3f3: 0x124, 0x3f4: 0x125, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x127, 0x3fd: 0x128, 0x3fe: 0xba, 0x3ff: 0x129, + // Block 0x10, offset 0x400 + 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131, + 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a, + 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0x143, 0x427: 0x144, + 0x428: 0x145, 0x429: 0x146, 0x42a: 0x147, 0x42b: 0x148, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x149, 0x431: 0x14a, 0x432: 0x14b, 0x433: 0xba, 0x434: 0x14c, 0x435: 0x14d, 0x436: 0x14e, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14f, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0x150, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x151, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x152, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x153, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x154, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x155, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, 
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x148, 0x529: 0x156, 0x52a: 0xba, 0x52b: 0x157, 0x52c: 0x158, 0x52d: 0x159, 0x52e: 0x15a, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0x15b, 0x53a: 0x15c, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15d, 0x53e: 0x15e, 0x53f: 0x15f, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x160, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x161, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x162, 0x585: 0x163, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x164, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x165, 0x5b2: 0x166, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x167, 0x5c4: 0x168, 0x5c5: 0x169, 0x5c6: 0x16a, 0x5c7: 0x16b, + 0x5c8: 0x9b, 0x5c9: 0x16c, 0x5ca: 0xba, 0x5cb: 0x16d, 0x5cc: 0x9b, 0x5cd: 0x16e, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x16f, 
0x5e9: 0x170, 0x5ea: 0x171, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x172, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0x173, 0x605: 0x174, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0x175, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x121, 0x621: 0x121, 0x622: 0x121, 0x623: 0x176, 0x624: 0x6f, 0x625: 0x177, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0x178, 0x632: 0x179, 0x633: 0xba, 0x634: 0x17a, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x17b, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x17c, 0x641: 0x9b, 0x642: 0x17d, 0x643: 0x17e, 0x644: 0x73, 0x645: 0x74, 0x646: 0x17f, 0x647: 0x180, + 0x648: 0x75, 0x649: 0x181, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x182, 0x65c: 0x9b, 0x65d: 0x183, 0x65e: 0x9b, 0x65f: 0x184, + 0x660: 0x185, 0x661: 0x186, 0x662: 0x187, 0x663: 0xba, 0x664: 0x188, 0x665: 0x189, 0x666: 0x18a, 0x667: 0x18b, + 0x668: 0x9b, 0x669: 0x18c, 0x66a: 0x18d, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x18e, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x18f, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x190, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 
0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x191, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x192,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x193, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x194, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x195, 0x841: 0x196, 0x842: 0xba, 0x843: 0xba, 0x844: 0x197, 0x845: 0x197, 0x846: 0x197, 0x847: 0x198,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 284 entries, 568 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x26c, 0x27d, 0x281, 0x28c, 0x290, 0x299, 0x2a1, 0x2a7, 0x2ac, 0x2af, 0x2b3, 0x2b9, 0x2bd, 0x2c1, 0x2c5, 0x2cb, 0x2d3, 0x2da, 0x2e5, 0x2ef, 0x2f3, 0x2f6, 0x2fc, 0x300, 0x302, 0x305, 0x307, 0x30a, 0x314, 0x317, 0x326, 0x32a, 0x32f, 0x332, 0x336, 0x33b, 0x340, 0x346, 0x352, 0x361, 0x367, 0x36b, 0x37a, 0x37f, 0x387, 0x391, 0x39c, 0x3a4, 0x3b5, 0x3be, 0x3ce, 0x3db, 0x3e5, 0x3ea, 0x3f7, 0x3fb, 0x400, 0x402, 0x406, 0x408, 0x40c, 0x415, 0x41b, 0x41f, 0x42f, 0x439, 0x43e, 0x441, 0x447, 0x44e, 0x453, 0x457, 0x45d, 0x462, 0x46b, 0x470, 0x476, 0x47d, 0x484, 0x48b, 0x48f, 0x494, 0x497, 0x49c, 0x4a8, 0x4ae, 0x4b3, 0x4ba, 0x4c2, 0x4c7, 0x4cb, 0x4db, 0x4e2, 0x4e6, 0x4ea, 0x4f1, 0x4f3, 0x4f6, 0x4f9, 0x4fd, 0x506, 0x50a, 0x512, 0x51a, 0x51e, 0x524, 0x52d, 0x539, 0x540, 0x549, 0x553, 0x55a, 0x568, 0x575, 0x582, 0x58b, 0x58f, 0x59f, 0x5a7, 0x5b2, 0x5bb, 0x5c1, 0x5c9, 0x5d2, 0x5dd, 0x5e0, 0x5ec, 0x5f5, 0x5f8, 0x5fd, 0x602, 0x60f, 0x61a, 0x623, 0x62d, 0x630, 0x63a, 0x643, 0x64f, 0x65c, 0x669, 0x677, 0x67e, 0x682, 0x685, 0x68a, 0x68d, 0x692, 0x695, 0x69c, 0x6a3, 0x6a7, 0x6b2, 0x6b5, 0x6b8, 0x6bb, 0x6c1, 0x6c7, 0x6cd, 0x6d0, 0x6d3, 0x6d6, 0x6dd, 0x6e0, 0x6e5, 0x6ef, 0x6f2, 0x6f6, 0x705, 0x711, 0x715, 0x71a, 0x71e, 0x723, 0x727, 0x72c, 0x735, 0x740, 0x746, 0x74c, 0x752, 0x758, 0x761, 0x764, 0x767, 0x76b, 0x76f, 0x773, 0x779, 0x77f, 0x784, 0x787, 0x797, 0x79e, 0x7a1, 0x7a6, 0x7aa, 0x7b0, 0x7b5, 0x7b9, 0x7bf, 0x7c5, 0x7c9, 0x7d2, 0x7d7, 0x7da, 0x7dd, 0x7e1, 0x7e5, 0x7e8, 0x7f8, 0x809, 0x80e, 0x810, 0x812}
+
+// idnaSparseValues: 2069 entries, 8276 bytes
+var idnaSparseValues = [2069]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo:
0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x86 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x92}, + {value: 0x3308, lo: 
0x93, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x94 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa4 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb2 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbe + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xca + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xdb + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 
0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x111 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12b + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 
0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x143 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x145 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x14a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14d + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x150 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x169 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x171 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x182 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x187 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 
0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x199 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b5 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x34, offset 0x1d3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1db + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1de + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, 
hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1eb + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f7 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fe + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x206 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x224 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22e + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 
0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23a + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x246 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x252 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x25a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25f + {value: 0x0000, lo: 0x0c}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x059d, lo: 0x90, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x059d, lo: 0xbd, hi: 0xbf}, + // Block 0x45, offset 0x26c + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x46, offset 0x27d + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x281 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x28c + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, 
hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x290 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x299 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x2a1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09dd, lo: 0xa9, hi: 0xa9}, + {value: 0x09fd, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2ac + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2af + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2b3 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2bd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2c1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xbf}, + // Block 0x53, offset 0x2c5 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ebd, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2cb + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x56, offset 0x2da + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 
0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2e5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x58, offset 0x2ef + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x59, offset 0x2f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0x5a, offset 0x2f6 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0ef5, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x2fc + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0f15, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5c, offset 0x300 + {value: 0x0020, lo: 0x01}, + {value: 0x0f35, lo: 0x80, hi: 0xbf}, + // Block 0x5d, offset 0x302 + {value: 0x0020, lo: 0x02}, + {value: 0x1735, lo: 0x80, hi: 0x8f}, + {value: 0x1915, lo: 0x90, hi: 0xbf}, + // Block 0x5e, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x1f15, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x307 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x60, offset 0x30a + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x61, offset 0x314 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x317 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a35, lo: 0xb1, hi: 0xb1}, + {value: 0x2a55, lo: 0xb2, hi: 0xb2}, + {value: 0x2a75, lo: 0xb3, hi: 0xb3}, + {value: 0x2a95, lo: 0xb4, hi: 0xb4}, + {value: 0x2a75, lo: 0xb5, hi: 0xb5}, + {value: 0x2ab5, lo: 0xb6, hi: 0xb6}, + {value: 0x2ad5, lo: 0xb7, hi: 0xb7}, + {value: 0x2af5, lo: 0xb8, hi: 0xb9}, + {value: 0x2b15, lo: 0xba, hi: 0xbb}, + {value: 0x2b35, lo: 0xbc, hi: 0xbd}, + {value: 0x2b15, lo: 0xbe, hi: 0xbf}, + // Block 0x63, offset 0x326 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x64, offset 0x32a + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x332 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, 
lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x67, offset 0x336 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6a, offset 0x346 + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0xe00d, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x83}, + {value: 0x03f5, lo: 0x84, hi: 0x84}, + {value: 0x1329, lo: 0x85, hi: 0x85}, + {value: 0x447d, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6b, offset 0x352 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x361 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x367 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6e, offset 0x36b + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x37a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x70, offset 0x37f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x71, offset 0x387 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 
0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x391 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x39c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x74, offset 0x3a4 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x75, offset 0x3b5 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3be + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3ce + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x78, offset 0x3db + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x449d, lo: 0x9c, hi: 0x9c}, + {value: 0x44b5, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 
0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x44cd, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3e5 + {value: 0x0000, lo: 0x04}, + {value: 0x44ed, lo: 0x80, hi: 0x8f}, + {value: 0x450d, lo: 0x90, hi: 0x9f}, + {value: 0x452d, lo: 0xa0, hi: 0xaf}, + {value: 0x450d, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3ea + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7b, offset 0x3f7 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x3fb + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7d, offset 0x400 + {value: 0x0020, lo: 0x01}, + {value: 0x454d, lo: 0x80, hi: 0xbf}, + // Block 0x7e, offset 0x402 + {value: 0x0020, lo: 0x03}, + {value: 0x4d4d, lo: 0x80, hi: 0x94}, + {value: 0x4b0d, lo: 0x95, hi: 0x95}, + {value: 0x4fed, lo: 0x96, hi: 0xbf}, + // Block 0x7f, offset 0x406 + {value: 0x0020, lo: 0x01}, + {value: 0x552d, lo: 0x80, hi: 0xbf}, + // Block 0x80, offset 0x408 + {value: 0x0020, lo: 0x03}, + {value: 0x5d2d, lo: 0x80, hi: 0x84}, + {value: 0x568d, lo: 0x85, hi: 0x85}, + {value: 0x5dcd, lo: 0x86, hi: 0xbf}, + // Block 0x81, offset 0x40c + {value: 0x0020, lo: 0x08}, + {value: 0x6b8d, lo: 0x80, hi: 0x8f}, + {value: 0x6d4d, lo: 0x90, hi: 0x90}, + {value: 0x6d8d, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70ed, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x710d, lo: 0xb0, hi: 0xbf}, + // Block 0x82, offset 0x415 + {value: 0x0020, lo: 0x05}, + {value: 0x730d, lo: 0x80, hi: 0xad}, + {value: 0x656d, lo: 0xae, hi: 0xae}, + {value: 0x78cd, lo: 0xaf, hi: 0xb5}, + {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, + {value: 0x79ad, lo: 0xb7, hi: 0xbf}, + // Block 0x83, offset 0x41b + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x84, offset 0x41f + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x85, offset 0x42f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x86, offset 0x439 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x87, offset 0x43e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x88, offset 0x441 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x89, offset 0x447 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8a, offset 0x44e + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8b, offset 0x453 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8c, offset 0x457 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8d, offset 0x45d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8e, offset 0x462 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8f, offset 0x46b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x90, offset 0x470 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x91, offset 0x476 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8b0d, lo: 0x98, hi: 0x9f}, + {value: 0x8b25, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x92, offset 0x47d + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8b25, lo: 0xb0, hi: 0xb7}, + {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, + // Block 0x93, offset 0x484 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 
0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x94, offset 0x48b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x95, offset 0x48f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x494 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x97, offset 0x497 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x98, offset 0x49c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x4a8 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9a, offset 0x4ae + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9b, offset 0x4b3 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9c, offset 0x4ba + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9d, offset 0x4c2 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9e, offset 0x4c7 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0x9f, offset 0x4cb + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x4db + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + 
{value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa1, offset 0x4e2 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa2, offset 0x4e6 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa3, offset 0x4ea + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa4, offset 0x4f1 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa5, offset 0x4f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa6, offset 0x4f6 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa7, offset 0x4f9 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa8, offset 0x4fd + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x506 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x50a + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xab, offset 0x512 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xac, offset 0x51a + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xad, offset 0x51e + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xae, offset 0x524 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xaf, offset 0x52d + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, 
lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb0, offset 0x539 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb1, offset 0x540 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb2, offset 0x549 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb3, offset 0x553 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb4, offset 0x55a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb5, offset 0x568 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb6, offset 0x575 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb7, offset 0x582 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, 
lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb8, offset 0x58b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb9, offset 0x58f + {value: 0x0000, lo: 0x0f}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xba, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbb, offset 0x5a7 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbc, offset 0x5b2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbd, offset 0x5bb + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbe, offset 0x5c1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbf, offset 0x5c9 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc0, offset 0x5d2 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc1, offset 0x5dd + {value: 0x0000, lo: 0x02}, + {value: 0x0008, 
lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc2, offset 0x5e0 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc3, offset 0x5ec + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc4, offset 0x5f5 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc5, offset 0x5f8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5fd + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xc7, offset 0x602 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x9a, hi: 0x9b}, + {value: 0x3008, lo: 0x9c, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xbf}, + // Block 0xc8, offset 0x60f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x61a + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xca, offset 0x623 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xcb, offset 0x62d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xcc, offset 0x630 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 
0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xcd, offset 0x63a + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xce, offset 0x643 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xcf, offset 0x64f + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xd0, offset 0x65c + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xd1, offset 0x669 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd2, offset 0x677 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd3, offset 0x67e + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xd4, offset 0x682 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xd5, offset 0x685 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 
0xd6, offset 0x68a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xd7, offset 0x68d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0340, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd8, offset 0x692 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd9, offset 0x695 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xda, offset 0x69c + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xdb, offset 0x6a3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xdc, offset 0x6a7 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xdd, offset 0x6b2 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xde, offset 0x6b5 + {value: 0x0000, lo: 0x02}, + {value: 0xe105, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xdf, offset 0x6b8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xe0, offset 0x6bb + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbf}, + // Block 0xe1, offset 0x6c1 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe2, offset 0x6c7 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xe3, offset 0x6cd + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xe4, offset 0x6d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe5, offset 0x6d3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe6, offset 0x6d6 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, 
hi: 0xbf}, + // Block 0xe7, offset 0x6dd + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xe8, offset 0x6e0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xe9, offset 0x6e5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xea, offset 0x6ef + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xeb, offset 0x6f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xec, offset 0x6f6 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xed, offset 0x705 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xee, offset 0x711 + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xef, offset 0x715 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xf0, offset 0x71a + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf1, offset 0x71e + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf2, offset 0x723 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf3, offset 0x727 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xf4, offset 0x72c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 
0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xf5, offset 0x735 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xf6, offset 0x740 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xf7, offset 0x746 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xf8, offset 0x74c + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xf9, offset 0x752 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfa, offset 0x758 + {value: 0x0000, lo: 0x08}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0b08, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xfb, offset 0x761 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0xfc, offset 0x764 + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xfd, offset 0x767 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0818, lo: 0x81, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xfe, offset 0x76b + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xff, offset 0x76f + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x100, offset 0x773 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x101, offset 0x779 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x102, offset 0x77f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1d9, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0x103, 
offset 0x784 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0x104, offset 0x787 + {value: 0x0000, lo: 0x0f}, + {value: 0xc801, lo: 0x80, hi: 0x80}, + {value: 0xc851, lo: 0x81, hi: 0x81}, + {value: 0xc8a1, lo: 0x82, hi: 0x82}, + {value: 0xc8f1, lo: 0x83, hi: 0x83}, + {value: 0xc941, lo: 0x84, hi: 0x84}, + {value: 0xc991, lo: 0x85, hi: 0x85}, + {value: 0xc9e1, lo: 0x86, hi: 0x86}, + {value: 0xca31, lo: 0x87, hi: 0x87}, + {value: 0xca81, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcad1, lo: 0x90, hi: 0x90}, + {value: 0xcaf1, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0x105, offset 0x797 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x106, offset 0x79e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x107, offset 0x7a1 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x108, offset 0x7a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x109, offset 0x7aa + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x10a, offset 0x7b0 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x10b, offset 0x7b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x10c, offset 0x7b9 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0x10d, offset 0x7bf + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xa4}, + {value: 0x0018, lo: 0xa5, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xbf}, + // Block 0x10e, offset 0x7c5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x10f, offset 0x7c9 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x110, offset 0x7d2 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x111, offset 0x7d7 + {value: 
0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x112, offset 0x7da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x113, offset 0x7dd + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x114, offset 0x7e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x115, offset 0x7e5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x116, offset 0x7e8 + {value: 0x0020, lo: 0x0f}, + {value: 0xded1, lo: 0x80, hi: 0x89}, + {value: 0x8e35, lo: 0x8a, hi: 0x8a}, + {value: 0xe011, lo: 0x8b, hi: 0x9c}, + {value: 0x8e55, lo: 0x9d, hi: 0x9d}, + {value: 0xe251, lo: 0x9e, hi: 0xa2}, + {value: 0x8e75, lo: 0xa3, hi: 0xa3}, + {value: 0xe2f1, lo: 0xa4, hi: 0xab}, + {value: 0x7f0d, lo: 0xac, hi: 0xac}, + {value: 0xe3f1, lo: 0xad, hi: 0xaf}, + {value: 0x8e95, lo: 0xb0, hi: 0xb0}, + {value: 0xe451, lo: 0xb1, hi: 0xb6}, + {value: 0x8eb5, lo: 0xb7, hi: 0xb9}, + {value: 0xe511, lo: 0xba, hi: 0xba}, + {value: 0x8f15, lo: 0xbb, hi: 0xbb}, + {value: 0xe531, lo: 0xbc, hi: 0xbf}, + // Block 0x117, offset 0x7f8 + {value: 0x0020, lo: 0x10}, + {value: 0x93b5, lo: 0x80, hi: 0x80}, + {value: 0xf0b1, lo: 0x81, hi: 0x86}, + {value: 0x93d5, lo: 0x87, hi: 0x8a}, + {value: 0xda11, lo: 0x8b, hi: 0x8b}, + {value: 0xf171, lo: 0x8c, hi: 0x96}, + {value: 0x9455, lo: 0x97, hi: 0x97}, + {value: 0xf2d1, lo: 0x98, hi: 0xa3}, + {value: 0x9475, lo: 0xa4, hi: 0xa6}, + {value: 0xf451, lo: 0xa7, hi: 0xaa}, + {value: 0x94d5, lo: 0xab, hi: 0xab}, + {value: 0xf4d1, lo: 0xac, hi: 0xac}, + {value: 0x94f5, lo: 0xad, hi: 0xad}, + {value: 0xf4f1, lo: 0xae, hi: 0xaf}, + {value: 0x9515, lo: 0xb0, hi: 0xb1}, + {value: 0xf531, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x118, offset 0x809 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x119, offset 0x80e + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x11a, offset 0x810 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x11b, offset 0x812 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42780 bytes (41KiB); checksum: 29936AB9 diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go index 685f0e7ea..dc5225b6d 100644 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -403,9 +403,9 @@ func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, resu // Where should scanning start? if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) } // The i'th value is computed as show below. 
diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go b/vendor/golang.org/x/net/xsrftoken/xsrf.go index bc861e1f3..4f66adfca 100644 --- a/vendor/golang.org/x/net/xsrftoken/xsrf.go +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -20,9 +20,9 @@ import ( // It is exported so clients may set cookie timeouts that match generated tokens. const Timeout = 24 * time.Hour -// clean sanitizes a string for inclusion in a token by replacing all ":"s. +// clean sanitizes a string for inclusion in a token by replacing all ":" with "::". func clean(s string) string { - return strings.Replace(s, ":", "_", -1) + return strings.Replace(s, `:`, `::`, -1) } // Generate returns a URL-safe secure XSRF token that expires in 24 hours. diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..06f84b855 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..15a8b8520 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +// +build aix,ppc64 +// +build !gccgo + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 4548b993d..000000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. 
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index 4d5b531b5..000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. 
- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) - b = sttimespec.ReplaceAll(b, []byte("Timespec")) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. - ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // Rename Stat_t time fields - if goos == "freebsd" && goarch == "386" { - // Hide Stat_t.[AMCB]tim_ext fields - renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) - b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) - } - renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) - b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index e4af9424e..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if 
err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc3..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
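Before the argument loop that follows, it helps to see the Go-to-C mapping built above in one place. This table-style rewrite is an illustration only: the generator keeps its if/else chain, and it additionally expands slice parameters to a uintptr_t/size_t pair, which this sketch omits.

package main

import (
	"fmt"
	"strings"
)

// goTypeToC mirrors the mapping above: pointers, strings, unsafe.Pointer,
// uintptr and opaque _-prefixed types all travel as uintptr_t; sized
// integers map to the matching C type; anything else defaults to int.
func goTypeToC(goType string) string {
	switch {
	case strings.HasPrefix(goType, "*"),
		goType == "string",
		goType == "unsafe.Pointer",
		goType == "uintptr",
		strings.HasPrefix(goType, "_"):
		return "uintptr_t"
	case goType == "int", goType == "int32":
		return "int"
	case goType == "int64":
		return "long long"
	case goType == "uint32":
		return "unsigned int"
	case goType == "uint64":
		return "unsigned long long"
	default:
		return "int"
	}
}

func main() {
	for _, t := range []string{"*byte", "string", "int64", "uint32", "bool"} {
		fmt.Printf("%-8s -> %s\n", t, goTypeToC(t))
	}
}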
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c96009951..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) 
(r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
- return
-}
-
-zsyscall_aix_ppc64_gccgo.go
-
-// int asyscall(...)
-
-import "C"
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.asyscall(...))
- e1 = syscall.GetErrno()
- return
-}
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this program's commandline arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- // GCCGO
- textgccgo := ""
- cExtern := "/*\n#include \n"
- // GC
- textgc := ""
- dynimports := ""
- linknames := ""
- var vars []string
- // COMMON
- textcommon := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- if sysname == "" {
- sysname = funct
- }
-
- onlyCommon := false
- if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
- // This function calls another syscall which is already implemented.
- // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. 
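For the gc toolchain, those three strings expand into one dynamic-import stanza per system call. A sketch for a hypothetical //sysnb getpid() (pid int) declaration, with syscallFunc, Errno, unsafe and rawSyscall6 supplied by the gc template further down in this file:

//go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o"
//go:linkname libc_getpid libc_getpid

var libc_getpid syscallFunc

func callgetpid() (r1 uintptr, e1 Errno) {
	// nargs is 0 here; the six argument slots are zero-padded, and the
	// nonblocking //sysnb form selects rawSyscall6 over syscall6.
	r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpid)), 0, 0, 0, 0, 0, 0, 0)
	return
}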
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
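Expanded by hand, the slice rule in that comment produces the familiar wrapper shape. A self-contained illustration (not generator output) for a declaration like //sys write(fd int, p []byte) (n int, err error), written against the portable syscall package on a Unix GOOS:

package main

import (
	"syscall"
	"unsafe"
)

func write(fd int, p []byte) (n int, err error) {
	var _p0 *byte
	if len(p) > 0 {
		_p0 = &p[0] // never take the address of element 0 of an empty slice
	}
	// Pointer and length travel as two separate uintptr arguments.
	r0, _, e1 := syscall.Syscall(syscall.SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)))
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

func main() {
	write(1, []byte("hello\n")) // fd 1 = stdout
}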
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
- n++
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
- nargs := len(args)
-
- // Determine which form to use; pad args with zeros.
- asm := "sysvicall6"
- if nonblock != nil {
- asm = "rawSysvicall6"
- }
- if len(args) <= 6 {
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
- os.Exit(1)
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
- os.Exit(1)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- text += body
-
- if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "syscall"
- "unsafe"
-)
-%s
-%s
-%s
-var (
-%s
-)
-
-%s
-` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b409909..000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. -// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. -func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." 
- } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - `netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := 
make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. - -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index 07f8960ff..000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). 
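Each target OS gets its own pattern over the master list. As a standalone illustration, the FreeBSD branch's regexp applied to one made-up syscalls.master line recovers the number, prototype and name that feed format():

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as the freebsd case in the switch below.
var freebsdRE = regexp.MustCompile(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`)

func main() {
	// A simplified, invented master entry for write(2).
	line := "4\tAUE_NULL\tSTD\t{ ssize_t write(int fd, const void *buf, size_t nbyte); }"
	if m := freebsdRE.FindStringSubmatch(line); m != nil {
		num, proto, name := m[1], m[2], m[3]
		// format() upper-cases the name and emits a constant line:
		fmt.Printf("SYS_%s = %s; // %s\n", strings.ToUpper(name), num, proto)
	}
}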
-package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { - name = 
t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beede..000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = 
C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692..000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - 
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d..000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index 747079895..000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data 
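Note: the types_*.go files deleted in this hunk are not package code at all. Each is a build-ignored input to `go tool cgo -godefs`, which resolves declarations like `type Stat_t C.struct_stat` against the C headers in the preamble and emits the pure-Go ztypes_*.go files the package actually compiles. Because `go mod vendor` copies only files that participate in the build, these generator inputs drop out of vendor/ once the dependency is vendored from modules. A minimal sketch of such an input, with an invented file name and a single type for illustration:

// types_minimal.go — same shape as the deleted inputs (illustrative only).

// +build ignore

package unix

/*
#include <sys/time.h>
*/
import "C"

type Timeval C.struct_timeval

// `go tool cgo -godefs types_minimal.go` resolves the C struct and prints a
// pure-Go definition, roughly the following (field widths vary by
// GOOS/GOARCH, which is why there is one input per platform):
//
//	type Timeval struct {
//		Sec  int64
//		Usec int64
//	}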
- -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 2dd4f9542..000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 8aafbe446..000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
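Each of these preambles repeats the same trick behind RawSockaddrAny, visible above as `union sockaddr_all` plus `struct sockaddr_any`: the union makes the compiler compute the size of the largest sockaddr variant, and sockaddr_any pads a plain sockaddr out to that size, giving the syscall wrappers one buffer big enough for any address family the kernel may return. A self-contained sketch of the resulting Go-side pattern (struct layouts and the pad size here are invented; the real ones come from cgo -godefs, and the family switch lives in the package's syscall wrappers):

package main

import (
	"fmt"
	"unsafe"
)

// Hand-written stand-ins for the generated types; fields are illustrative.
type RawSockaddr struct {
	Len    uint8
	Family uint8
	Data   [14]int8
}

type RawSockaddrInet4 struct {
	Len    uint8
	Family uint8
	Port   uint16
	Addr   [4]byte
	Zero   [8]int8
}

// Padded so any sockaddr variant fits; the kernel writes from Addr onward.
type RawSockaddrAny struct {
	Addr RawSockaddr
	Pad  [96]int8
}

func main() {
	var rsa RawSockaddrAny
	rsa.Addr.Family = 2 // AF_INET on the BSDs
	// After e.g. accept or getpeername fills rsa, reinterpret by family:
	if rsa.Addr.Family == 2 {
		sa := (*RawSockaddrInet4)(unsafe.Pointer(&rsa))
		fmt.Println("IPv4 addr bytes:", sa.Addr)
	}
}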
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f934..000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go deleted file mode 100644 index f7941701e..000000000 --- a/vendor/golang.org/x/text/encoding/charmap/maketables.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" - "unicode/utf8" - - "golang.org/x/text/encoding" - "golang.org/x/text/internal/gen" -) - -const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + - "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + - ` !"#$%&'()*+,-./0123456789:;<=>?` + - `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + - "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" - -var encodings = []struct { - name string - mib string - comment string - varName string - replacement byte - mapping string -}{ - { - "IBM Code Page 037", - "IBM037", - "", - "CodePage037", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm", - }, - { - "IBM Code Page 437", - "PC8CodePage437", - "", - "CodePage437", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", - }, - { - "IBM Code Page 850", - "PC850Multilingual", - "", - "CodePage850", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", - }, - { - "IBM Code Page 852", - "PCp852", - "", - "CodePage852", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", - }, - { - "IBM Code Page 855", - "IBM855", - "", - "CodePage855", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", - }, - { - "Windows Code Page 858", // PC latin1 with Euro - "IBM00858", - "", - "CodePage858", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", - }, - { - "IBM Code Page 860", - "IBM860", - "", - "CodePage860", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm", - }, - { - "IBM Code Page 862", - "PC862LatinHebrew", - "", - "CodePage862", - encoding.ASCIISub, - 
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", - }, - { - "IBM Code Page 863", - "IBM863", - "", - "CodePage863", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm", - }, - { - "IBM Code Page 865", - "IBM865", - "", - "CodePage865", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm", - }, - { - "IBM Code Page 866", - "IBM866", - "", - "CodePage866", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-ibm866.txt", - }, - { - "IBM Code Page 1047", - "IBM1047", - "", - "CodePage1047", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm", - }, - { - "IBM Code Page 1140", - "IBM01140", - "", - "CodePage1140", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm", - }, - { - "ISO 8859-1", - "ISOLatin1", - "", - "ISO8859_1", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", - }, - { - "ISO 8859-2", - "ISOLatin2", - "", - "ISO8859_2", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", - }, - { - "ISO 8859-3", - "ISOLatin3", - "", - "ISO8859_3", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", - }, - { - "ISO 8859-4", - "ISOLatin4", - "", - "ISO8859_4", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-4.txt", - }, - { - "ISO 8859-5", - "ISOLatinCyrillic", - "", - "ISO8859_5", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-5.txt", - }, - { - "ISO 8859-6", - "ISOLatinArabic", - "", - "ISO8859_6,ISO8859_6E,ISO8859_6I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", - }, - { - "ISO 8859-7", - "ISOLatinGreek", - "", - "ISO8859_7", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", - }, - { - "ISO 8859-8", - "ISOLatinHebrew", - "", - "ISO8859_8,ISO8859_8E,ISO8859_8I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", - }, - { - "ISO 8859-9", - "ISOLatin5", - "", - "ISO8859_9", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm", - }, - { - "ISO 8859-10", - "ISOLatin6", - "", - "ISO8859_10", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", - }, - { - "ISO 8859-13", - "ISO885913", - "", - "ISO8859_13", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", - }, - { - "ISO 8859-14", - "ISO885914", - "", - "ISO8859_14", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", - }, - { - "ISO 8859-15", - "ISO885915", - "", - "ISO8859_15", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", - }, - { - "ISO 8859-16", - "ISO885916", - "", - "ISO8859_16", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", - }, - { - "KOI8-R", - "KOI8R", - "", - "KOI8R", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-r.txt", - }, - { - "KOI8-U", - "KOI8U", - "", - "KOI8U", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-u.txt", - }, - { - "Macintosh", - "Macintosh", - "", - "Macintosh", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-macintosh.txt", - }, - { - "Macintosh Cyrillic", - "MacintoshCyrillic", - "", - "MacintoshCyrillic", - 
encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", - }, - { - "Windows 874", - "Windows874", - "", - "Windows874", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-874.txt", - }, - { - "Windows 1250", - "Windows1250", - "", - "Windows1250", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1250.txt", - }, - { - "Windows 1251", - "Windows1251", - "", - "Windows1251", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1251.txt", - }, - { - "Windows 1252", - "Windows1252", - "", - "Windows1252", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1252.txt", - }, - { - "Windows 1253", - "Windows1253", - "", - "Windows1253", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1253.txt", - }, - { - "Windows 1254", - "Windows1254", - "", - "Windows1254", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1254.txt", - }, - { - "Windows 1255", - "Windows1255", - "", - "Windows1255", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1255.txt", - }, - { - "Windows 1256", - "Windows1256", - "", - "Windows1256", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1256.txt", - }, - { - "Windows 1257", - "Windows1257", - "", - "Windows1257", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1257.txt", - }, - { - "Windows 1258", - "Windows1258", - "", - "Windows1258", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1258.txt", - }, - { - "X-User-Defined", - "XUserDefined", - "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", - "XUserDefined", - encoding.ASCIISub, - ascii + - "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + - "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + - "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + - "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + - "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + - "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + - "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + - "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + - "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + - "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + - "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + - "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + - "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + - "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + - "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + - "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", - }, -} - -func getWHATWG(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 128) - for i := range mapping { - mapping[i] = '\ufffd' - } - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, 0 - if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 128 <= x { - log.Fatalf("code %d is out of range", x) - } - if 0x80 <= y && y < 0xa0 { - // We diverge from the WHATWG spec by mapping control characters - // in the range [0x80, 0xa0) to U+FFFD. 
- continue - } - mapping[x] = rune(y) - } - return ascii + string(mapping) -} - -func getUCM(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 256) - for i := range mapping { - mapping[i] = '\ufffd' - } - - charsFound := 0 - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - var c byte - var r rune - if _, err := fmt.Sscanf(s, ` \x%x |0`, &r, &c); err != nil { - continue - } - mapping[c] = r - charsFound++ - } - - if charsFound < 200 { - log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound) - } - - return string(mapping) -} - -func main() { - mibs := map[string]bool{} - all := []string{} - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "charmap") - - printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) } - - printf("import (\n") - printf("\t\"golang.org/x/text/encoding\"\n") - printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n") - printf(")\n\n") - for _, e := range encodings { - varNames := strings.Split(e.varName, ",") - all = append(all, varNames...) - varName := varNames[0] - switch { - case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"): - e.mapping = getWHATWG(e.mapping) - case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"): - e.mapping = getUCM(e.mapping) - } - - asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00 - if asciiSuperset { - low = 0x80 - } - lvn := 1 - if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") { - lvn = 3 - } - lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:] - printf("// %s is the %s encoding.\n", varName, e.name) - if e.comment != "" { - printf("//\n// %s\n", e.comment) - } - printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n", - varName, lowerVarName, lowerVarName, e.name) - if mibs[e.mib] { - log.Fatalf("MIB type %q declared multiple times.", e.mib) - } - printf("mib: identifier.%s,\n", e.mib) - printf("asciiSuperset: %t,\n", asciiSuperset) - printf("low: 0x%02x,\n", low) - printf("replacement: 0x%02x,\n", e.replacement) - - printf("decode: [256]utf8Enc{\n") - i, backMapping := 0, map[rune]byte{} - for _, c := range e.mapping { - if _, ok := backMapping[c]; !ok && c != utf8.RuneError { - backMapping[c] = byte(i) - } - var buf [8]byte - n := utf8.EncodeRune(buf[:], c) - if n > 3 { - panic(fmt.Sprintf("rune %q (%U) is too long", c, c)) - } - printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2]) - if i%2 == 1 { - printf("\n") - } - i++ - } - printf("},\n") - - printf("encode: [256]uint32{\n") - encode := make([]uint32, 0, 256) - for c, i := range backMapping { - encode = append(encode, uint32(i)<<24|uint32(c)) - } - sort.Sort(byRune(encode)) - for len(encode) < cap(encode) { - encode = append(encode, encode[len(encode)-1]) - } - for i, enc := range encode { - printf("0x%08x,", enc) - if i%8 == 7 { - printf("\n") - } - } - printf("},\n}\n") - - // Add an estimate of the size of a single Charmap{} struct value, which - // includes two 256 elem arrays of 4 bytes and some extra fields, which - // align to 3 uint64s on 64-bit architectures. - w.Size += 2*4*256 + 3*8 - } - // TODO: add proper line breaking. 
- printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) -} - -type byRune []uint32 - -func (b byRune) Len() int { return len(b) } -func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } -func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go deleted file mode 100644 index ac6b4a77f..000000000 --- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type group struct { - Encodings []struct { - Labels []string - Name string - } -} - -func main() { - gen.Init() - - r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json") - var groups []group - if err := json.NewDecoder(r).Decode(&groups); err != nil { - log.Fatalf("Error reading encodings.json: %v", err) - } - - w := &bytes.Buffer{} - fmt.Fprintln(w, "type htmlEncoding byte") - fmt.Fprintln(w, "const (") - for i, g := range groups { - for _, e := range g.Encodings { - key := strings.ToLower(e.Name) - name := consts[key] - if name == "" { - log.Fatalf("No const defined for %s.", key) - } - if i == 0 { - fmt.Fprintf(w, "%s htmlEncoding = iota\n", name) - } else { - fmt.Fprintf(w, "%s\n", name) - } - } - } - fmt.Fprintln(w, "numEncodings") - fmt.Fprint(w, ")\n\n") - - fmt.Fprintln(w, "var canonical = [numEncodings]string{") - for _, g := range groups { - for _, e := range g.Encodings { - fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name)) - } - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{") - for _, g := range groups { - for _, e := range g.Encodings { - for _, l := range e.Labels { - key := strings.ToLower(e.Name) - name := consts[key] - fmt.Fprintf(w, "%q: %s,\n", l, name) - } - } - } - fmt.Fprint(w, "}\n\n") - - var tags []string - fmt.Fprintln(w, "var localeMap = []htmlEncoding{") - for _, loc := range locales { - tags = append(tags, loc.tag) - fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag) - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " ")) - - gen.WriteGoFile("tables.go", "htmlindex", w.Bytes()) -} - -// consts maps canonical encoding name to internal constant. 
-var consts = map[string]string{ - "utf-8": "utf8", - "ibm866": "ibm866", - "iso-8859-2": "iso8859_2", - "iso-8859-3": "iso8859_3", - "iso-8859-4": "iso8859_4", - "iso-8859-5": "iso8859_5", - "iso-8859-6": "iso8859_6", - "iso-8859-7": "iso8859_7", - "iso-8859-8": "iso8859_8", - "iso-8859-8-i": "iso8859_8I", - "iso-8859-10": "iso8859_10", - "iso-8859-13": "iso8859_13", - "iso-8859-14": "iso8859_14", - "iso-8859-15": "iso8859_15", - "iso-8859-16": "iso8859_16", - "koi8-r": "koi8r", - "koi8-u": "koi8u", - "macintosh": "macintosh", - "windows-874": "windows874", - "windows-1250": "windows1250", - "windows-1251": "windows1251", - "windows-1252": "windows1252", - "windows-1253": "windows1253", - "windows-1254": "windows1254", - "windows-1255": "windows1255", - "windows-1256": "windows1256", - "windows-1257": "windows1257", - "windows-1258": "windows1258", - "x-mac-cyrillic": "macintoshCyrillic", - "gbk": "gbk", - "gb18030": "gb18030", - // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG - "big5": "big5", - "euc-jp": "eucjp", - "iso-2022-jp": "iso2022jp", - "shift_jis": "shiftJIS", - "euc-kr": "euckr", - "replacement": "replacement", - "utf-16be": "utf16be", - "utf-16le": "utf16le", - "x-user-defined": "xUserDefined", -} - -// locales is taken from -// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm. -var locales = []struct{ tag, name string }{ - // The default value. Explicitly state latin to benefit from the exact - // script option, while still making 1252 the default encoding for languages - // written in Latin script. - {"und_Latn", "windows-1252"}, - {"ar", "windows-1256"}, - {"ba", "windows-1251"}, - {"be", "windows-1251"}, - {"bg", "windows-1251"}, - {"cs", "windows-1250"}, - {"el", "iso-8859-7"}, - {"et", "windows-1257"}, - {"fa", "windows-1256"}, - {"he", "windows-1255"}, - {"hr", "windows-1250"}, - {"hu", "iso-8859-2"}, - {"ja", "shift_jis"}, - {"kk", "windows-1251"}, - {"ko", "euc-kr"}, - {"ku", "windows-1254"}, - {"ky", "windows-1251"}, - {"lt", "windows-1257"}, - {"lv", "windows-1257"}, - {"mk", "windows-1251"}, - {"pl", "iso-8859-2"}, - {"ru", "windows-1251"}, - {"sah", "windows-1251"}, - {"sk", "windows-1250"}, - {"sl", "iso-8859-2"}, - {"sr", "windows-1251"}, - {"tg", "windows-1251"}, - {"th", "windows-874"}, - {"tr", "windows-1254"}, - {"tt", "windows-1251"}, - {"uk", "windows-1251"}, - {"vi", "windows-1258"}, - {"zh-hans", "gb18030"}, - {"zh-hant", "big5"}, -} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go deleted file mode 100644 index 26cfef9c6..000000000 --- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
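The htmlindex generator that ends above emits three parallel structures from encodings.json: a compact htmlEncoding enum, a canonical-name array indexed by it, and a nameMap folding every WHATWG label onto one constant (plus the localeMap used for encoding-sniffing defaults). A toy version of the generated lookup, using a hand-picked subset of entries for illustration:

package main

import "fmt"

type htmlEncoding byte

const (
	utf8 htmlEncoding = iota
	windows1252
	koi8r
	numEncodings
)

// canonical is indexed by htmlEncoding, as in the generated tables.go.
var canonical = [numEncodings]string{"utf-8", "windows-1252", "koi8-r"}

// nameMap folds all WHATWG labels for an encoding onto one constant.
var nameMap = map[string]htmlEncoding{
	"utf-8":        utf8,
	"utf8":         utf8,
	"windows-1252": windows1252,
	"latin1":       windows1252, // historical alias per the WHATWG index
	"koi8-r":       koi8r,
}

func main() {
	if e, ok := nameMap["latin1"]; ok {
		fmt.Println(canonical[e]) // windows-1252
	}
}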
- -// +build ignore - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type registry struct { - XMLName xml.Name `xml:"registry"` - Updated string `xml:"updated"` - Registry []struct { - ID string `xml:"id,attr"` - Record []struct { - Name string `xml:"name"` - Xref []struct { - Type string `xml:"type,attr"` - Data string `xml:"data,attr"` - } `xml:"xref"` - Desc struct { - Data string `xml:",innerxml"` - // Any []struct { - // Data string `xml:",chardata"` - // } `xml:",any"` - // Data string `xml:",chardata"` - } `xml:"description,"` - MIB string `xml:"value"` - Alias []string `xml:"alias"` - MIME string `xml:"preferred_alias"` - } `xml:"record"` - } `xml:"registry"` -} - -func main() { - r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") - reg := ®istry{} - if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { - log.Fatalf("Error decoding charset registry: %v", err) - } - if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { - log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) - } - - w := &bytes.Buffer{} - fmt.Fprintf(w, "const (\n") - for _, rec := range reg.Registry[0].Record { - constName := "" - for _, a := range rec.Alias { - if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { - // Some of the constant definitions have comments in them. Strip those. - constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) - } - } - if constName == "" { - switch rec.MIB { - case "2085": - constName = "HZGB2312" // Not listed as alias for some reason. - default: - log.Fatalf("No cs alias defined for %s.", rec.MIB) - } - } - if rec.MIME != "" { - rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) - } - fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) - if len(rec.Desc.Data) > 0 { - fmt.Fprint(w, "// ") - d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) - inElem := true - attr := "" - for { - t, err := d.Token() - if err != nil { - if err != io.EOF { - log.Fatal(err) - } - break - } - switch x := t.(type) { - case xml.CharData: - attr = "" // Don't need attribute info. - a := bytes.Split([]byte(x), []byte("\n")) - for i, b := range a { - if b = bytes.TrimSpace(b); len(b) != 0 { - if !inElem && i > 0 { - fmt.Fprint(w, "\n// ") - } - inElem = false - fmt.Fprintf(w, "%s ", string(b)) - } - } - case xml.StartElement: - if x.Name.Local == "xref" { - inElem = true - use := false - for _, a := range x.Attr { - if a.Name.Local == "type" { - use = use || a.Value != "person" - } - if a.Name.Local == "data" && use { - // Patch up URLs to use https. From some links, the - // https version is different from the http one. 
- s := a.Value - s = strings.Replace(s, "http://", "https://", -1) - s = strings.Replace(s, "/unicode/", "/", -1) - attr = s + " " - } - } - } - case xml.EndElement: - inElem = false - fmt.Fprint(w, attr) - } - } - fmt.Fprint(w, "\n") - } - for _, x := range rec.Xref { - switch x.Type { - case "rfc": - fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) - case "uri": - fmt.Fprintf(w, "// Reference: %s\n", x.Data) - } - } - fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) - fmt.Fprintln(w) - } - fmt.Fprintln(w, ")") - - gen.WriteGoFile("mib.go", "identifier", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go deleted file mode 100644 index 023957a67..000000000 --- a/vendor/golang.org/x/text/encoding/japanese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -// TODO: Emoji extensions? -// https://www.unicode.org/faq/emoji_dingbats.html -// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -type entry struct { - jisCode, table int -} - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n") - fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n") - - reverse := [65536]entry{} - for i := range reverse { - reverse[i].table = -1 - } - - tables := []struct { - url string - name string - }{ - {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"}, - {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"}, - } - for i, table := range tables { - res, err := http.Get(table.url) - if err != nil { - log.Fatalf("%q: Get: %v", table.url, err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("%q: could not parse %q", table.url, s) - } - if x < 0 || 120*94 <= x { - log.Fatalf("%q: JIS code %d is out of range", table.url, x) - } - mapping[x] = y - if reverse[y].table == -1 { - reverse[y] = entry{jisCode: x, table: i} - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("%q: scanner error: %v", table.url, err) - } - - fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n", - table.name, table.name, table.url) - fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name) - for i, m := range mapping { - if m != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, m) - } - } - fmt.Printf("}\n\n") - } - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v.table == -1 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const (\n") - fmt.Printf("\tjis0208 = 1\n") - fmt.Printf("\tjis0212 = 2\n") - fmt.Printf("\tcodeMask = 0x7f\n") - fmt.Printf("\tcodeShift = 7\n") - fmt.Printf("\ttableShift = 14\n") - fmt.Printf(")\n\n") - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("//\n") - fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n") - fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n") - fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n") - fmt.Printf("// JIS code (94*j1 + j2) within that table.\n") - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x.table == -1 { - continue - } - fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n", - j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go deleted file mode 100644 index c84034fb6..000000000 --- a/vendor/golang.org/x/text/encoding/korean/maketables.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
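The japanese generator above packs each encode-table entry as table<<14 | j1<<7 | j2, where the JIS code is 94*j1 + j2 and the high bits select JIS0208 versus JIS0212. A worked round trip of that packing (the sample value is constructed here, not taken from the real tables):

package main

import "fmt"

const (
	jis0208    = 1
	jis0212    = 2
	codeMask   = 0x7f
	codeShift  = 7
	tableShift = 14
)

func main() {
	// Pack j1=0x21, j2=0x24 as a JIS0208 entry, the way the generator does.
	v := uint16(jis0208)<<tableShift | 0x21<<codeShift | 0x24

	// Unpack: this mirrors what the decoder side of the package must do.
	table := v >> tableShift
	j1 := int(v>>codeShift) & codeMask
	j2 := int(v) & codeMask
	fmt.Printf("table=%d j1=%d j2=%d jis=%d\n", table, j1, j2, 94*j1+j2)
	// Output: table=1 j1=33 j2=36 jis=3138
}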
- -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n") - fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x { - log.Fatalf("EUC-KR code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := uint16(0), uint16(0) - if x < 178*(0xc7-0x81) { - c0 = uint16(x/178) + 0x81 - c1 = uint16(x % 178) - switch { - case c1 < 1*26: - c1 += 0x41 - case c1 < 2*26: - c1 += 0x47 - default: - c1 += 0x4d - } - } else { - x -= 178 * (0xc7 - 0x81) - c0 = uint16(x/94) + 0xc7 - c1 = uint16(x%94) + 0xa1 - } - reverse[y] = c0<<8 | c1 - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. 
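// Editor's aside, not in the original source: byDecreasingLength implements
// sort.Interface by hand; on Go 1.8 or later the same ordering could be
// written inline as
//
//	sort.Slice(intervals, func(i, j int) bool {
//		return intervals[i].len() > intervals[j].len()
//	})
//
// but the vendored generators keep the explicit type.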
-type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go deleted file mode 100644 index 55016c786..000000000 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n") - fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n") - - printGB18030() - printGBK() -} - -func printGB18030() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n") - fmt.Printf("var gb18030 = [...][2]uint16{\n") - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint32(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0x10000 && y < 0x10000 { - fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y) - } - } - fmt.Printf("}\n\n") -} - -func printGBK() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*190 <= x { - log.Fatalf("GBK code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := x/190, x%190 - if c1 >= 0x3f { - c1++ - } - reverse[y] = (0x81+c0)<<8 | (0x40 + c1) - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
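// Editor's note, illustrative: the reverse-map values built in printGBK above
// pack the two GBK bytes as (0x81+c0)<<8 | (0x40+c1), with c0 = x/190 and
// c1 = x%190, bumping c1 once it reaches 0x3f so the second byte skips the
// excluded 0x7f. Pointer x = 0 thus encodes as bytes 0x81 0x40, and a raw
// c1 of 0x3f lands on 0x80 rather than 0x7f.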
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go deleted file mode 100644 index cf7fdb31a..000000000 --- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n") - fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint32{} - reverse := [65536 * 4]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*157 <= x { - log.Fatalf("Big5 code %d is out of range", x) - } - mapping[x] = y - - // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that - // "The index pointer for code point in index is the first pointer - // corresponding to code point in index", which would normally mean - // that the code below should be guarded by "if reverse[y] == 0", but - // last instead of first seems to match the behavior of - // "iconv -f UTF-8 -t BIG5". 
For example, U+8005 者 occurs twice in - // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148 - // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc") - // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc". - c0, c1 := x/157, x%157 - if c1 < 0x3f { - c1 += 0x40 - } else { - c1 += 0x62 - } - reverse[y] = (0x81+c0)<<8 | c1 - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n") - fmt.Printf("var decode = [...]uint32{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%08X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go deleted file mode 100644 index 0c36a052f..000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
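// Editor's worked example for the traditionalchinese generator above: for
// U+8005 者 the last matching Big5 pointer is 6543, giving c0 = 6543/157 = 41
// and c1 = 6543%157 = 106; since 106 >= 0x3f, c1 += 0x62, so the bytes are
// 0x81+41 = 0xAA and 106+0x62 = 0xCC. That reproduces the "\xaa\xcc" that
// iconv emits, which is why the loop lets later index entries overwrite
// earlier ones instead of keeping the first.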
- -package main - -import ( - "flag" - "fmt" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -func main() { - gen.Init() - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "compact") - - fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) - - b := newBuilder(w) - gen.WriteCLDRVersion(w) - - b.writeCompactIndex() -} - -type builder struct { - w *gen.CodeWriter - data *cldr.CLDR - supp *cldr.SupplementalData -} - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatal(err) - } - b := builder{ - w: w, - data: data, - supp: data.Supplemental(), - } - return &b -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go deleted file mode 100644 index 136cefaf0..000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file generates derivative tables based on the language package itself. - -import ( - "fmt" - "log" - "sort" - "strings" - - "golang.org/x/text/internal/language" -) - -// Compact indices: -// Note -va-X variants only apply to localization variants. -// BCP variants only ever apply to language. -// The only ambiguity between tags is with regions. - -func (b *builder) writeCompactIndex() { - // Collect all language tags for which we have any data in CLDR. - m := map[language.Tag]bool{} - for _, lang := range b.data.Locales() { - // We include all locales unconditionally to be consistent with en_US. - // We want en_US, even though it has no data associated with it. - - // TODO: put any of the languages for which no data exists at the end - // of the index. This allows all components based on ICU to use that - // as the cutoff point. - // if x := data.RawLDML(lang); false || - // x.LocaleDisplayNames != nil || - // x.Characters != nil || - // x.Delimiters != nil || - // x.Measurement != nil || - // x.Dates != nil || - // x.Numbers != nil || - // x.Units != nil || - // x.ListPatterns != nil || - // x.Collations != nil || - // x.Segmentations != nil || - // x.Rbnf != nil || - // x.Annotations != nil || - // x.Metadata != nil { - - // TODO: support POSIX natively, albeit non-standard. - tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) - m[tag] = true - // } - } - - // TODO: plural rules are also defined for the deprecated tags: - // iw mo sh tl - // Consider removing these as compact tags. - - // Include locales for plural rules, which uses a different structure. 
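// Editor's note, illustrative: the _POSIX rewrite above turns a CLDR locale
// such as "en_US_POSIX" into "en_US-u-va-posix", which language.Make (whose
// parser also accepts '_' as a separator, as the loops in this file rely on)
// canonicalizes to en-US-u-va-posix — the POSIX variant survives as a -u
// extension rather than an unsupported variant subtag.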
- for _, plurals := range b.supp.Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - m[language.Make(lang)] = true - } - } - } - - var coreTags []language.CompactCoreInfo - var special []string - - for t := range m { - if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { - log.Fatalf("Unexpected extension %v in %v", x, t) - } - if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { - cci, ok := language.GetCompactCore(t) - if !ok { - log.Fatalf("Locale for non-basic language %q", t) - } - coreTags = append(coreTags, cci) - } else { - special = append(special, t.String()) - } - } - - w := b.w - - sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) - sort.Strings(special) - - w.WriteComment(` - NumCompactTags is the number of common tags. The maximum tag is - NumCompactTags-1.`) - w.WriteConst("NumCompactTags", len(m)) - - fmt.Fprintln(w, "const (") - for i, t := range coreTags { - fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) - } - for i, t := range special { - fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) - } - fmt.Fprintln(w, ")") - - w.WriteVar("coreTags", coreTags) - - w.WriteConst("specialTagsStr", strings.Join(special, " ")) -} - -func ident(s string) string { - return strings.Replace(s, "-", "", -1) + "Index" -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go deleted file mode 100644 index 9543d5832..000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/language" - "golang.org/x/text/internal/language/compact" - "golang.org/x/text/unicode/cldr" -) - -func main() { - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile("parents.go", "compact") - - // Create parents table. - type ID uint16 - parents := make([]ID, compact.NumCompactTags) - for _, loc := range data.Locales() { - tag := language.MustParse(loc) - index, ok := compact.FromTag(tag) - if !ok { - continue - } - parentIndex := compact.ID(0) // und - for p := tag.Parent(); p != language.Und; p = p.Parent() { - if x, ok := compact.FromTag(p); ok { - parentIndex = x - break - } - } - parents[index] = ID(parentIndex) - } - - w.WriteComment(` - parents maps a compact index of a tag to the compact index of the parent of - this tag.`) - w.WriteVar("parents", parents) -} diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go deleted file mode 100644 index cdcc7febc..000000000 --- a/vendor/golang.org/x/text/internal/language/gen.go +++ /dev/null @@ -1,1520 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
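// Editor's note on writeCompactIndex above, illustrative: constant names come
// from ident, which strips hyphens and appends "Index", so "en-US" becomes
// enUSIndex and a variant-carrying tag like "ca-ES-valencia" becomes
// caESvalenciaIndex; core tags take the low IDs, and the special (variant- or
// extension-carrying) tags follow at i+len(coreTags).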
- -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/tag" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -var comment = []string{ - ` -lang holds an alphabetically sorted list of ISO-639 language identifiers. -All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -For 2-byte language identifiers, the two successive bytes have the following meaning: - - if the first letter of the 2- and 3-letter ISO codes are the same: - the second and third letter of the 3-letter ISO code. - - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -For 3-byte language identifiers the 4th byte is 0.`, - ` -langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -in lookup tables. The language ids for these language codes are derived directly -from the letters and are not consecutive.`, - ` -altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -to 2-letter language codes that cannot be derived using the method described above. -Each 3-letter code is followed by its 1-byte langID.`, - ` -altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, - ` -AliasMap maps langIDs to their suggested replacements.`, - ` -script is an alphabetically sorted list of ISO 15924 codes. The index -of the script in the string, divided by 4, is the internal scriptID.`, - ` -isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -the UN.M49 codes used for groups.)`, - ` -regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -Each 2-letter codes is followed by two bytes with the following meaning: - - [A-Z}{2}: the first letter of the 2-letter code plus these two - letters form the 3-letter ISO code. - - 0, n: index into altRegionISO3.`, - ` -regionTypes defines the status of a region for various standards.`, - ` -m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -codes indicating collections of regions.`, - ` -m49Index gives indexes into fromM49 based on the three most significant bits -of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in - fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -The region code is stored in the 9 lsb of the indexed value.`, - ` -fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, - ` -altRegionISO3 holds a list of 3-letter region codes that cannot be -mapped to 2-letter codes using the default algorithm. This is a short list.`, - ` -altRegionIDs holds a list of regionIDs the positions of which match those -of the 3-letter ISO codes in altRegionISO3.`, - ` -variantNumSpecialized is the number of specialized variants in variants.`, - ` -suppressScript is an index from langID to the dominant script for that language, -if it exists. If a script is given, it should be suppressed from the language tag.`, - ` -likelyLang is a lookup table, indexed by langID, for the most likely -scripts and regions given incomplete information. 
If more entries exist for a -given language, region and script are the index and size respectively -of the list in likelyLangList.`, - ` -likelyLangList holds lists info associated with likelyLang.`, - ` -likelyRegion is a lookup table, indexed by regionID, for the most likely -languages and scripts given incomplete information. If more entries exist -for a given regionID, lang and script are the index and size respectively -of the list in likelyRegionList. -TODO: exclude containers and user-definable regions from the list.`, - ` -likelyRegionList holds lists info associated with likelyRegion.`, - ` -likelyScript is a lookup table, indexed by scriptID, for the most likely -languages and regions given a script.`, - ` -nRegionGroups is the number of region groups.`, - ` -regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -where each set holds all groupings that are directly connected in a region -containment graph.`, - ` -regionInclusionBits is an array of bit vectors where every vector represents -a set of region groupings. These sets are used to compute the distance -between two regions for the purpose of language matching.`, - ` -regionInclusionNext marks, for each entry in regionInclusionBits, the set of -all groups that are reachable from the groups set in the respective entry.`, -} - -// TODO: consider changing some of these structures to tries. This can reduce -// memory, but may increase the need for memory allocations. This could be -// mitigated if we can piggyback on language tags for common cases. - -func failOnError(e error) { - if e != nil { - log.Panic(e) - } -} - -type setType int - -const ( - Indexed setType = 1 + iota // all elements must be of same size - Linear -) - -type stringSet struct { - s []string - sorted, frozen bool - - // We often need to update values after the creation of an index is completed. - // We include a convenience map for keeping track of this. - update map[string]string - typ setType // used for checking. -} - -func (ss *stringSet) clone() stringSet { - c := *ss - c.s = append([]string(nil), c.s...) - return c -} - -func (ss *stringSet) setType(t setType) { - if ss.typ != t && ss.typ != 0 { - log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) - } -} - -// parse parses a whitespace-separated string and initializes ss with its -// components. 
-func (ss *stringSet) parse(s string) { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(bufio.ScanWords) - for scan.Scan() { - ss.add(scan.Text()) - } -} - -func (ss *stringSet) assertChangeable() { - if ss.frozen { - log.Panic("attempt to modify a frozen stringSet") - } -} - -func (ss *stringSet) add(s string) { - ss.assertChangeable() - ss.s = append(ss.s, s) - ss.sorted = ss.frozen -} - -func (ss *stringSet) freeze() { - ss.compact() - ss.frozen = true -} - -func (ss *stringSet) compact() { - if ss.sorted { - return - } - a := ss.s - sort.Strings(a) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - a[k+1] = a[i] - k++ - } - } - ss.s = a[:k+1] - ss.sorted = ss.frozen -} - -type funcSorter struct { - fn func(a, b string) bool - sort.StringSlice -} - -func (s funcSorter) Less(i, j int) bool { - return s.fn(s.StringSlice[i], s.StringSlice[j]) -} - -func (ss *stringSet) sortFunc(f func(a, b string) bool) { - ss.compact() - sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) -} - -func (ss *stringSet) remove(s string) { - ss.assertChangeable() - if i, ok := ss.find(s); ok { - copy(ss.s[i:], ss.s[i+1:]) - ss.s = ss.s[:len(ss.s)-1] - } -} - -func (ss *stringSet) replace(ol, nu string) { - ss.s[ss.index(ol)] = nu - ss.sorted = ss.frozen -} - -func (ss *stringSet) index(s string) int { - ss.setType(Indexed) - i, ok := ss.find(s) - if !ok { - if i < len(ss.s) { - log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) - } - log.Panicf("find: item %q is not in list", s) - - } - return i -} - -func (ss *stringSet) find(s string) (int, bool) { - ss.compact() - i := sort.SearchStrings(ss.s, s) - return i, i != len(ss.s) && ss.s[i] == s -} - -func (ss *stringSet) slice() []string { - ss.compact() - return ss.s -} - -func (ss *stringSet) updateLater(v, key string) { - if ss.update == nil { - ss.update = map[string]string{} - } - ss.update[v] = key -} - -// join joins the string and ensures that all entries are of the same length. -func (ss *stringSet) join() string { - ss.setType(Indexed) - n := len(ss.s[0]) - for _, s := range ss.s { - if len(s) != n { - log.Panicf("join: not all entries are of the same length: %q", s) - } - } - ss.s = append(ss.s, strings.Repeat("\xff", n)) - return strings.Join(ss.s, "") -} - -// ianaEntry holds information for an entry in the IANA Language Subtag Repository. -// All types use the same entry. -// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various -// fields. -type ianaEntry struct { - typ string - description []string - scope string - added string - preferred string - deprecated string - suppressScript string - macro string - prefix []string -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData - - // indices - locale stringSet // common locales - lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data - langNoIndex stringSet // 3-letter ISO codes with no associated data - script stringSet // 4-letter ISO codes - region stringSet // 2-letter ISO or 3-digit UN M49 codes - variant stringSet // 4-8-alphanumeric variant code. - - // Region codes that are groups with their corresponding group IDs. 
- groups map[int]index - - // langInfo - registry map[string]*ianaEntry -} - -type index uint - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - failOnError(err) - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - b.parseRegistry() - return &b -} - -func (b *builder) parseRegistry() { - r := gen.OpenIANAFile("assignments/language-subtag-registry") - defer r.Close() - b.registry = make(map[string]*ianaEntry) - - scan := bufio.NewScanner(r) - scan.Split(bufio.ScanWords) - var record *ianaEntry - for more := scan.Scan(); more; { - key := scan.Text() - more = scan.Scan() - value := scan.Text() - switch key { - case "Type:": - record = &ianaEntry{typ: value} - case "Subtag:", "Tag:": - if s := strings.SplitN(value, "..", 2); len(s) > 1 { - for a := s[0]; a <= s[1]; a = inc(a) { - b.addToRegistry(a, record) - } - } else { - b.addToRegistry(value, record) - } - case "Suppress-Script:": - record.suppressScript = value - case "Added:": - record.added = value - case "Deprecated:": - record.deprecated = value - case "Macrolanguage:": - record.macro = value - case "Preferred-Value:": - record.preferred = value - case "Prefix:": - record.prefix = append(record.prefix, value) - case "Scope:": - record.scope = value - case "Description:": - buf := []byte(value) - for more = scan.Scan(); more; more = scan.Scan() { - b := scan.Bytes() - if b[0] == '%' || b[len(b)-1] == ':' { - break - } - buf = append(buf, ' ') - buf = append(buf, b...) - } - record.description = append(record.description, string(buf)) - continue - default: - continue - } - more = scan.Scan() - } - if scan.Err() != nil { - log.Panic(scan.Err()) - } -} - -func (b *builder) addToRegistry(key string, entry *ianaEntry) { - if info, ok := b.registry[key]; ok { - if info.typ != "language" || entry.typ != "extlang" { - log.Fatalf("parseRegistry: tag %q already exists", key) - } - } else { - b.registry[key] = entry - } -} - -var commentIndex = make(map[string]string) - -func init() { - for _, s := range comment { - key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) - commentIndex[key] = s - } -} - -func (b *builder) comment(name string) { - if s := commentIndex[name]; len(s) > 0 { - b.w.WriteComment(s) - } else { - fmt.Fprintln(b.w) - } -} - -func (b *builder) pf(f string, x ...interface{}) { - fmt.Fprintf(b.hw, f, x...) - fmt.Fprint(b.hw, "\n") -} - -func (b *builder) p(x ...interface{}) { - fmt.Fprintln(b.hw, x...) -} - -func (b *builder) addSize(s int) { - b.w.Size += s - b.pf("// Size: %d bytes", s) -} - -func (b *builder) writeConst(name string, x interface{}) { - b.comment(name) - b.w.WriteConst(name, x) -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - b.pf("const (") - for _, v := range values { - b.pf("\t_%s = %v", v, f(v)) - } - b.pf(")") -} - -// writeType writes the type of the given value, which must be a struct. 
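// Editor's note on parseRegistry above, illustrative: registry ranges such as
// "qaa..qtz" (the private-use languages, see langPrivateStart/langPrivateEnd
// below) are expanded by repeatedly applying inc, the lexicographic successor
// function defined further down, so inc("qaa") == "qab" and
// inc("qaz") == "qba", and every subtag in the range shares one ianaEntry
// record.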
-func (b *builder) writeType(value interface{}) { - b.comment(reflect.TypeOf(value).Name()) - b.w.WriteType(value) -} - -func (b *builder) writeSlice(name string, ss interface{}) { - b.writeSliceAddSize(name, 0, ss) -} - -func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { - b.comment(name) - b.w.Size += extraSize - v := reflect.ValueOf(ss) - t := v.Type().Elem() - b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) - - fmt.Fprintf(b.w, "var %s = ", name) - b.w.WriteArray(ss) - b.p() -} - -type FromTo struct { - From, To uint16 -} - -func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { - ss.sortFunc(func(a, b string) bool { - return index(a) < index(b) - }) - m := []FromTo{} - for _, s := range ss.s { - m = append(m, FromTo{index(s), index(ss.update[s])}) - } - b.writeSlice(name, m) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s string) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -func (b *builder) writeBitVector(name string, ss []string) { - vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) - for _, s := range ss { - v := strToInt(s) - vec[v/8] |= 1 << (v % 8) - } - b.writeSlice(name, vec) -} - -// TODO: convert this type into a list or two-stage trie. -func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size())) - for _, k := range m { - sz += len(k) - } - b.addSize(sz) - keys := []string{} - b.pf(`var %s = map[string]uint16{`, name) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - b.pf("\t%q: %v,", k, f(m[k])) - } - b.p("}") -} - -func (b *builder) writeMap(name string, m interface{}) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) - b.addSize(sz) - f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { - return strings.IndexRune("{}, ", r) != -1 - }) - sort.Strings(f[1:]) - b.pf(`var %s = %s{`, name, f[0]) - for _, kv := range f[1:] { - b.pf("\t%s,", kv) - } - b.p("}") -} - -func (b *builder) langIndex(s string) uint16 { - if s == "und" { - return 0 - } - if i, ok := b.lang.find(s); ok { - return uint16(i) - } - return uint16(strToInt(s)) + uint16(len(b.lang.s)) -} - -// inc advances the string to its lexicographical successor. -func inc(s string) string { - const maxTagLength = 4 - var buf [maxTagLength]byte - intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) - for i := 0; i < len(s); i++ { - if s[i] <= 'Z' { - buf[i] -= 'a' - 'A' - } - } - return string(buf[:len(s)]) -} - -func (b *builder) parseIndices() { - meta := b.supp.Metadata - - for k, v := range b.registry { - var ss *stringSet - switch v.typ { - case "language": - if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { - b.lang.add(k) - continue - } else { - ss = &b.langNoIndex - } - case "region": - ss = &b.region - case "script": - ss = &b.script - case "variant": - ss = &b.variant - default: - continue - } - ss.add(k) - } - // Include any language for which there is data. 
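// Editor's note, illustrative: strToInt and intToStr above treat a lowercase
// tag as a base-26 number with 'a' == 0, so strToInt("ab") == 1 and
// strToInt("ba") == 26; langIndex uses this to give 3-letter codes without
// table data a stable ID of strToInt(s) + len(b.lang.s), derived from the
// letters alone.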
- for _, lang := range b.data.Locales() { - if x := b.data.RawLDML(lang); false || - x.LocaleDisplayNames != nil || - x.Characters != nil || - x.Delimiters != nil || - x.Measurement != nil || - x.Dates != nil || - x.Numbers != nil || - x.Units != nil || - x.ListPatterns != nil || - x.Collations != nil || - x.Segmentations != nil || - x.Rbnf != nil || - x.Annotations != nil || - x.Metadata != nil { - - from := strings.Split(lang, "_") - if lang := from[0]; lang != "root" { - b.lang.add(lang) - } - } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range b.data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - if lang = strings.Split(lang, "_")[0]; lang != "root" { - b.lang.add(lang) - } - } - } - } - // Include languages in likely subtags. - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - b.lang.add(from[0]) - } - // Include ISO-639 alpha-3 bibliographic entries. - for _, a := range meta.Alias.LanguageAlias { - if a.Reason == "bibliographic" { - b.langNoIndex.add(a.Type) - } - } - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 { - b.region.add(reg.Type) - } - } - - for _, s := range b.lang.s { - if len(s) == 3 { - b.langNoIndex.remove(s) - } - } - b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) - b.writeConst("NumScripts", len(b.script.slice())) - b.writeConst("NumRegions", len(b.region.slice())) - - // Add dummy codes at the start of each list to represent "unspecified". - b.lang.add("---") - b.script.add("----") - b.region.add("---") - - // common locales - b.locale.parse(meta.DefaultContent.Locales) -} - -// TODO: region inclusion data will probably not be use used in future matchers. - -func (b *builder) computeRegionGroups() { - b.groups = make(map[int]index) - - // Create group indices. - for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. - b.groups[i] = index(len(b.groups)) - } - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - if _, ok := b.groups[group]; !ok { - b.groups[group] = index(len(b.groups)) - } - } - if len(b.groups) > 64 { - log.Fatalf("only 64 groups supported, found %d", len(b.groups)) - } - b.writeConst("nRegionGroups", len(b.groups)) -} - -var langConsts = []string{ - "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", - "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", - "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", - "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", - "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", - "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", - - // constants for grandfathered tags (if not already defined) - "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", - "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", -} - -// writeLanguage generates all tables needed for language canonicalization. 
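// Editor's worked example for the lang table written below, illustrative:
// "en" and "eng" share a first letter, so the 4-byte entry is "enng"; "sh"
// maps to "hbs", whose first letter differs, so its entry is "sh" followed by
// a zero byte and the index of "hbs" in altLangISO3 — the
// string([]byte{0, byte(altLangISO3.index(s))}) branch of the loop.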
-func (b *builder) writeLanguage() { - meta := b.supp.Metadata - - b.writeConst("nonCanonicalUnd", b.lang.index("und")) - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) - b.writeConst("langPrivateStart", b.langIndex("qaa")) - b.writeConst("langPrivateEnd", b.langIndex("qtz")) - - // Get language codes that need to be mapped (overlong 3-letter codes, - // deprecated 2-letter codes, legacy and grandfathered tags.) - langAliasMap := stringSet{} - aliasTypeMap := map[string]AliasType{} - - // altLangISO3 get the alternative ISO3 names that need to be mapped. - altLangISO3 := stringSet{} - // Add dummy start to avoid the use of index 0. - altLangISO3.add("---") - altLangISO3.updateLater("---", "aa") - - lang := b.lang.clone() - for _, a := range meta.Alias.LanguageAlias { - if a.Replacement == "" { - a.Replacement = "und" - } - // TODO: support mapping to tags - repl := strings.SplitN(a.Replacement, "_", 2)[0] - if a.Reason == "overlong" { - if len(a.Replacement) == 2 && len(a.Type) == 3 { - lang.updateLater(a.Replacement, a.Type) - } - } else if len(a.Type) <= 3 { - switch a.Reason { - case "macrolanguage": - aliasTypeMap[a.Type] = Macro - case "deprecated": - // handled elsewhere - continue - case "bibliographic", "legacy": - if a.Type == "no" { - continue - } - aliasTypeMap[a.Type] = Legacy - default: - log.Fatalf("new %s alias: %s", a.Reason, a.Type) - } - langAliasMap.add(a.Type) - langAliasMap.updateLater(a.Type, repl) - } - } - // Manually add the mapping of "nb" (Norwegian) to its macro language. - // This can be removed if CLDR adopts this change. - langAliasMap.add("nb") - langAliasMap.updateLater("nb", "no") - aliasTypeMap["nb"] = Macro - - for k, v := range b.registry { - // Also add deprecated values for 3-letter ISO codes, which CLDR omits. - if v.typ == "language" && v.deprecated != "" && v.preferred != "" { - langAliasMap.add(k) - langAliasMap.updateLater(k, v.preferred) - aliasTypeMap[k] = Deprecated - } - } - // Fix CLDR mappings. - lang.updateLater("tl", "tgl") - lang.updateLater("sh", "hbs") - lang.updateLater("mo", "mol") - lang.updateLater("no", "nor") - lang.updateLater("tw", "twi") - lang.updateLater("nb", "nob") - lang.updateLater("ak", "aka") - lang.updateLater("bh", "bih") - - // Ensure that each 2-letter code is matched with a 3-letter code. - for _, v := range lang.s[1:] { - s, ok := lang.update[v] - if !ok { - if s, ok = lang.update[langAliasMap.update[v]]; !ok { - continue - } - lang.update[v] = s - } - if v[0] != s[0] { - altLangISO3.add(s) - altLangISO3.updateLater(s, v) - } - } - - // Complete canonicalized language tags. - lang.freeze() - for i, v := range lang.s { - // We can avoid these manual entries by using the IANA registry directly. - // Seems easier to update the list manually, as changes are rare. - // The panic in this loop will trigger if we miss an entry. - add := "" - if s, ok := lang.update[v]; ok { - if s[0] == v[0] { - add = s[1:] - } else { - add = string([]byte{0, byte(altLangISO3.index(s))}) - } - } else if len(v) == 3 { - add = "\x00" - } else { - log.Panicf("no data for long form of %q", v) - } - lang.s[i] += add - } - b.writeConst("lang", tag.Index(lang.join())) - - b.writeConst("langNoIndexOffset", len(b.lang.s)) - - // space of all valid 3-letter language identifiers. 
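// Editor's note, illustrative: langNoIndex entries are all 3 letters, so the
// bit vector written below spans 26^3 == 17576 bits (2197 bytes), one bit per
// possible 3-letter code, set when the code is valid but carries no data.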
- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) - - altLangIndex := []uint16{} - for i, s := range altLangISO3.slice() { - altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) - if i > 0 { - idx := b.lang.index(altLangISO3.update[s]) - altLangIndex = append(altLangIndex, uint16(idx)) - } - } - b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) - b.writeSlice("altLangIndex", altLangIndex) - - b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) - types := make([]AliasType, len(langAliasMap.s)) - for i, s := range langAliasMap.s { - types[i] = aliasTypeMap[s] - } - b.writeSlice("AliasTypes", types) -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -func (b *builder) writeScript() { - b.writeConsts(b.script.index, scriptConsts...) - b.writeConst("script", tag.Index(b.script.join())) - - supp := make([]uint8, len(b.lang.slice())) - for i, v := range b.lang.slice()[1:] { - if sc := b.registry[v].suppressScript; sc != "" { - supp[i+1] = uint8(b.script.index(sc)) - } - } - b.writeSlice("suppressScript", supp) - - // There is only one deprecated script in CLDR. This value is hard-coded. - // We check here if the code must be updated. - for _, a := range b.supp.Metadata.Alias.ScriptAlias { - if a.Type != "Qaai" { - log.Panicf("unexpected deprecated stript %q", a.Type) - } - } -} - -func parseM49(s string) int16 { - if len(s) == 0 { - return 0 - } - v, err := strconv.ParseUint(s, 10, 10) - failOnError(err) - return int16(v) -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. -} - -func (b *builder) writeRegion() { - b.writeConsts(b.region.index, regionConsts...) - - isoOffset := b.region.index("AA") - m49map := make([]int16, len(b.region.slice())) - fromM49map := make(map[int16]int) - altRegionISO3 := "" - altRegionIDs := []uint16{} - - b.writeConst("isoRegionOffset", isoOffset) - - // 2-letter region lookup and mapping to numeric codes. - regionISO := b.region.clone() - regionISO.s = regionISO.s[isoOffset:] - regionISO.sorted = false - - regionTypes := make([]byte, len(b.region.s)) - - // Is the region valid BCP 47? - for s, e := range b.registry { - if len(s) == 2 && s == strings.ToUpper(s) { - i := b.region.index(s) - for _, d := range e.description { - if strings.Contains(d, "Private use") { - regionTypes[i] = iso3166UserAssigned - } - } - regionTypes[i] |= bcp47Region - } - } - - // Is the region a valid ccTLD? 
- r := gen.OpenIANAFile("domains/root/db") - defer r.Close() - - buf, err := ioutil.ReadAll(r) - failOnError(err) - re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) - for _, m := range re.FindAllSubmatch(buf, -1) { - i := b.region.index(strings.ToUpper(string(m[1]))) - regionTypes[i] |= ccTLD - } - - b.writeSlice("regionTypes", regionTypes) - - iso3Set := make(map[string]int) - update := func(iso2, iso3 string) { - i := regionISO.index(iso2) - if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { - regionISO.s[i] += iso3[1:] - iso3Set[iso3] = -1 - } else { - if ok && j >= 0 { - regionISO.s[i] += string([]byte{0, byte(j)}) - } else { - iso3Set[iso3] = len(altRegionISO3) - regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) - altRegionISO3 += iso3 - altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - i := regionISO.index(tc.Type) + isoOffset - if d := m49map[i]; d != 0 { - log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) - } - m49 := parseM49(tc.Numeric) - m49map[i] = m49 - if r := fromM49map[m49]; r == 0 { - fromM49map[m49] = i - } else if r != i { - dep := b.registry[regionISO.s[r-isoOffset]].deprecated - if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { - fromM49map[m49] = i - } - } - } - for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { - if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { - from := parseM49(ta.Type) - if r := fromM49map[from]; r == 0 { - fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - if len(tc.Alpha3) == 3 { - update(tc.Type, tc.Alpha3) - } - } - // This entries are not included in territoryCodes. Mostly 3-letter variants - // of deleted codes and an entry for QU. - for _, m := range []struct{ iso2, iso3 string }{ - {"CT", "CTE"}, - {"DY", "DHY"}, - {"HV", "HVO"}, - {"JT", "JTN"}, - {"MI", "MID"}, - {"NH", "NHB"}, - {"NQ", "ATN"}, - {"PC", "PCI"}, - {"PU", "PUS"}, - {"PZ", "PCZ"}, - {"RH", "RHO"}, - {"VD", "VDR"}, - {"WK", "WAK"}, - // These three-letter codes are used for others as well. - {"FQ", "ATF"}, - } { - update(m.iso2, m.iso3) - } - for i, s := range regionISO.s { - if len(s) != 4 { - regionISO.s[i] = s + " " - } - } - b.writeConst("regionISO", tag.Index(regionISO.join())) - b.writeConst("altRegionISO3", altRegionISO3) - b.writeSlice("altRegionIDs", altRegionIDs) - - // Create list of deprecated regions. - // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only - // Transitionally-reserved mapping not included. - regionOldMap := stringSet{} - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { - regionOldMap.add(reg.Type) - regionOldMap.updateLater(reg.Type, reg.Replacement) - i, _ := regionISO.find(reg.Type) - j, _ := regionISO.find(reg.Replacement) - if k := m49map[i+isoOffset]; k == 0 { - m49map[i+isoOffset] = m49map[j+isoOffset] - } - } - } - b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { - return uint16(b.region.index(s)) - }) - // 3-digit region lookup, groupings. 
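// Editor's note on the UN.M49 tables built below, restating the scheme from
// the comments at the top of this file: each fromM49 entry is a uint16 whose
// upper 7 bits hold the 7 least-significant bits of the 10-bit M.49 code and
// whose low 9 bits hold the region ID; m49Index buckets fromM49 by the top
// 3 bits of the code, so a lookup scans only
// fromM49[m49Index[code>>7]:m49Index[(code>>7)+1]].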
- for i := 1; i < isoOffset; i++ { - m := parseM49(b.region.s[i]) - m49map[i] = m - fromM49map[m] = i - } - b.writeSlice("m49", m49map) - - const ( - searchBits = 7 - regionBits = 9 - ) - if len(m49map) >= 1< %d", len(m49map), 1<>searchBits] = int16(len(fromM49)) - } - b.writeSlice("m49Index", m49Index) - b.writeSlice("fromM49", fromM49) -} - -const ( - // TODO: put these lists in regionTypes as user data? Could be used for - // various optimizations and refinements and could be exposed in the API. - iso3166Except = "AC CP DG EA EU FX IC SU TA UK" - iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. - // DY and RH are actually not deleted, but indeterminately reserved. - iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" -) - -const ( - iso3166UserAssigned = 1 << iota - ccTLD - bcp47Region -) - -func find(list []string, s string) int { - for i, t := range list { - if t == s { - return i - } - } - return -1 -} - -// writeVariants generates per-variant information and creates a map from variant -// name to index value. We assign index values such that sorting multiple -// variants by index value will result in the correct order. -// There are two types of variants: specialized and general. Specialized variants -// are only applicable to certain language or language-script pairs. Generalized -// variants apply to any language. Generalized variants always sort after -// specialized variants. We will therefore always assign a higher index value -// to a generalized variant than any other variant. Generalized variants are -// sorted alphabetically among themselves. -// Specialized variants may also sort after other specialized variants. Such -// variants will be ordered after any of the variants they may follow. -// We assume that if a variant x is followed by a variant y, then for any prefix -// p of x, p-x is a prefix of y. This allows us to order tags based on the -// maximum of the length of any of its prefixes. -// TODO: it is possible to define a set of Prefix values on variants such that -// a total order cannot be defined to the point that this algorithm breaks. -// In other words, we cannot guarantee the same order of variants for the -// future using the same algorithm or for non-compliant combinations of -// variants. For this reason, consider using simple alphabetic sorting -// of variants and ignore Prefix restrictions altogether. -func (b *builder) writeVariant() { - generalized := stringSet{} - specialized := stringSet{} - specializedExtend := stringSet{} - // Collate the variants by type and check assumptions. - for _, v := range b.variant.slice() { - e := b.registry[v] - if len(e.prefix) == 0 { - generalized.add(v) - continue - } - c := strings.Split(e.prefix[0], "-") - hasScriptOrRegion := false - if len(c) > 1 { - _, hasScriptOrRegion = b.script.find(c[1]) - if !hasScriptOrRegion { - _, hasScriptOrRegion = b.region.find(c[1]) - - } - } - if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { - // Variant is preceded by a language. - specialized.add(v) - continue - } - // Variant is preceded by another variant. - specializedExtend.add(v) - prefix := c[0] + "-" - if hasScriptOrRegion { - prefix += c[1] - } - for _, p := range e.prefix { - // Verify that the prefix minus the last element is a prefix of the - // predecessor element. 
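// Editor's elaboration, illustrative: for a variant such as "1994" with
// prefix "sl-rozaj-biske", the last element "biske" must itself list
// "sl-rozaj" as one of its prefixes — exactly what the find call below
// verifies. Ordering variants by their maximum prefix length then yields
// rozaj < biske < 1994 in the generated index, so chained variants sort in
// tag order.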
- i := strings.LastIndex(p, "-") - pred := b.registry[p[i+1:]] - if find(pred.prefix, p[:i]) < 0 { - log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) - } - // The sorting used below does not work in the general case. It works - // if we assume that variants that may be followed by others only have - // prefixes of the same length. Verify this. - count := strings.Count(p[:i], "-") - for _, q := range pred.prefix { - if c := strings.Count(q, "-"); c != count { - log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) - } - } - if !strings.HasPrefix(p, prefix) { - log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) - } - } - } - - // Sort extended variants. - a := specializedExtend.s - less := func(v, w string) bool { - // Sort by the maximum number of elements. - maxCount := func(s string) (max int) { - for _, p := range b.registry[s].prefix { - if c := strings.Count(p, "-"); c > max { - max = c - } - } - return - } - if cv, cw := maxCount(v), maxCount(w); cv != cw { - return cv < cw - } - // Sort by name as tie breaker. - return v < w - } - sort.Sort(funcSorter{less, sort.StringSlice(a)}) - specializedExtend.frozen = true - - // Create index from variant name to index. - variantIndex := make(map[string]uint8) - add := func(s []string) { - for _, v := range s { - variantIndex[v] = uint8(len(variantIndex)) - } - } - add(specialized.slice()) - add(specializedExtend.s) - numSpecialized := len(variantIndex) - add(generalized.slice()) - if n := len(variantIndex); n > 255 { - log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) - } - b.writeMap("variantIndex", variantIndex) - b.writeConst("variantNumSpecialized", numSpecialized) -} - -func (b *builder) writeLanguageInfo() { -} - -// writeLikelyData writes tables that are used both for finding parent relations and for -// language matching. Each entry contains additional bits to indicate the status of the -// data to know when it cannot be used for parent relations. -func (b *builder) writeLikelyData() { - const ( - isList = 1 << iota - scriptInFrom - regionInFrom - ) - type ( // generated types - likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 - } - likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 - } - likelyLangRegion struct { - lang uint16 - region uint16 - } - // likelyTag is used for getting likely tags for group regions, where - // the likely region might be a region contained in the group. 
- likelyTag struct { - lang uint16 - region uint16 - script uint8 - } - ) - var ( // generated variables - likelyRegionGroup = make([]likelyTag, len(b.groups)) - likelyLang = make([]likelyScriptRegion, len(b.lang.s)) - likelyRegion = make([]likelyLangScript, len(b.region.s)) - likelyScript = make([]likelyLangRegion, len(b.script.s)) - likelyLangList = []likelyScriptRegion{} - likelyRegionList = []likelyLangScript{} - ) - type fromTo struct { - from, to []string - } - langToOther := map[int][]fromTo{} - regionToOther := map[int][]fromTo{} - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - to := strings.Split(m.To, "_") - if len(to) != 3 { - log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) - } - if len(from) > 3 { - log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) - } - if from[0] != to[0] && from[0] != "und" { - log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) - } - if len(from) == 3 { - if from[2] != to[2] { - log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) - } - if from[0] != "und" { - log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) - } - } - if len(from) == 1 || from[0] != "und" { - id := 0 - if from[0] != "und" { - id = b.lang.index(from[0]) - } - langToOther[id] = append(langToOther[id], fromTo{from, to}) - } else if len(from) == 2 && len(from[1]) == 4 { - sid := b.script.index(from[1]) - likelyScript[sid].lang = uint16(b.langIndex(to[0])) - likelyScript[sid].region = uint16(b.region.index(to[2])) - } else { - r := b.region.index(from[len(from)-1]) - if id, ok := b.groups[r]; ok { - if from[0] != "und" { - log.Fatalf("region changed unexpectedly: %s -> %s", from, to) - } - likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) - likelyRegionGroup[id].script = uint8(b.script.index(to[1])) - likelyRegionGroup[id].region = uint16(b.region.index(to[2])) - } else { - regionToOther[r] = append(regionToOther[r], fromTo{from, to}) - } - } - } - b.writeType(likelyLangRegion{}) - b.writeSlice("likelyScript", likelyScript) - - for id := range b.lang.s { - list := langToOther[id] - if len(list) == 1 { - likelyLang[id].region = uint16(b.region.index(list[0].to[2])) - likelyLang[id].script = uint8(b.script.index(list[0].to[1])) - } else if len(list) > 1 { - likelyLang[id].flags = isList - likelyLang[id].region = uint16(len(likelyLangList)) - likelyLang[id].script = uint8(len(list)) - for _, x := range list { - flags := uint8(0) - if len(x.from) > 1 { - if x.from[1] == x.to[2] { - flags = regionInFrom - } else { - flags = scriptInFrom - } - } - likelyLangList = append(likelyLangList, likelyScriptRegion{ - region: uint16(b.region.index(x.to[2])), - script: uint8(b.script.index(x.to[1])), - flags: flags, - }) - } - } - } - // TODO: merge suppressScript data with this table. 
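// Editor's note, illustrative: a CLDR rule such as und_Cyrl -> ru_Cyrl_RU
// takes the len(from) == 2, 4-letter-script branch above and fills
// likelyScript[Cyrl] with lang ru and region RU; a language with several
// expansions instead gets the isList flag, with its region and script fields
// repurposed as index and length into likelyLangList, and scriptInFrom or
// regionInFrom recording which part of the "from" tag was already given.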
- b.writeType(likelyScriptRegion{}) - b.writeSlice("likelyLang", likelyLang) - b.writeSlice("likelyLangList", likelyLangList) - - for id := range b.region.s { - list := regionToOther[id] - if len(list) == 1 { - likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) - likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) - if len(list[0].from) > 2 { - likelyRegion[id].flags = scriptInFrom - } - } else if len(list) > 1 { - likelyRegion[id].flags = isList - likelyRegion[id].lang = uint16(len(likelyRegionList)) - likelyRegion[id].script = uint8(len(list)) - for i, x := range list { - if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { - log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) - } - x := likelyLangScript{ - lang: uint16(b.langIndex(x.to[0])), - script: uint8(b.script.index(x.to[1])), - } - if len(list[0].from) > 2 { - x.flags = scriptInFrom - } - likelyRegionList = append(likelyRegionList, x) - } - } - } - b.writeType(likelyLangScript{}) - b.writeSlice("likelyRegion", likelyRegion) - b.writeSlice("likelyRegionList", likelyRegionList) - - b.writeType(likelyTag{}) - b.writeSlice("likelyRegionGroup", likelyRegionGroup) -} - -func (b *builder) writeRegionInclusionData() { - var ( - // mm holds for each group the set of groups with a distance of 1. - mm = make(map[int][]index) - - // containment holds for each group the transitive closure of - // containment of other groups. - containment = make(map[index][]index) - ) - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - groupIdx := b.groups[group] - for _, mem := range strings.Split(g.Contains, " ") { - r := b.region.index(mem) - mm[r] = append(mm[r], groupIdx) - if g, ok := b.groups[r]; ok { - mm[group] = append(mm[group], g) - containment[groupIdx] = append(containment[groupIdx], g) - } - } - } - - regionContainment := make([]uint64, len(b.groups)) - for _, g := range b.groups { - l := containment[g] - - // Compute the transitive closure of containment. - for i := 0; i < len(l); i++ { - l = append(l, containment[l[i]]...) - } - - // Compute the bitmask. - regionContainment[g] = 1 << g - for _, v := range l { - regionContainment[g] |= 1 << v - } - } - b.writeSlice("regionContainment", regionContainment) - - regionInclusion := make([]uint8, len(b.region.s)) - bvs := make(map[uint64]index) - // Make the first bitvector positions correspond with the groups. - for r, i := range b.groups { - bv := uint64(1 << i) - for _, g := range mm[r] { - bv |= 1 << g - } - bvs[bv] = i - regionInclusion[r] = uint8(bvs[bv]) - } - for r := 1; r < len(b.region.s); r++ { - if _, ok := b.groups[r]; !ok { - bv := uint64(0) - for _, g := range mm[r] { - bv |= 1 << g - } - if bv == 0 { - // Pick the world for unspecified regions. - bv = 1 << b.groups[b.region.index("001")] - } - if _, ok := bvs[bv]; !ok { - bvs[bv] = index(len(bvs)) - } - regionInclusion[r] = uint8(bvs[bv]) - } - } - b.writeSlice("regionInclusion", regionInclusion) - regionInclusionBits := make([]uint64, len(bvs)) - for k, v := range bvs { - regionInclusionBits[v] = uint64(k) - } - // Add bit vectors for increasingly large distances until a fixed point is reached. 
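// Editor's note, illustrative: the loop below iterates to a fixed point —
// for each bit vector it ORs in the vectors of every group whose bit is set,
// appending any newly produced vector so it too gets processed, and
// regionInclusionNext ends up mapping each set of groups to the set of all
// groups reachable from it.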
- regionInclusionNext := []uint8{} - for i := 0; i < len(regionInclusionBits); i++ { - bits := regionInclusionBits[i] - next := bits - for i := uint(0); i < uint(len(b.groups)); i++ { - if bits&(1<<i) != 0 { - next |= regionInclusionBits[i] - } - } - if _, ok := bvs[next]; !ok { - bvs[next] = index(len(bvs)) - regionInclusionBits = append(regionInclusionBits, next) - } - regionInclusionNext = append(regionInclusionNext, uint8(bvs[next])) - } - b.writeSlice("regionInclusionBits", regionInclusionBits) - b.writeSlice("regionInclusionNext", regionInclusionNext) -} - -func (b *builder) writeMatchData() { - lm := b.supp.LanguageMatching.LanguageMatches - cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new") - - regionHierarchy := map[string][]string{} - for _, g := range b.supp.TerritoryContainment.Group { - regions := strings.Split(g.Contains, " ") - regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...) - } - regionToGroups := make([]uint8, len(b.region.s)) - - idToIndex := map[string]uint8{} - for i, mv := range lm[0].MatchVariable { - if i > 6 { - log.Fatalf("Too many groups: %d", i) - } - idToIndex[mv.Id] = uint8(i + 1) - // TODO: also handle '-' - for _, r := range strings.Split(mv.Value, "+") { - todo := []string{r} - for k := 0; k < len(todo); k++ { - r := todo[k] - regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) - todo = append(todo, regionHierarchy[r]...) - } - } - } - b.w.WriteVar("regionToGroups", regionToGroups) - - // maps language id to in- and out-of-group region. - paradigmLocales := [][3]uint16{} - locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") - for i := 0; i < len(locales); i += 2 { - x := [3]uint16{} - for j := 0; j < 2; j++ { - pc := strings.SplitN(locales[i+j], "-", 2) - x[0] = b.langIndex(pc[0]) - if len(pc) == 2 { - x[1+j] = uint16(b.regionIndex(pc[1])) - } - } - paradigmLocales = append(paradigmLocales, x) - } - b.w.WriteVar("paradigmLocales", paradigmLocales) - - b.w.WriteType(mutualIntelligibility{}) - b.w.WriteType(scriptIntelligibility{}) - b.w.WriteType(regionIntelligibility{}) - - matchLang := []mutualIntelligibility{} - matchScript := []scriptIntelligibility{} - matchRegion := []regionIntelligibility{} - // Convert the languageMatch entries in lists keyed by desired language. - for _, m := range lm[0].LanguageMatch { - // Different versions of CLDR use different separators. - desired := strings.Replace(m.Desired, "-", "_", -1) - supported := strings.Replace(m.Supported, "-", "_", -1) - d := strings.Split(desired, "_") - s := strings.Split(supported, "_") - if len(d) != len(s) { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - continue - } - distance, _ := strconv.ParseInt(m.Distance, 10, 8) - switch len(d) { - case 2: - if desired == supported && desired == "*_*" { - continue - } - // language-script pair. - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(d[0])), - haveLang: uint16(b.langIndex(s[0])), - wantScript: uint8(b.scriptIndex(d[1])), - haveScript: uint8(b.scriptIndex(s[1])), - distance: uint8(distance), - }) - if m.Oneway != "true" { - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(s[0])), - haveLang: uint16(b.langIndex(d[0])), - wantScript: uint8(b.scriptIndex(s[1])), - haveScript: uint8(b.scriptIndex(d[1])), - distance: uint8(distance), - }) - } - case 1: - if desired == supported && desired == "*" { - continue - } - if distance == 1 { - // nb == no is already handled by macro mapping. Check there - // really is only this case. - if d[0] != "no" || s[0] != "nb" { - log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) - } - continue - } - // TODO: consider dropping oneway field and just doubling the entry. - matchLang = append(matchLang, mutualIntelligibility{ - want: uint16(b.langIndex(d[0])), - have: uint16(b.langIndex(s[0])), - distance: uint8(distance), - oneway: m.Oneway == "true", - }) - case 3: - if desired == supported && desired == "*_*_*" { - continue - } - if desired != supported { - // This is now supported by CLDR, but only one case, which - // should already be covered by paradigm locales. For instance, - // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in - // testdata/CLDRLocaleMatcherTest.txt tests this.
- if supported != "en_*_GB" { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - continue - } - ri := regionIntelligibility{ - lang: b.langIndex(d[0]), - distance: uint8(distance), - } - if d[1] != "*" { - ri.script = uint8(b.scriptIndex(d[1])) - } - switch { - case d[2] == "*": - ri.group = 0x80 // not contained in anything - case strings.HasPrefix(d[2], "$!"): - ri.group = 0x80 - d[2] = "$" + d[2][len("$!"):] - fallthrough - case strings.HasPrefix(d[2], "$"): - ri.group |= idToIndex[d[2]] - } - matchRegion = append(matchRegion, ri) - default: - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - } - sort.SliceStable(matchLang, func(i, j int) bool { - return matchLang[i].distance < matchLang[j].distance - }) - b.w.WriteComment(` - matchLang holds pairs of langIDs of base languages that are typically - mutually intelligible. Each pair is associated with a confidence and - whether the intelligibility goes one or both ways.`) - b.w.WriteVar("matchLang", matchLang) - - b.w.WriteComment(` - matchScript holds pairs of scriptIDs where readers of one script - can typically also read the other. Each is associated with a confidence.`) - sort.SliceStable(matchScript, func(i, j int) bool { - return matchScript[i].distance < matchScript[j].distance - }) - b.w.WriteVar("matchScript", matchScript) - - sort.SliceStable(matchRegion, func(i, j int) bool { - return matchRegion[i].distance < matchRegion[j].distance - }) - b.w.WriteVar("matchRegion", matchRegion) -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169c..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. 
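BidiBrackets.txt lists each opening bracket with its closing counterpart; the parsing code that fills xorMap and xorMasks follows. Paired brackets differ only in a few low bits, so rather than storing the counterpart rune, the table stores r1 ^ r2 once per distinct mask plus a small per-rune mask index in the trie. A quick demonstration of why only a handful of masks are needed; the pairs here are a hand-picked sample:

    package main

    import "fmt"

    func main() {
        pairs := [][2]rune{
            {'(', ')'}, // U+0028 / U+0029
            {'[', ']'}, // U+005B / U+005D
            {'{', '}'}, // U+007B / U+007D
            {'⟨', '⟩'}, // U+27E8 / U+27E9
        }
        for _, p := range pairs {
            mask := p[0] ^ p[1]
            // XOR-ing either bracket with the shared mask yields the other.
            fmt.Printf("%c ^ %#x = %c\n", p[0], mask, p[0]^mask)
        }
    }

Recovering a bracket's counterpart at runtime is then a single XOR with the mask selected by the rune's trie entry.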
- - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20CF, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb994289..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. -type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa933..000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). 
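A sketch of how quick-check values like these are consumed when testing whether text is already normalized. The QCResult type declared just below is mirrored here so the sketch is self-contained; the per-rune table is an illustrative stand-in for the generated trie, and the loop is simplified in that a real NFC check must also verify that combining marks appear in canonical order:

    package main

    import "fmt"

    type QCResult int

    const (
        QCYes   QCResult = iota // rune appears verbatim in the normal form
        QCNo                    // rune never appears in the normal form
        QCMaybe                 // depends on context; needs the slow path
    )

    // quickCheck stands in for the generated per-rune NFC table. Runes
    // missing from the map get the zero value QCYes, matching the rule
    // that any unspecified value must be Yes.
    var quickCheck = map[rune]QCResult{
        '\u0301': QCMaybe, // COMBINING ACUTE ACCENT may compose leftward
        '\u212B': QCNo,    // ANGSTROM SIGN never survives NFC
    }

    func isNFCQuick(s string) string {
        result := "yes"
        for _, r := range s {
            switch quickCheck[r] {
            case QCNo:
                return "no" // definitely not normalized
            case QCMaybe:
                result = "maybe" // defer to a full check
            }
        }
        return result
    }

    func main() {
        fmt.Println(isNFCQuick("A\u212B")) // no
        fmt.Println(isNFCQuick("A\u0301")) // maybe
    }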
-type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. 
- decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a contiguous one, - // reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many different CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: - // 0958 # ... -// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants.
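The constants declared next pin down the Hangul syllable and jamo blocks. Hangul normalization is fully algorithmic, which is why the generator can special-case it rather than store decompositions; a hedged sketch of the arithmetic, using the Unicode-defined bases but with a T base one below the file's JamoTBase so that "no trailing jamo" occupies slot zero:

    package main

    import "fmt"

    const (
        sBase  = 0xAC00 // first Hangul syllable
        lBase  = 0x1100 // first leading jamo
        vBase  = 0x1161 // first vowel jamo
        tBase  = 0x11A7 // one below the first trailing jamo
        vCount = 21
        tCount = 28
    )

    // compose builds a syllable from L, V and optional T jamo; pass
    // t == tBase for an LV syllable with no trailing jamo.
    func compose(l, v, t rune) rune {
        return sBase + ((l-lBase)*vCount+(v-vBase))*tCount + (t - tBase)
    }

    func main() {
        han := compose(0x1112, 0x1161, 0x11AB) // ᄒ + ᅡ + ᆫ
        fmt.Printf("%c %U\n", han, han)        // 한 U+D55C
        // LV syllables sit at multiples of tCount above sBase, which is
        // exactly the property isHangulWithoutJamoT tests.
        ha := compose(0x1112, 0x1161, tBase)
        fmt.Printf("%c %U lv=%v\n", ha, ha, (ha-sBase)%tCount == 0) // 하 U+D558 lv=true
    }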
-const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. 
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... -// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900..000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. 
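The file comment above describes a classic two-level trie: all but the last byte of a UTF-8 sequence select a block through an index table, and the final byte indexes into a block of 16-bit values. A toy illustration restricted to two-byte UTF-8 sequences, with tiny hand-filled tables standing in for the compacted output the generator emits:

    package main

    import "fmt"

    // index maps a two-byte lead byte (0xC2..0xDF) to a block offset in
    // values; both tables here are hypothetical.
    var index = map[byte]int{0xC2: 0, 0xC3: 64}

    // values holds one uint16 per continuation byte (0x80..0xBF) per block:
    // block 0 covers U+0080..U+00BF, block 1 covers U+00C0..U+00FF.
    var values = make([]uint16, 128)

    func init() {
        for i := 64; i < 128; i++ {
            values[i] = 7 // arbitrary property value for U+00C0..U+00FF
        }
    }

    // lookup returns the value for the first rune of s, assuming a
    // two-byte UTF-8 encoding.
    func lookup(s string) uint16 {
        block := index[s[0]]                // lead byte selects a block
        return values[block+int(s[1]&0x3F)] // trail byte indexes into it
    }

    func main() {
        fmt.Println(lookup("é")) // U+00E9: lead 0xC3 -> block 1 -> 7
        fmt.Println(lookup("£")) // U+00A3: lead 0xC2 -> block 0 -> 0
    }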
- -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/tools/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go deleted file mode 100644 index 6b7052b89..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -// This file defines utilities for working with source positions. - -import ( - "fmt" - "go/ast" - "go/token" - "sort" -) - -// PathEnclosingInterval returns the node that encloses the source -// interval [start, end), and all its ancestors up to the AST root. -// -// The definition of "enclosing" used by this function considers -// additional whitespace abutting a node to be enclosed by it. -// In this example: -// -// z := x + y // add them -// <-A-> -// <----B-----> -// -// the ast.BinaryExpr(+) node is considered to enclose interval B -// even though its [Pos()..End()) is actually only interval A. -// This behaviour makes user interfaces more tolerant of imperfect -// input. -// -// This function treats tokens as nodes, though they are not included -// in the result. e.g. PathEnclosingInterval("+") returns the -// enclosing ast.BinaryExpr("x + y"). -// -// If start==end, the 1-char interval following start is used instead. -// -// The 'exact' result is true if the interval contains only path[0] -// and perhaps some adjacent whitespace. It is false if the interval -// overlaps multiple children of path[0], or if it contains only -// interior whitespace of path[0]. -// In this example: -// -// z := x + y // add them -// <--C--> <---E--> -// ^ -// D -// -// intervals C, D and E are inexact. C is contained by the -// z-assignment statement, because it spans three of its children (:=, -// x, +). So too is the 1-char interval D, because it contains only -// interior whitespace of the assignment. E is considered interior -// whitespace of the BlockStmt containing the assignment. -// -// Precondition: [start, end) both lie within the same file as root. -// TODO(adonovan): return (nil, false) in this case and remove precond. -// Requires FileSet; see loader.tokenFileContainsPos. -// -// Postcondition: path is never nil; it always contains at least 'root'. 
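A usage sketch of PathEnclosingInterval as documented above: parse a snippet, locate the + operator inside "x + y", and print the enclosing node path from the innermost node out to the *ast.File:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        src := "package p\nfunc f() { z := x + y }"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        // Find the position of the + operator in "x + y".
        var opPos token.Pos
        ast.Inspect(f, func(n ast.Node) bool {
            if be, ok := n.(*ast.BinaryExpr); ok {
                opPos = be.OpPos
            }
            return true
        })
        path, exact := astutil.PathEnclosingInterval(f, opPos, opPos+1)
        fmt.Println("exact:", exact) // true: the interval covers just the + token
        for _, n := range path {
            fmt.Printf("%T\n", n) // *ast.BinaryExpr, *ast.AssignStmt, ..., *ast.File
        }
    }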
-// -func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { - // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging - - // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). - var visit func(node ast.Node) bool - visit = func(node ast.Node) bool { - path = append(path, node) - - nodePos := node.Pos() - nodeEnd := node.End() - - // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging - - // Intersect [start, end) with interval of node. - if start < nodePos { - start = nodePos - } - if end > nodeEnd { - end = nodeEnd - } - - // Find sole child that contains [start, end). - children := childrenOf(node) - l := len(children) - for i, child := range children { - // [childPos, childEnd) is unaugmented interval of child. - childPos := child.Pos() - childEnd := child.End() - - // [augPos, augEnd) is whitespace-augmented interval of child. - augPos := childPos - augEnd := childEnd - if i > 0 { - augPos = children[i-1].End() // start of preceding whitespace - } - if i < l-1 { - nextChildPos := children[i+1].Pos() - // Does [start, end) lie between child and next child? - if start >= augEnd && end <= nextChildPos { - return false // inexact match - } - augEnd = nextChildPos // end of following whitespace - } - - // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", - // i, augPos, augEnd, start, end) // debugging - - // Does augmented child strictly contain [start, end)? - if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) - } - - // Does [start, end) overlap multiple children? - // i.e. left-augmented child contains start - // but LR-augmented child does not contain end. - if start < childEnd && end > augEnd { - break - } - } - - // No single child contained [start, end), - // so node is the result. Is it exact? - - // (It's tempting to put this condition before the - // child loop, but it gives the wrong result in the - // case where a node (e.g. ExprStmt) and its sole - // child have equal intervals.) - if start == nodePos && end == nodeEnd { - return true // exact match - } - - return false // inexact: overlaps multiple children - } - - if start > end { - start, end = end, start - } - - if start < root.End() && end > root.Pos() { - if start == end { - end = start + 1 // empty interval => interval of size 1 - } - exact = visit(root) - - // Reverse the path: - for i, l := 0, len(path); i < l/2; i++ { - path[i], path[l-1-i] = path[l-1-i], path[i] - } - } else { - // Selection lies within whitespace preceding the - // first (or following the last) declaration in the file. - // The result nonetheless always includes the ast.File. - path = append(path, root) - } - - return -} - -// tokenNode is a dummy implementation of ast.Node for a single token. -// They are used transiently by PathEnclosingInterval but never escape -// this package. -// -type tokenNode struct { - pos token.Pos - end token.Pos -} - -func (n tokenNode) Pos() token.Pos { - return n.pos -} - -func (n tokenNode) End() token.Pos { - return n.end -} - -func tok(pos token.Pos, len int) ast.Node { - return tokenNode{pos, pos + token.Pos(len)} -} - -// childrenOf returns the direct non-nil children of ast.Node n. -// It may include fake ast.Node implementations for bare tokens. -// it is not safe to call (e.g.) ast.Walk on such nodes. -// -func childrenOf(n ast.Node) []ast.Node { - var children []ast.Node - - // First add nodes for all true subtrees. 
- ast.Inspect(n, func(node ast.Node) bool { - if node == n { // push n - return true // recur - } - if node != nil { // push child - children = append(children, node) - } - return false // no recursion - }) - - // Then add fake Nodes for bare tokens. - switch n := n.(type) { - case *ast.ArrayType: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Elt.End(), len("]"))) - - case *ast.AssignStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.BasicLit: - children = append(children, - tok(n.ValuePos, len(n.Value))) - - case *ast.BinaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) - - case *ast.BranchStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.CallExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - if n.Ellipsis != 0 { - children = append(children, tok(n.Ellipsis, len("..."))) - } - - case *ast.CaseClause: - if n.List == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.ChanType: - switch n.Dir { - case ast.RECV: - children = append(children, tok(n.Begin, len("<-chan"))) - case ast.SEND: - children = append(children, tok(n.Begin, len("chan<-"))) - case ast.RECV | ast.SEND: - children = append(children, tok(n.Begin, len("chan"))) - } - - case *ast.CommClause: - if n.Comm == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.Comment: - // nop - - case *ast.CommentGroup: - // nop - - case *ast.CompositeLit: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("{"))) - - case *ast.DeclStmt: - // nop - - case *ast.DeferStmt: - children = append(children, - tok(n.Defer, len("defer"))) - - case *ast.Ellipsis: - children = append(children, - tok(n.Ellipsis, len("..."))) - - case *ast.EmptyStmt: - // nop - - case *ast.ExprStmt: - // nop - - case *ast.Field: - // TODO(adonovan): Field.{Doc,Comment,Tag}? - - case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), - tok(n.Closing, len(")"))) - - case *ast.File: - // TODO test: Doc - children = append(children, - tok(n.Package, len("package"))) - - case *ast.ForStmt: - children = append(children, - tok(n.For, len("for"))) - - case *ast.FuncDecl: - // TODO(adonovan): FuncDecl.Comment? - - // Uniquely, FuncDecl breaks the invariant that - // preorder traversal yields tokens in lexical order: - // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. - // - // As a workaround, we inline the case for FuncType - // here and order things correctly. 
- // - children = nil // discard ast.Walk(FuncDecl) info subtrees - children = append(children, tok(n.Type.Func, len("func"))) - if n.Recv != nil { - children = append(children, n.Recv) - } - children = append(children, n.Name) - if n.Type.Params != nil { - children = append(children, n.Type.Params) - } - if n.Type.Results != nil { - children = append(children, n.Type.Results) - } - if n.Body != nil { - children = append(children, n.Body) - } - - case *ast.FuncLit: - // nop - - case *ast.FuncType: - if n.Func != 0 { - children = append(children, - tok(n.Func, len("func"))) - } - - case *ast.GenDecl: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - if n.Lparen != 0 { - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - } - - case *ast.GoStmt: - children = append(children, - tok(n.Go, len("go"))) - - case *ast.Ident: - children = append(children, - tok(n.NamePos, len(n.Name))) - - case *ast.IfStmt: - children = append(children, - tok(n.If, len("if"))) - - case *ast.ImportSpec: - // TODO(adonovan): ImportSpec.{Doc,EndPos}? - - case *ast.IncDecStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.IndexExpr: - children = append(children, - tok(n.Lbrack, len("{")), - tok(n.Rbrack, len("}"))) - - case *ast.InterfaceType: - children = append(children, - tok(n.Interface, len("interface"))) - - case *ast.KeyValueExpr: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.LabeledStmt: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.MapType: - children = append(children, - tok(n.Map, len("map"))) - - case *ast.ParenExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.RangeStmt: - children = append(children, - tok(n.For, len("for")), - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.ReturnStmt: - children = append(children, - tok(n.Return, len("return"))) - - case *ast.SelectStmt: - children = append(children, - tok(n.Select, len("select"))) - - case *ast.SelectorExpr: - // nop - - case *ast.SendStmt: - children = append(children, - tok(n.Arrow, len("<-"))) - - case *ast.SliceExpr: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Rbrack, len("]"))) - - case *ast.StarExpr: - children = append(children, tok(n.Star, len("*"))) - - case *ast.StructType: - children = append(children, tok(n.Struct, len("struct"))) - - case *ast.SwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.TypeAssertExpr: - children = append(children, - tok(n.Lparen-1, len(".")), - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.TypeSpec: - // TODO(adonovan): TypeSpec.{Doc,Comment}? - - case *ast.TypeSwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.UnaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.ValueSpec: - // TODO(adonovan): ValueSpec.{Doc,Comment}? - - case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: - // nop - } - - // TODO(adonovan): opt: merge the logic of ast.Inspect() into - // the switch above so we can make interleaved callbacks for - // both Nodes and Tokens in the right order and avoid the need - // to sort. 
- sort.Sort(byPos(children)) - - return children -} - -type byPos []ast.Node - -func (sl byPos) Len() int { - return len(sl) -} -func (sl byPos) Less(i, j int) bool { - return sl[i].Pos() < sl[j].Pos() -} -func (sl byPos) Swap(i, j int) { - sl[i], sl[j] = sl[j], sl[i] -} - -// NodeDescription returns a description of the concrete type of n suitable -// for a user interface. -// -// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, -// StarExpr) we could be much more specific given the path to the AST -// root. Perhaps we should do that. -// -func NodeDescription(n ast.Node) string { - switch n := n.(type) { - case *ast.ArrayType: - return "array type" - case *ast.AssignStmt: - return "assignment" - case *ast.BadDecl: - return "bad declaration" - case *ast.BadExpr: - return "bad expression" - case *ast.BadStmt: - return "bad statement" - case *ast.BasicLit: - return "basic literal" - case *ast.BinaryExpr: - return fmt.Sprintf("binary %s operation", n.Op) - case *ast.BlockStmt: - return "block" - case *ast.BranchStmt: - switch n.Tok { - case token.BREAK: - return "break statement" - case token.CONTINUE: - return "continue statement" - case token.GOTO: - return "goto statement" - case token.FALLTHROUGH: - return "fall-through statement" - } - case *ast.CallExpr: - if len(n.Args) == 1 && !n.Ellipsis.IsValid() { - return "function call (or conversion)" - } - return "function call" - case *ast.CaseClause: - return "case clause" - case *ast.ChanType: - return "channel type" - case *ast.CommClause: - return "communication clause" - case *ast.Comment: - return "comment" - case *ast.CommentGroup: - return "comment group" - case *ast.CompositeLit: - return "composite literal" - case *ast.DeclStmt: - return NodeDescription(n.Decl) + " statement" - case *ast.DeferStmt: - return "defer statement" - case *ast.Ellipsis: - return "ellipsis" - case *ast.EmptyStmt: - return "empty statement" - case *ast.ExprStmt: - return "expression statement" - case *ast.Field: - // Can be any of these: - // struct {x, y int} -- struct field(s) - // struct {T} -- anon struct field - // interface {I} -- interface embedding - // interface {f()} -- interface method - // func (A) func(B) C -- receiver, param(s), result(s) - return "field/method/parameter" - case *ast.FieldList: - return "field/method/parameter list" - case *ast.File: - return "source file" - case *ast.ForStmt: - return "for loop" - case *ast.FuncDecl: - return "function declaration" - case *ast.FuncLit: - return "function literal" - case *ast.FuncType: - return "function type" - case *ast.GenDecl: - switch n.Tok { - case token.IMPORT: - return "import declaration" - case token.CONST: - return "constant declaration" - case token.TYPE: - return "type declaration" - case token.VAR: - return "variable declaration" - } - case *ast.GoStmt: - return "go statement" - case *ast.Ident: - return "identifier" - case *ast.IfStmt: - return "if statement" - case *ast.ImportSpec: - return "import specification" - case *ast.IncDecStmt: - if n.Tok == token.INC { - return "increment statement" - } - return "decrement statement" - case *ast.IndexExpr: - return "index expression" - case *ast.InterfaceType: - return "interface type" - case *ast.KeyValueExpr: - return "key/value association" - case *ast.LabeledStmt: - return "statement label" - case *ast.MapType: - return "map type" - case *ast.Package: - return "package" - case *ast.ParenExpr: - return "parenthesized " + NodeDescription(n.X) - case *ast.RangeStmt: - return "range loop" - case *ast.ReturnStmt: - return 
"return statement" - case *ast.SelectStmt: - return "select statement" - case *ast.SelectorExpr: - return "selector" - case *ast.SendStmt: - return "channel send" - case *ast.SliceExpr: - return "slice expression" - case *ast.StarExpr: - return "*-operation" // load/store expr or pointer type - case *ast.StructType: - return "struct type" - case *ast.SwitchStmt: - return "switch statement" - case *ast.TypeAssertExpr: - return "type assertion" - case *ast.TypeSpec: - return "type specification" - case *ast.TypeSwitchStmt: - return "type switch" - case *ast.UnaryExpr: - return fmt.Sprintf("unary %s operation", n.Op) - case *ast.ValueSpec: - return "value specification" - - } - panic(fmt.Sprintf("unexpected node type: %T", n)) -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go deleted file mode 100644 index 3e4b19536..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package astutil contains common utilities for working with the Go AST. -package astutil // import "golang.org/x/tools/go/ast/astutil" - -import ( - "fmt" - "go/ast" - "go/token" - "strconv" - "strings" -) - -// AddImport adds the import path to the file f, if absent. -func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { - return AddNamedImport(fset, f, "", path) -} - -// AddNamedImport adds the import with the given name and path to the file f, if absent. -// If name is not empty, it is used to rename the import. -// -// For example, calling -// AddNamedImport(fset, f, "pathpkg", "path") -// adds -// import pathpkg "path" -func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { - if imports(f, name, path) { - return false - } - - newImport := &ast.ImportSpec{ - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: strconv.Quote(path), - }, - } - if name != "" { - newImport.Name = &ast.Ident{Name: name} - } - - // Find an import decl to add to. - // The goal is to find an existing import - // whose import path has the longest shared - // prefix with path. - var ( - bestMatch = -1 // length of longest shared prefix - lastImport = -1 // index in f.Decls of the file's final import decl - impDecl *ast.GenDecl // import decl containing the best match - impIndex = -1 // spec index in impDecl containing the best match - - isThirdPartyPath = isThirdParty(path) - ) - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if ok && gen.Tok == token.IMPORT { - lastImport = i - // Do not add to import "C", to avoid disrupting the - // association with its doc comment, breaking cgo. - if declImports(gen, "C") { - continue - } - - // Match an empty import decl if that's all that is available. - if len(gen.Specs) == 0 && bestMatch == -1 { - impDecl = gen - } - - // Compute longest shared prefix with imports in this group and find best - // matched import spec. - // 1. Always prefer import spec with longest shared prefix. - // 2. While match length is 0, - // - for stdlib package: prefer first import spec. - // - for third party package: prefer first third party import spec. - // We cannot use last import spec as best match for third party package - // because grouped imports are usually placed last by goimports -local - // flag. - // See issue #19190. 
- seenAnyThirdParty := false - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - p := importPath(impspec) - n := matchLen(p, path) - if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { - bestMatch = n - impDecl = gen - impIndex = j - } - seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) - } - } - } - - // If no import decl found, add one after the last import. - if impDecl == nil { - impDecl = &ast.GenDecl{ - Tok: token.IMPORT, - } - if lastImport >= 0 { - impDecl.TokPos = f.Decls[lastImport].End() - } else { - // There are no existing imports. - // Our new import, preceded by a blank line, goes after the package declaration - // and after the comment, if any, that starts on the same line as the - // package declaration. - impDecl.TokPos = f.Package - - file := fset.File(f.Package) - pkgLine := file.Line(f.Package) - for _, c := range f.Comments { - if file.Line(c.Pos()) > pkgLine { - break - } - // +2 for a blank line - impDecl.TokPos = c.End() + 2 - } - } - f.Decls = append(f.Decls, nil) - copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) - f.Decls[lastImport+1] = impDecl - } - - // Insert new import at insertAt. - insertAt := 0 - if impIndex >= 0 { - // insert after the found import - insertAt = impIndex + 1 - } - impDecl.Specs = append(impDecl.Specs, nil) - copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) - impDecl.Specs[insertAt] = newImport - pos := impDecl.Pos() - if insertAt > 0 { - // If there is a comment after an existing import, preserve the comment - // position by adding the new import after the comment. - if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { - pos = spec.Comment.End() - } else { - // Assign same position as the previous import, - // so that the sorter sees it as being in the same block. - pos = impDecl.Specs[insertAt-1].Pos() - } - } - if newImport.Name != nil { - newImport.Name.NamePos = pos - } - newImport.Path.ValuePos = pos - newImport.EndPos = pos - - // Clean up parens. impDecl contains at least one spec. - if len(impDecl.Specs) == 1 { - // Remove unneeded parens. - impDecl.Lparen = token.NoPos - } else if !impDecl.Lparen.IsValid() { - // impDecl needs parens added. - impDecl.Lparen = impDecl.Specs[0].Pos() - } - - f.Imports = append(f.Imports, newImport) - - if len(f.Decls) <= 1 { - return true - } - - // Merge all the import declarations into the first one. - var first *ast.GenDecl - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { - continue - } - if first == nil { - first = gen - continue // Don't touch the first one. - } - // We now know there is more than one package in this import - // declaration. Ensure that it ends up parenthesized. - first.Lparen = first.Pos() - // Move the imports of the other import declaration to the first one. - for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() - first.Specs = append(first.Specs, spec) - } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) - i-- - } - - return true -} - -func isThirdParty(importPath string) bool { - // Third party package import path usually contains "." (".com", ".org", ...) - // This logic is taken from golang.org/x/tools/imports package. - return strings.Contains(importPath, ".") -} - -// DeleteImport deletes the import path from the file f, if present. -// If there are duplicate import declarations, all matching ones are deleted. 
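Editor's aside: before the deletion half of the API, the insertion logic above is easiest to grasp end to end. A hedged usage sketch against the canonical golang.org/x/tools/go/ast/astutil package (same API as this vendored copy; the source snippet is invented):

package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\n\nimport \"fmt\"\n\nfunc f() { fmt.Println() }\n"
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Adds `pathpkg "path"` beside the import group whose paths share
	// the longest prefix with "path" — here, the group holding "fmt".
	astutil.AddNamedImport(fset, f, "pathpkg", "path")
	if err := format.Node(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}

The longest-shared-prefix rule, together with the third-party tiebreak described above, is what keeps goimports-style grouping stable as specs are added.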
-func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { - return DeleteNamedImport(fset, f, "", path) -} - -// DeleteNamedImport deletes the import with the given name and path from the file f, if present. -// If there are duplicate import declarations, all matching ones are deleted. -func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup - - // Find the import nodes that import path, if any. - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT { - continue - } - for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) - if importName(impspec) != name || importPath(impspec) != path { - continue - } - - // We found an import spec that imports path. - // Delete it. - delspecs = append(delspecs, impspec) - deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] - - // If this was the last import spec in this decl, - // delete the decl, too. - if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] - i-- - break - } else if len(gen.Specs) == 1 { - if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) - } - if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) - } - for _, cg := range f.Comments { - // Found comment on the same line as the import spec. - if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) - break - } - } - - spec := gen.Specs[0].(*ast.ImportSpec) - - // Move the documentation right after the import decl. - if spec.Doc != nil { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - } - for _, cg := range f.Comments { - if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - break - } - } - } - if j > 0 { - lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) - lastLine := fset.Position(lastImpspec.Path.ValuePos).Line - line := fset.Position(impspec.Path.ValuePos).Line - - // We deleted an entry but now there may be - // a blank line-sized hole where the import was. - if line-lastLine > 1 { - // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. - // Do nothing. - } else if line != fset.File(gen.Rparen).LineCount() { - // There was no blank line. Close the hole. - fset.File(gen.Rparen).MergeLine(line) - } - } - j-- - } - } - - // Delete imports from f.Imports. - for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } - } - - // Delete comments from f.Comments. 
- for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } - - return -} - -// RewriteImport rewrites any import of path oldPath to path newPath. -func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { - for _, imp := range f.Imports { - if importPath(imp) == oldPath { - rewrote = true - // record old End, because the default is to compute - // it using the length of imp.Path.Value. - imp.EndPos = imp.End() - imp.Path.Value = strconv.Quote(newPath) - } - } - return -} - -// UsesImport reports whether a given import is used. -func UsesImport(f *ast.File, path string) (used bool) { - spec := importSpec(f, path) - if spec == nil { - return - } - - name := spec.Name.String() - switch name { - case "": - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - case "_", ".": - // Not sure if this import is used - err on the side of caution. - return true - } - - ast.Walk(visitFn(func(n ast.Node) { - sel, ok := n.(*ast.SelectorExpr) - if ok && isTopName(sel.X, name) { - used = true - } - }), f) - - return -} - -type visitFn func(node ast.Node) - -func (fn visitFn) Visit(node ast.Node) ast.Visitor { - fn(node) - return fn -} - -// imports reports whether f has an import with the specified name and path. -func imports(f *ast.File, name, path string) bool { - for _, s := range f.Imports { - if importName(s) == name && importPath(s) == path { - return true - } - } - return false -} - -// importSpec returns the import spec if f imports path, -// or nil otherwise. -func importSpec(f *ast.File, path string) *ast.ImportSpec { - for _, s := range f.Imports { - if importPath(s) == path { - return s - } - } - return nil -} - -// importName returns the name of s, -// or "" if the import is not named. -func importName(s *ast.ImportSpec) string { - if s.Name == nil { - return "" - } - return s.Name.Name -} - -// importPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. -func importPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err != nil { - return "" - } - return t -} - -// declImports reports whether gen contains an import of path. -func declImports(gen *ast.GenDecl, path string) bool { - if gen.Tok != token.IMPORT { - return false - } - for _, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if importPath(impspec) == path { - return true - } - } - return false -} - -// matchLen returns the length of the longest path segment prefix shared by x and y. -func matchLen(x, y string) int { - n := 0 - for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { - if x[i] == '/' { - n++ - } - } - return n -} - -// isTopName returns true if n is a top-level unresolved identifier with the given name. -func isTopName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.Name == name && id.Obj == nil -} - -// Imports returns the file imports grouped by paragraph. 
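Editor's aside: note that UsesImport above is heuristic — for an unnamed import it guesses the package name from the last path segment, and it errs toward true for dot and blank imports. A small sketch pairing it with DeleteImport (canonical astutil package assumed):

package main

import (
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n\nvar _ = fmt.Sprint\n"
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	fmt.Println(astutil.UsesImport(f, "fmt")) // true: fmt.Sprint is referenced
	fmt.Println(astutil.UsesImport(f, "os"))  // false: imported but never used

	astutil.DeleteImport(fset, f, "os") // removes the spec and closes the gap
	if err := format.Node(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}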
-func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { - var groups [][]*ast.ImportSpec - - for _, decl := range f.Decls { - genDecl, ok := decl.(*ast.GenDecl) - if !ok || genDecl.Tok != token.IMPORT { - break - } - - group := []*ast.ImportSpec{} - - var lastLine int - for _, spec := range genDecl.Specs { - importSpec := spec.(*ast.ImportSpec) - pos := importSpec.Path.ValuePos - line := fset.Position(pos).Line - if lastLine > 0 && pos > 0 && line-lastLine > 1 { - groups = append(groups, group) - group = []*ast.ImportSpec{} - } - group = append(group, importSpec) - lastLine = line - } - groups = append(groups, group) - } - - return groups -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go deleted file mode 100644 index cf72ea990..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -import ( - "fmt" - "go/ast" - "reflect" - "sort" -) - -// An ApplyFunc is invoked by Apply for each node n, even if n is nil, -// before and/or after the node's children, using a Cursor describing -// the current node and providing operations on it. -// -// The return value of ApplyFunc controls the syntax tree traversal. -// See Apply for details. -type ApplyFunc func(*Cursor) bool - -// Apply traverses a syntax tree recursively, starting with root, -// and calling pre and post for each node as described below. -// Apply returns the syntax tree, possibly modified. -// -// If pre is not nil, it is called for each node before the node's -// children are traversed (pre-order). If pre returns false, no -// children are traversed, and post is not called for that node. -// -// If post is not nil, and a prior call of pre didn't return false, -// post is called for each node after its children are traversed -// (post-order). If post returns false, traversal is terminated and -// Apply returns immediately. -// -// Only fields that refer to AST nodes are considered children; -// i.e., token.Pos, Scopes, Objects, and fields of basic types -// (strings, etc.) are ignored. -// -// Children are traversed in the order in which they appear in the -// respective node's struct definition. A package's files are -// traversed in the filenames' alphabetical order. -// -func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { - parent := &struct{ ast.Node }{root} - defer func() { - if r := recover(); r != nil && r != abort { - panic(r) - } - result = parent.Node - }() - a := &application{pre: pre, post: post} - a.apply(parent, "Node", nil, root) - return -} - -var abort = new(int) // singleton, to signal termination of Apply - -// A Cursor describes a node encountered during Apply. -// Information about the node and its parent is available -// from the Node, Parent, Name, and Index methods. -// -// If p is a variable of type and value of the current parent node -// c.Parent(), and f is the field identifier with name c.Name(), -// the following invariants hold: -// -// p.f == c.Node() if c.Index() < 0 -// p.f[c.Index()] == c.Node() if c.Index() >= 0 -// -// The methods Replace, Delete, InsertBefore, and InsertAfter -// can be used to change the AST without disrupting Apply. 
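Editor's aside: before the Cursor type itself, a concrete taste of the API — a hedged sketch that renames an identifier through Cursor.Replace (canonical astutil package; the snippet is invented):

package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\n\nvar x = old()\n\nfunc old() int { return 0 }\n"
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Replace swaps the node into its parent's field or slice slot;
	// the replacement itself is not traversed.
	astutil.Apply(f, func(c *astutil.Cursor) bool {
		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "old" {
			c.Replace(&ast.Ident{Name: "renamed", NamePos: id.NamePos})
		}
		return true
	}, nil)
	if err := format.Node(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}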
-type Cursor struct { - parent ast.Node - name string - iter *iterator // valid if non-nil - node ast.Node -} - -// Node returns the current Node. -func (c *Cursor) Node() ast.Node { return c.node } - -// Parent returns the parent of the current Node. -func (c *Cursor) Parent() ast.Node { return c.parent } - -// Name returns the name of the parent Node field that contains the current Node. -// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns -// the filename for the current Node. -func (c *Cursor) Name() string { return c.name } - -// Index reports the index >= 0 of the current Node in the slice of Nodes that -// contains it, or a value < 0 if the current Node is not part of a slice. -// The index of the current node changes if InsertBefore is called while -// processing the current node. -func (c *Cursor) Index() int { - if c.iter != nil { - return c.iter.index - } - return -1 -} - -// field returns the current node's parent field value. -func (c *Cursor) field() reflect.Value { - return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) -} - -// Replace replaces the current Node with n. -// The replacement node is not walked by Apply. -func (c *Cursor) Replace(n ast.Node) { - if _, ok := c.node.(*ast.File); ok { - file, ok := n.(*ast.File) - if !ok { - panic("attempt to replace *ast.File with non-*ast.File") - } - c.parent.(*ast.Package).Files[c.name] = file - return - } - - v := c.field() - if i := c.Index(); i >= 0 { - v = v.Index(i) - } - v.Set(reflect.ValueOf(n)) -} - -// Delete deletes the current Node from its containing slice. -// If the current Node is not part of a slice, Delete panics. -// As a special case, if the current node is a package file, -// Delete removes it from the package's Files map. -func (c *Cursor) Delete() { - if _, ok := c.node.(*ast.File); ok { - delete(c.parent.(*ast.Package).Files, c.name) - return - } - - i := c.Index() - if i < 0 { - panic("Delete node not contained in slice") - } - v := c.field() - l := v.Len() - reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) - v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) - v.SetLen(l - 1) - c.iter.step-- -} - -// InsertAfter inserts n after the current Node in its containing slice. -// If the current Node is not part of a slice, InsertAfter panics. -// Apply does not walk n. -func (c *Cursor) InsertAfter(n ast.Node) { - i := c.Index() - if i < 0 { - panic("InsertAfter node not contained in slice") - } - v := c.field() - v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) - l := v.Len() - reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) - v.Index(i + 1).Set(reflect.ValueOf(n)) - c.iter.step++ -} - -// InsertBefore inserts n before the current Node in its containing slice. -// If the current Node is not part of a slice, InsertBefore panics. -// Apply will not walk n. -func (c *Cursor) InsertBefore(n ast.Node) { - i := c.Index() - if i < 0 { - panic("InsertBefore node not contained in slice") - } - v := c.field() - v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) - l := v.Len() - reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) - v.Index(i).Set(reflect.ValueOf(n)) - c.iter.index++ -} - -// application carries all the shared data so we can pass it around cheaply. 
-type application struct { - pre, post ApplyFunc - cursor Cursor - iter iterator -} - -func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { - // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { - n = nil - } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead - saved := a.cursor - a.cursor.parent = parent - a.cursor.name = name - a.cursor.iter = iter - a.cursor.node = n - - if a.pre != nil && !a.pre(&a.cursor) { - a.cursor = saved - return - } - - // walk children - // (the order of the cases matches the order of the corresponding node types in go/ast) - switch n := n.(type) { - case nil: - // nothing to do - - // Comments and fields - case *ast.Comment: - // nothing to do - - case *ast.CommentGroup: - if n != nil { - a.applyList(n, "List") - } - - case *ast.Field: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Names") - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Tag", nil, n.Tag) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.FieldList: - a.applyList(n, "List") - - // Expressions - case *ast.BadExpr, *ast.Ident, *ast.BasicLit: - // nothing to do - - case *ast.Ellipsis: - a.apply(n, "Elt", nil, n.Elt) - - case *ast.FuncLit: - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Body", nil, n.Body) - - case *ast.CompositeLit: - a.apply(n, "Type", nil, n.Type) - a.applyList(n, "Elts") - - case *ast.ParenExpr: - a.apply(n, "X", nil, n.X) - - case *ast.SelectorExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Sel", nil, n.Sel) - - case *ast.IndexExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Index", nil, n.Index) - - case *ast.SliceExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Low", nil, n.Low) - a.apply(n, "High", nil, n.High) - a.apply(n, "Max", nil, n.Max) - - case *ast.TypeAssertExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Type", nil, n.Type) - - case *ast.CallExpr: - a.apply(n, "Fun", nil, n.Fun) - a.applyList(n, "Args") - - case *ast.StarExpr: - a.apply(n, "X", nil, n.X) - - case *ast.UnaryExpr: - a.apply(n, "X", nil, n.X) - - case *ast.BinaryExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Y", nil, n.Y) - - case *ast.KeyValueExpr: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - - // Types - case *ast.ArrayType: - a.apply(n, "Len", nil, n.Len) - a.apply(n, "Elt", nil, n.Elt) - - case *ast.StructType: - a.apply(n, "Fields", nil, n.Fields) - - case *ast.FuncType: - a.apply(n, "Params", nil, n.Params) - a.apply(n, "Results", nil, n.Results) - - case *ast.InterfaceType: - a.apply(n, "Methods", nil, n.Methods) - - case *ast.MapType: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - - case *ast.ChanType: - a.apply(n, "Value", nil, n.Value) - - // Statements - case *ast.BadStmt: - // nothing to do - - case *ast.DeclStmt: - a.apply(n, "Decl", nil, n.Decl) - - case *ast.EmptyStmt: - // nothing to do - - case *ast.LabeledStmt: - a.apply(n, "Label", nil, n.Label) - a.apply(n, "Stmt", nil, n.Stmt) - - case *ast.ExprStmt: - a.apply(n, "X", nil, n.X) - - case *ast.SendStmt: - a.apply(n, "Chan", nil, n.Chan) - a.apply(n, "Value", nil, n.Value) - - case *ast.IncDecStmt: - a.apply(n, "X", nil, n.X) - - case *ast.AssignStmt: - a.applyList(n, "Lhs") - a.applyList(n, "Rhs") - - case *ast.GoStmt: - a.apply(n, "Call", nil, n.Call) - - case *ast.DeferStmt: - a.apply(n, "Call", nil, n.Call) - - case *ast.ReturnStmt: - a.applyList(n, "Results") - - case *ast.BranchStmt: - a.apply(n, "Label", nil, n.Label) - - case *ast.BlockStmt: - 
a.applyList(n, "List") - - case *ast.IfStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Cond", nil, n.Cond) - a.apply(n, "Body", nil, n.Body) - a.apply(n, "Else", nil, n.Else) - - case *ast.CaseClause: - a.applyList(n, "List") - a.applyList(n, "Body") - - case *ast.SwitchStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Tag", nil, n.Tag) - a.apply(n, "Body", nil, n.Body) - - case *ast.TypeSwitchStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Assign", nil, n.Assign) - a.apply(n, "Body", nil, n.Body) - - case *ast.CommClause: - a.apply(n, "Comm", nil, n.Comm) - a.applyList(n, "Body") - - case *ast.SelectStmt: - a.apply(n, "Body", nil, n.Body) - - case *ast.ForStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Cond", nil, n.Cond) - a.apply(n, "Post", nil, n.Post) - a.apply(n, "Body", nil, n.Body) - - case *ast.RangeStmt: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - a.apply(n, "X", nil, n.X) - a.apply(n, "Body", nil, n.Body) - - // Declarations - case *ast.ImportSpec: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Path", nil, n.Path) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.ValueSpec: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Names") - a.apply(n, "Type", nil, n.Type) - a.applyList(n, "Values") - a.apply(n, "Comment", nil, n.Comment) - - case *ast.TypeSpec: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.BadDecl: - // nothing to do - - case *ast.GenDecl: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Specs") - - case *ast.FuncDecl: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Recv", nil, n.Recv) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Body", nil, n.Body) - - // Files and packages - case *ast.File: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.applyList(n, "Decls") - // Don't walk n.Comments; they have either been walked already if - // they are Doc comments, or they can be easily walked explicitly. - - case *ast.Package: - // collect and sort names for reproducible behavior - var names []string - for name := range n.Files { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - a.apply(n, name, nil, n.Files[name]) - } - - default: - panic(fmt.Sprintf("Apply: unexpected node type %T", n)) - } - - if a.post != nil && !a.post(&a.cursor) { - panic(abort) - } - - a.cursor = saved -} - -// An iterator controls iteration over a slice of nodes. 
-type iterator struct { - index, step int -} - -func (a *application) applyList(parent ast.Node, name string) { - // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead - saved := a.iter - a.iter.index = 0 - for { - // must reload parent.name each time, since cursor modifications might change it - v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) - if a.iter.index >= v.Len() { - break - } - - // element x may be nil in a bad AST - be cautious - var x ast.Node - if e := v.Index(a.iter.index); e.IsValid() { - x = e.Interface().(ast.Node) - } - - a.iter.step = 1 - a.apply(parent, name, &a.iter, x) - a.iter.index += a.iter.step - } - a.iter = saved -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go deleted file mode 100644 index 763062982..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package astutil - -import "go/ast" - -// Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go deleted file mode 100644 index 98b3987b9..000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -// -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" - -import ( - "bufio" - "bytes" - "fmt" - "go/token" - "go/types" - "io" - "io/ioutil" - - "golang.org/x/tools/go/internal/gcimporter" -) - -// Find returns the name of an object (.o) or archive (.a) file -// containing type information for the specified import path, -// using the workspace layout conventions of go/build. -// If no file was found, an empty filename is returned. -// -// A relative srcDir is interpreted relative to the current working directory. -// -// Find also returns the package's resolved (canonical) import path, -// reflecting the effects of srcDir and vendoring on importPath. -func Find(importPath, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) -} - -// NewReader returns a reader for the export data section of an object -// (.o) or archive (.a) file read from r. 
The new reader may provide -// additional trailing data beyond the end of the export data. -func NewReader(r io.Reader) (io.Reader, error) { - buf := bufio.NewReader(r) - _, err := gcimporter.FindExportData(buf) - // If we ever switch to a zip-like archive format with the ToC - // at the end, we can return the correct portion of export data, - // but for now we must return the entire rest of the file. - return buf, err -} - -// Read reads export data from in, decodes it, and returns type -// information for the package. -// The package name is specified by path. -// File position information is added to fset. -// -// Read may inspect and add to the imports map to ensure that references -// within the export data to other packages are consistent. The caller -// must ensure that imports[path] does not exist, or exists but is -// incomplete (see types.Package.Complete), and Read inserts the -// resulting package into this map entry. -// -// On return, the state of the reader is undefined. -func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - data, err := ioutil.ReadAll(in) - if err != nil { - return nil, fmt.Errorf("reading export data for %q: %v", path, err) - } - - if bytes.HasPrefix(data, []byte("!")) { - return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) - } - - // The App Engine Go runtime v1.6 uses the old export data format. - // TODO(adonovan): delete once v1.7 has been around for a while. - if bytes.HasPrefix(data, []byte("package ")) { - return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) - } - - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - } - - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err -} - -// Write writes encoded type information for the specified package to out. -// The FileSet provides file position information for named objects. -func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - b, err := gcimporter.BExportData(fset, pkg) - if err != nil { - return err - } - _, err = out.Write(b) - return err -} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go deleted file mode 100644 index efe221e7e..000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcexportdata - -import ( - "fmt" - "go/token" - "go/types" - "os" -) - -// NewImporter returns a new instance of the types.Importer interface -// that reads type information from export data files written by gc. -// The Importer also satisfies types.ImporterFrom. -// -// Export data files are located using "go build" workspace conventions -// and the build.Default context. -// -// Use this importer instead of go/importer.For("gc", ...) to avoid the -// version-skew problems described in the documentation of this package, -// or to control the FileSet or access the imports map populated during -// package loading. 
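Editor's aside: Find, NewReader, and Read above chain into the usual lookup pipeline, which NewImporter below packages up. A hedged standalone sketch (GOPATH-era layout assumed; under modules Find may legitimately return an empty filename):

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate compiled export data using go/build workspace conventions.
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // skip the archive/object header
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "->", pkg.Name())
}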
-// -func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { - return importer{fset, imports} -} - -type importer struct { - fset *token.FileSet - imports map[string]*types.Package -} - -func (imp importer) Import(importPath string) (*types.Package, error) { - return imp.ImportFrom(importPath, "", 0) -} - -func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { - filename, path := Find(importPath, srcDir) - if filename == "" { - if importPath == "unsafe" { - // Even for unsafe, call Find first in case - // the package was vendored. - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %s", importPath) - } - - if pkg, ok := imp.imports[path]; ok && pkg.Complete() { - return pkg, nil // cache hit - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - f.Close() - if err != nil { - // add file name to error - err = fmt.Errorf("reading export data: %s: %v", filename, err) - } - }() - - r, err := NewReader(f) - if err != nil { - return nil, err - } - - return Read(r, imp.fset, imp.imports, path) -} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go deleted file mode 100644 index 2713dce64..000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/main.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// The gcexportdata command is a diagnostic tool that displays the -// contents of gc export data files. -package main - -import ( - "flag" - "fmt" - "go/token" - "go/types" - "log" - "os" - - "golang.org/x/tools/go/gcexportdata" - "golang.org/x/tools/go/types/typeutil" -) - -var packageFlag = flag.String("package", "", "alternative package to print") - -func main() { - log.SetPrefix("gcexportdata: ") - log.SetFlags(0) - flag.Usage = func() { - fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") - } - flag.Parse() - if flag.NArg() != 1 { - flag.Usage() - os.Exit(2) - } - filename := flag.Args()[0] - - f, err := os.Open(filename) - if err != nil { - log.Fatal(err) - } - - r, err := gcexportdata.NewReader(f) - if err != nil { - log.Fatalf("%s: %s", filename, err) - } - - // Decode the package. - const primary = "" - imports := make(map[string]*types.Package) - fset := token.NewFileSet() - pkg, err := gcexportdata.Read(r, fset, imports, primary) - if err != nil { - log.Fatalf("%s: %s", filename, err) - } - - // Optionally select an indirectly mentioned package. - if *packageFlag != "" { - pkg = imports[*packageFlag] - if pkg == nil { - fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", - filename, *packageFlag) - for p := range imports { - if p != primary { - fmt.Fprintf(os.Stderr, "\t%s\n", p) - } - } - os.Exit(1) - } - } - - // Print all package-level declarations, including non-exported ones. - fmt.Printf("package %s\n", pkg.Name()) - for _, imp := range pkg.Imports() { - fmt.Printf("import %q\n", imp.Path()) - } - qual := func(p *types.Package) string { - if pkg == p { - return "" - } - return p.Name() - } - scope := pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - fmt.Printf("%s: %s\n", - fset.Position(obj.Pos()), - types.ObjectString(obj, qual)) - - // For types, print each method. 
- if _, ok := obj.(*types.TypeName); ok { - for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { - fmt.Printf("%s: %s\n", - fset.Position(method.Obj().Pos()), - types.SelectionString(method, qual)) - } - } - } -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go deleted file mode 100644 index a807d0aaa..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// If trace is set, debugging output is printed to std out. -const trace = false // default: false - -// Current export format version. Increase with each format change. -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). 
-// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. - p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !ast.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - 
p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". - n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. 
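Editor's aside: the canonicalization idea in this comment never landed here, but it is easy to sketch in isolation; the canonPtr helper below is invented purely for illustration and is not part of the exporter:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// canonPtr memoizes *T per named type T, so every request for *T
// yields one shared object — the canonicalization map the comment
// above envisions.
type canonPtr map[*types.Named]*types.Pointer

func (c canonPtr) ptr(t *types.Named) *types.Pointer {
	if p, ok := c[t]; ok {
		return p
	}
	p := types.NewPointer(t)
	c[t] = p
	return p
}

func main() {
	obj := types.NewTypeName(token.NoPos, nil, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)
	c := canonPtr{}
	fmt.Println(c.ptr(named) == c.ptr(named)) // true: one canonical *T
}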
- - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). - var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. 
- p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !ast.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if ast.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !ast.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // 
float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. - bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. 
-// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". ") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go deleted file mode 100644 index e3c310782..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go +++ /dev/null @@ -1,1036 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. - -package gcimporter - -import ( - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. 
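Editor's aside: BExportData above and BImportData below form a round trip, but this internal package is not importable directly; a hedged sketch drives the pair through the public gcexportdata wrappers, which at this vendored revision call straight into them:

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\n\nconst K = 42\n", 0)
	if err != nil {
		log.Fatal(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("example.com/p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Write encodes via BExportData; Read decodes via BImportData
	// after sniffing the version string, as shown below.
	var buf bytes.Buffer
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
		log.Fatal(err)
	}
	pkg2, err := gcexportdata.Read(&buf, token.NewFileSet(),
		make(map[string]*types.Package), "example.com/p")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg2.Path(), pkg2.Scope().Lookup("K"))
}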
-func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*token.File), - }, - } - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). - // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - -func errorf(format string, args ...interface{}) { - panic(fmt.Sprintf(format, args...)) -} - -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - -const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go - -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line) -} - -// Synthesize a token.Pos -type fakeFileSet struct { - fset *token.FileSet - files map[string]*token.File -} - -func (s *fakeFileSet) pos(file string, line int) token.Pos { - // Since we don't know the set of needed file positions, we - // reserve maxlines positions per file. - const maxlines = 64 * 1024 - f := s.files[file] - if f == nil { - f = s.fset.AddFile(file, -1, maxlines) - s.files[file] = f - // Allocate the fake linebreak indices on first use. - // TODO(adonovan): opt: save ~512KB using a more complex scheme? - fakeLinesOnce.Do(func() { - fakeLines = make([]int, maxlines) - for i := range fakeLines { - fakeLines[i] = i - } - }) - f.SetLines(fakeLines) - } - - if line > maxlines { - line = 1 - } - - // Treat the file as if it contained only newlines - // and column=1: use the line number as the offset. - return f.Pos(line - 1) -} - -var ( - fakeLines []int - fakeLinesOnce sync.Once -) - -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
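One more aside before typ: the fakeFileSet above synthesizes token.Pos values by reserving maxlines offsets per file and pretending every offset starts a new line, so "line N" is simply offset N-1. The same trick in isolation, as a sketch against the plain go/token API:

package main

import (
	"fmt"
	"go/token"
)

const maxlines = 64 * 1024 // the per-file budget fakeFileSet reserves

func main() {
	fset := token.NewFileSet()
	f := fset.AddFile("example.go", -1, maxlines)

	// Treat the file as if it contained only newlines: line i+1 begins
	// at offset i, exactly the fake linebreak table built above.
	lines := make([]int, maxlines)
	for i := range lines {
		lines[i] = i
	}
	f.SetLines(lines)

	// fakeFileSet.pos computes f.Pos(line - 1); line 42 is offset 41.
	fmt.Println(fset.Position(f.Pos(41))) // example.go:42:1
}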
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - -func chanDir(d int) types.ChanDir { - // tag values must match the constants in cmd/compile/internal/gc/go.go - switch d { - case 1 /* Crecv */ : - return types.RecvOnly - case 2 /* Csend */ : - return types.SendOnly - case 3 /* Cboth */ : - return types.SendRecv - default: - errorf("unexpected channel dir %d", d) - return 0 - } -} - -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
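The float wire format decoded above ships a sign, an exponent taken with the mantissa normalized to [0.5, 1), and the mantissa bytes in big-endian order. math/big can express the reconstruction more directly than the byte-reversal dance in importer.float; a hedged sketch of the same arithmetic, not the vendored code path:

package main

import (
	"fmt"
	"math/big"
)

// decodeFloat rebuilds ±(mant / 2^bitlen(mant)) * 2^exp, which is the value
// the exporter split apart with big.Float.MantExp.
func decodeFloat(sign, exp int, mant []byte) *big.Float {
	i := new(big.Int).SetBytes(mant) // wire order is big-endian, like SetBytes
	f := new(big.Float).SetInt(i)
	f.SetMantExp(f, exp-i.BitLen()) // rescale the integer mantissa into [0.5, 1)
	if sign < 0 {
		f.Neg(f)
	}
	return f
}

func main() {
	// 6.0 = 0.75 * 2^3, and the mantissa 0.75 travels as the integer 3 (0b11).
	fmt.Println(decodeFloat(1, 3, []byte{0x03})) // 6
}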
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - if predecl == nil { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - } - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go deleted file mode 100644 index f33dc5613..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. - -package gcimporter - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" -) - -func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) - if err != nil { - return - } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - size, err = strconv.Atoi(s) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") - return - } - name = strings.TrimSpace(string(hdr[:16])) - return -} - -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. 
The hdr result -// is the string before the export data, either "$$" or "$$B". -// -func FindExportData(r *bufio.Reader) (hdr string, err error) { - // Read first line to make sure this is an object file. - line, err := r.ReadSlice('\n') - if err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, _, err = readGopackHeader(r); err != nil { - return - } - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - } - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") - return - } - - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - } - hdr = string(line) - - return -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go deleted file mode 100644 index 9cf186605..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ /dev/null @@ -1,1078 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, -// but it also contains the original source-based importer code for Go1.6. -// Once we stop supporting 1.6, we can remove that code. - -// Package gcimporter provides various functions for reading -// gc-generated object files that can be used to implement the -// Importer interface defined by the Go 1.5 standard library package. -package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" - -import ( - "bufio" - "errors" - "fmt" - "go/build" - "go/constant" - "go/token" - "go/types" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "text/scanner" -) - -// debugging/development support -const debug = false - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -// -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - id = path // make sure we have an id to print in error message - return - } - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// ImportData imports a package by reading the gc-generated export data, -// adds the corresponding package object to the packages map indexed by id, -// and returns the object. -// -// The packages map must contains all packages already imported. The data -// reader position must be the beginning of the export data section. The -// filename is only used in error messages. -// -// If packages[id] contains the completely imported package, that package -// can be used directly, and there is no need to call this function (but -// there is also no harm but for extra time used). -// -func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { - // support for parser error handling - defer func() { - switch r := recover().(type) { - case nil: - // nothing to do - case importError: - err = r - default: - panic(r) // internal error - } - }() - - var p parser - p.init(filename, id, data, packages) - pkg = p.parseExport() - - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -// -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { - var rc io.ReadCloser - var filename, id string - if lookup != nil { - // With custom lookup specified, assume that caller has - // converted path to a canonical import path for use in the map. - if path == "unsafe" { - return types.Unsafe, nil - } - id = path - - // No need to re-import if the package was imported completely before. 
- if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - f, err := lookup(path) - if err != nil { - return nil, err - } - rc = f - } else { - filename, id = FindPkg(path, srcDir) - if filename == "" { - if path == "unsafe" { - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %q", id) - } - - // no need to re-import if the package was imported completely before - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // add file name to error - err = fmt.Errorf("%s: %v", filename, err) - } - }() - rc = f - } - defer rc.Close() - - var hdr string - buf := bufio.NewReader(rc) - if hdr, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$\n": - // Work-around if we don't have a filename; happens only if lookup != nil. - // Either way, the filename is only needed for importer error messages, so - // this is fine. - if filename == "" { - filename = path - } - return ImportData(packages, filename, id, buf) - - case "$$B\n": - var data []byte - data, err = ioutil.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err = IImportData(fset, packages, data[1:], id) - } else { - _, pkg, err = BImportData(fset, packages, data, id) - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -// ---------------------------------------------------------------------------- -// Parser - -// TODO(gri) Imported objects don't have position information. -// Ideally use the debug table line info; alternatively -// create some fake position (or the position of the -// import). That way error messages referring to imported -// objects can print meaningful information. - -// parser parses the exports inside a gc compiler-produced -// object/archive file and populates its scope with the results. 
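Orientation before the parser internals: Import above is the dispatcher — it locates the package file, reads past the object header with FindExportData, and hands "$$\n" data to the textual parser below and "$$B\n" data to BImportData/IImportData. A hypothetical caller looks like this (hypothetical because the package is internal; real code reaches it via golang.org/x/tools/go/gcexportdata):

package main

import (
	"fmt"
	"go/types"
	"log"

	"golang.org/x/tools/go/internal/gcimporter"
)

func main() {
	packages := make(map[string]*types.Package)
	// A nil lookup means FindPkg resolves the .a file and Import opens it.
	pkg, err := gcimporter.Import(packages, "fmt", ".", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Complete()) // fmt true
}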
-type parser struct { - scanner scanner.Scanner - tok rune // current token - lit string // literal string; only valid for Ident, Int, String tokens - id string // package id of imported package - sharedPkgs map[string]*types.Package // package id -> package object (across importer) - localPkgs map[string]*types.Package // package id -> package object (just this package) -} - -func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { - p.scanner.Init(src) - p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } - p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments - p.scanner.Whitespace = 1<<'\t' | 1<<' ' - p.scanner.Filename = filename // for good error messages - p.next() - p.id = id - p.sharedPkgs = packages - if debug { - // check consistency of packages map - for _, pkg := range packages { - if pkg.Name() == "" { - fmt.Printf("no package name for %s\n", pkg.Path()) - } - } - } -} - -func (p *parser) next() { - p.tok = p.scanner.Scan() - switch p.tok { - case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': - p.lit = p.scanner.TokenText() - default: - p.lit = "" - } - if debug { - fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) - } -} - -func declTypeName(pkg *types.Package, name string) *types.TypeName { - scope := pkg.Scope() - if obj := scope.Lookup(name); obj != nil { - return obj.(*types.TypeName) - } - obj := types.NewTypeName(token.NoPos, pkg, name, nil) - // a named type may be referred to before the underlying type - // is known - set it up - types.NewNamed(obj, nil, nil) - scope.Insert(obj) - return obj -} - -// ---------------------------------------------------------------------------- -// Error handling - -// Internal errors are boxed as importErrors. -type importError struct { - pos scanner.Position - err error -} - -func (e importError) Error() string { - return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) -} - -func (p *parser) error(err interface{}) { - if s, ok := err.(string); ok { - err = errors.New(s) - } - // panic with a runtime.Error if err is not an error - panic(importError{p.scanner.Pos(), err.(error)}) -} - -func (p *parser) errorf(format string, args ...interface{}) { - p.error(fmt.Sprintf(format, args...)) -} - -func (p *parser) expect(tok rune) string { - lit := p.lit - if p.tok != tok { - p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) - } - p.next() - return lit -} - -func (p *parser) expectSpecial(tok string) { - sep := 'x' // not white space - i := 0 - for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - i++ - } - if i < len(tok) { - p.errorf("expected %q, got %q", tok, tok[0:i]) - } -} - -func (p *parser) expectKeyword(keyword string) { - lit := p.expect(scanner.Ident) - if lit != keyword { - p.errorf("expected keyword %s, got %q", keyword, lit) - } -} - -// ---------------------------------------------------------------------------- -// Qualified and unqualified names - -// PackageId = string_lit . 
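The scanner configuration in parser.init above is worth seeing against a live input: comments are scanned but skipped, and only tab and space count as whitespace, so '\n' surfaces as a token for the parser to consume. A small sketch tokenizing a qualified name of the shape the grammar below describes:

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader(`@"io".Reader`))
	// Same flags parser.init sets.
	s.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars |
		scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
	s.Whitespace = 1<<'\t' | 1<<' '
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%-8s %q\n", scanner.TokenString(tok), s.TokenText())
	}
	// "@"      "@"
	// String   "\"io\""
	// "."      "."
	// Ident    "Reader"
}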
-// -func (p *parser) parsePackageId() string { - id, err := strconv.Unquote(p.expect(scanner.String)) - if err != nil { - p.error(err) - } - // id == "" stands for the imported package id - // (only known at time of package installation) - if id == "" { - id = p.id - } - return id -} - -// PackageName = ident . -// -func (p *parser) parsePackageName() string { - return p.expect(scanner.Ident) -} - -// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . -func (p *parser) parseDotIdent() string { - ident := "" - if p.tok != scanner.Int { - sep := 'x' // not white space - for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { - ident += p.lit - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - } - } - if ident == "" { - p.expect(scanner.Ident) // use expect() for error handling - } - return ident -} - -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . -// -func (p *parser) parseQualifiedName() (id, name string) { - p.expect('@') - id = p.parsePackageId() - p.expect('.') - // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. - if p.tok == '?' { - p.next() - } else { - name = p.parseDotIdent() - } - return -} - -// getPkg returns the package for a given id. If the package is -// not found, create the package and add it to the p.localPkgs -// and p.sharedPkgs maps. name is the (expected) name of the -// package. If name == "", the package name is expected to be -// set later via an import clause in the export data. -// -// id identifies a package, usually by a canonical package path like -// "encoding/json" but possibly by a non-canonical import path like -// "./json". -// -func (p *parser) getPkg(id, name string) *types.Package { - // package unsafe is not in the packages maps - handle explicitly - if id == "unsafe" { - return types.Unsafe - } - - pkg := p.localPkgs[id] - if pkg == nil { - // first import of id from this package - pkg = p.sharedPkgs[id] - if pkg == nil { - // first import of id by this importer; - // add (possibly unnamed) pkg to shared packages - pkg = types.NewPackage(id, name) - p.sharedPkgs[id] = pkg - } - // add (possibly unnamed) pkg to local packages - if p.localPkgs == nil { - p.localPkgs = make(map[string]*types.Package) - } - p.localPkgs[id] = pkg - } else if name != "" { - // package exists already and we have an expected package name; - // make sure names match or set package name if necessary - if pname := pkg.Name(); pname == "" { - pkg.SetName(name) - } else if pname != name { - p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) - } - } - return pkg -} - -// parseExportedName is like parseQualifiedName, but -// the package id is resolved to an imported *types.Package. -// -func (p *parser) parseExportedName() (pkg *types.Package, name string) { - id, name := p.parseQualifiedName() - pkg = p.getPkg(id, "") - return -} - -// ---------------------------------------------------------------------------- -// Types - -// BasicType = identifier . -// -func (p *parser) parseBasicType() types.Type { - id := p.expect(scanner.Ident) - obj := types.Universe.Lookup(id) - if obj, ok := obj.(*types.TypeName); ok { - return obj.Type() - } - p.errorf("not a basic type: %s", id) - return nil -} - -// ArrayType = "[" int_lit "]" Type . 
-// -func (p *parser) parseArrayType(parent *types.Package) types.Type { - // "[" already consumed and lookahead known not to be "]" - lit := p.expect(scanner.Int) - p.expect(']') - elem := p.parseType(parent) - n, err := strconv.ParseInt(lit, 10, 64) - if err != nil { - p.error(err) - } - return types.NewArray(elem, n) -} - -// MapType = "map" "[" Type "]" Type . -// -func (p *parser) parseMapType(parent *types.Package) types.Type { - p.expectKeyword("map") - p.expect('[') - key := p.parseType(parent) - p.expect(']') - elem := p.parseType(parent) - return types.NewMap(key, elem) -} - -// Name = identifier | "?" | QualifiedName . -// -// For unqualified and anonymous names, the returned package is the parent -// package unless parent == nil, in which case the returned package is the -// package being imported. (The parent package is not nil if the the name -// is an unqualified struct field or interface method name belonging to a -// type declared in another package.) -// -// For qualified names, the returned package is nil (and not created if -// it doesn't exist yet) unless materializePkg is set (which creates an -// unnamed package with valid package path). In the latter case, a -// subsequent import clause is expected to provide a name for the package. -// -func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { - pkg = parent - if pkg == nil { - pkg = p.sharedPkgs[p.id] - } - switch p.tok { - case scanner.Ident: - name = p.lit - p.next() - case '?': - // anonymous - p.next() - case '@': - // exported name prefixed with package path - pkg = nil - var id string - id, name = p.parseQualifiedName() - if materializePkg { - pkg = p.getPkg(id, "") - } - default: - p.error("name expected") - } - return -} - -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - -// Field = Name Type [ string_lit ] . -// -func (p *parser) parseField(parent *types.Package) (*types.Var, string) { - pkg, name := p.parseName(parent, true) - - if name == "_" { - // Blank fields should be package-qualified because they - // are unexported identifiers, but gc does not qualify them. - // Assuming that the ident belongs to the current package - // causes types to change during re-exporting, leading - // to spurious "can't assign A to B" errors from go/types. - // As a workaround, pretend all blank fields belong - // to the same unique dummy package. - const blankpkg = "<_>" - pkg = p.getPkg(blankpkg, blankpkg) - } - - typ := p.parseType(parent) - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - p.errorf("anonymous field expected") - } - anonymous = true - } - tag := "" - if p.tok == scanner.String { - s := p.expect(scanner.String) - var err error - tag, err = strconv.Unquote(s) - if err != nil { - p.errorf("invalid struct tag %s: %s", s, err) - } - } - return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag -} - -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . 
-// -func (p *parser) parseStructType(parent *types.Package) types.Type { - var fields []*types.Var - var tags []string - - p.expectKeyword("struct") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - fld, tag := p.parseField(parent) - if tag != "" && tags == nil { - tags = make([]string, i) - } - if tags != nil { - tags = append(tags, tag) - } - fields = append(fields, fld) - } - p.expect('}') - - return types.NewStruct(fields, tags) -} - -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . -// -func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { - _, name := p.parseName(nil, false) - // remove gc-specific parameter numbering - if i := strings.Index(name, "·"); i >= 0 { - name = name[:i] - } - if p.tok == '.' { - p.expectSpecial("...") - isVariadic = true - } - typ := p.parseType(nil) - if isVariadic { - typ = types.NewSlice(typ) - } - // ignore argument tag (e.g. "noescape") - if p.tok == scanner.String { - p.next() - } - // TODO(gri) should we provide a package? - par = types.NewVar(token.NoPos, nil, name, typ) - return -} - -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . -// -func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { - p.expect('(') - for p.tok != ')' && p.tok != scanner.EOF { - if len(list) > 0 { - p.expect(',') - } - par, variadic := p.parseParameter() - list = append(list, par) - if variadic { - if isVariadic { - p.error("... not on final argument") - } - isVariadic = true - } - } - p.expect(')') - - return -} - -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . -// -func (p *parser) parseSignature(recv *types.Var) *types.Signature { - params, isVariadic := p.parseParameters() - - // optional result type - var results []*types.Var - if p.tok == '(' { - var variadic bool - results, variadic = p.parseParameters() - if variadic { - p.error("... not permitted on result type") - } - } - - return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) -} - -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . -// -// The methods of embedded interfaces are always "inlined" -// by the compiler and thus embedded interfaces are never -// visible in the export data. -// -func (p *parser) parseInterfaceType(parent *types.Package) types.Type { - var methods []*types.Func - - p.expectKeyword("interface") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - pkg, name := p.parseName(parent, true) - sig := p.parseSignature(nil) - methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) - } - p.expect('}') - - // Complete requires the type's embedded interfaces to be fully defined, - // but we do not define any - return types.NewInterface(methods, nil).Complete() -} - -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
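The parameter and signature parsing above bottoms out in plain go/types constructors: a "..." parameter becomes a slice type plus a variadic flag. Assembling the same shape by hand shows what parseSignature ultimately returns (an illustrative sketch only):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	anySlice := types.NewSlice(types.NewInterface(nil, nil)) // ...interface{}
	params := types.NewTuple(
		types.NewVar(token.NoPos, nil, "format", types.Typ[types.String]),
		types.NewVar(token.NoPos, nil, "args", anySlice),
	)
	results := types.NewTuple(
		types.NewVar(token.NoPos, nil, "n", types.Typ[types.Int]),
		types.NewVar(token.NoPos, nil, "err", types.Universe.Lookup("error").Type()),
	)
	sig := types.NewSignature(nil, params, results, true) // true: variadic
	fmt.Println(sig) // func(format string, args ...interface{}) (n int, err error)
}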
-// -func (p *parser) parseChanType(parent *types.Package) types.Type { - dir := types.SendRecv - if p.tok == scanner.Ident { - p.expectKeyword("chan") - if p.tok == '<' { - p.expectSpecial("<-") - dir = types.SendOnly - } - } else { - p.expectSpecial("<-") - p.expectKeyword("chan") - dir = types.RecvOnly - } - elem := p.parseType(parent) - return types.NewChan(dir, elem) -} - -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . -// -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . -// -func (p *parser) parseType(parent *types.Package) types.Type { - switch p.tok { - case scanner.Ident: - switch p.lit { - default: - return p.parseBasicType() - case "struct": - return p.parseStructType(parent) - case "func": - // FuncType - p.next() - return p.parseSignature(nil) - case "interface": - return p.parseInterfaceType(parent) - case "map": - return p.parseMapType(parent) - case "chan": - return p.parseChanType(parent) - } - case '@': - // TypeName - pkg, name := p.parseExportedName() - return declTypeName(pkg, name).Type() - case '[': - p.next() // look ahead - if p.tok == ']' { - // SliceType - p.next() - return types.NewSlice(p.parseType(parent)) - } - return p.parseArrayType(parent) - case '*': - // PointerType - p.next() - return types.NewPointer(p.parseType(parent)) - case '<': - return p.parseChanType(parent) - case '(': - // "(" Type ")" - p.next() - typ := p.parseType(parent) - p.expect(')') - return typ - } - p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) - return nil -} - -// ---------------------------------------------------------------------------- -// Declarations - -// ImportDecl = "import" PackageName PackageId . -// -func (p *parser) parseImportDecl() { - p.expectKeyword("import") - name := p.parsePackageName() - p.getPkg(p.parsePackageId(), name) -} - -// int_lit = [ "+" | "-" ] { "0" ... "9" } . -// -func (p *parser) parseInt() string { - s := "" - switch p.tok { - case '-': - s = "-" - p.next() - case '+': - p.next() - } - return s + p.expect(scanner.Int) -} - -// number = int_lit [ "p" int_lit ] . -// -func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { - // mantissa - mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) - if mant == nil { - panic("invalid mantissa") - } - - if p.lit == "p" { - // exponent (base 2) - p.next() - exp, err := strconv.ParseInt(p.parseInt(), 10, 0) - if err != nil { - p.error(err) - } - if exp < 0 { - denom := constant.MakeInt64(1) - denom = constant.Shift(denom, token.SHL, uint(-exp)) - typ = types.Typ[types.UntypedFloat] - val = constant.BinaryOp(mant, token.QUO, denom) - return - } - if exp > 0 { - mant = constant.Shift(mant, token.SHL, uint(exp)) - } - typ = types.Typ[types.UntypedFloat] - val = mant - return - } - - typ = types.Typ[types.UntypedInt] - val = mant - return -} - -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . 
-// -func (p *parser) parseConstDecl() { - p.expectKeyword("const") - pkg, name := p.parseExportedName() - - var typ0 types.Type - if p.tok != '=' { - // constant types are never structured - no need for parent type - typ0 = p.parseType(nil) - } - - p.expect('=') - var typ types.Type - var val constant.Value - switch p.tok { - case scanner.Ident: - // bool_lit - if p.lit != "true" && p.lit != "false" { - p.error("expected true or false") - } - typ = types.Typ[types.UntypedBool] - val = constant.MakeBool(p.lit == "true") - p.next() - - case '-', scanner.Int: - // int_lit - typ, val = p.parseNumber() - - case '(': - // complex_lit or rune_lit - p.next() - if p.tok == scanner.Char { - p.next() - p.expect('+') - typ = types.Typ[types.UntypedRune] - _, val = p.parseNumber() - p.expect(')') - break - } - _, re := p.parseNumber() - p.expect('+') - _, im := p.parseNumber() - p.expectKeyword("i") - p.expect(')') - typ = types.Typ[types.UntypedComplex] - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - case scanner.Char: - // rune_lit - typ = types.Typ[types.UntypedRune] - val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) - p.next() - - case scanner.String: - // string_lit - typ = types.Typ[types.UntypedString] - val = constant.MakeFromLiteral(p.lit, token.STRING, 0) - p.next() - - default: - p.errorf("expected literal got %s", scanner.TokenString(p.tok)) - } - - if typ0 == nil { - typ0 = typ - } - - pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) -} - -// TypeDecl = "type" ExportedName Type . -// -func (p *parser) parseTypeDecl() { - p.expectKeyword("type") - pkg, name := p.parseExportedName() - obj := declTypeName(pkg, name) - - // The type object may have been imported before and thus already - // have a type associated with it. We still need to parse the type - // structure, but throw it away if the object already has a type. - // This ensures that all imports refer to the same type object for - // a given type declaration. - typ := p.parseType(pkg) - - if name := obj.Type().(*types.Named); name.Underlying() == nil { - name.SetUnderlying(typ) - } -} - -// VarDecl = "var" ExportedName Type . -// -func (p *parser) parseVarDecl() { - p.expectKeyword("var") - pkg, name := p.parseExportedName() - typ := p.parseType(pkg) - pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) -} - -// Func = Signature [ Body ] . -// Body = "{" ... "}" . -// -func (p *parser) parseFunc(recv *types.Var) *types.Signature { - sig := p.parseSignature(recv) - if p.tok == '{' { - p.next() - for i := 1; i > 0; p.next() { - switch p.tok { - case '{': - i++ - case '}': - i-- - } - } - } - return sig -} - -// MethodDecl = "func" Receiver Name Func . -// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . -// -func (p *parser) parseMethodDecl() { - // "func" already consumed - p.expect('(') - recv, _ := p.parseParameter() // receiver - p.expect(')') - - // determine receiver base type object - base := deref(recv.Type()).(*types.Named) - - // parse method name, signature, and possibly inlined body - _, name := p.parseName(nil, false) - sig := p.parseFunc(recv) - - // methods always belong to the same package as the base type object - pkg := base.Obj().Pkg() - - // add method to type unless type was imported before - // and method exists already - // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. - base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) -} - -// FuncDecl = "func" ExportedName Func . 
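parseFunc above throws away inlined function bodies by brace counting alone, which is safe because it counts scanner tokens — a string literal containing '{' arrives as a single String token and never reaches the switch. The same loop in isolation, over a stand-in token stream:

package main

import "fmt"

// skipBody consumes tokens until the already-consumed opening '{' is
// balanced, returning whatever follows — the same i++/i-- loop as parseFunc.
func skipBody(toks []rune) []rune {
	depth := 1
	for i, t := range toks {
		switch t {
		case '{':
			depth++
		case '}':
			depth--
		}
		if depth == 0 {
			return toks[i+1:]
		}
	}
	return nil
}

func main() {
	rest := skipBody([]rune(`x { y } } more`))
	fmt.Printf("%q\n", string(rest)) // " more"
}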
-// -func (p *parser) parseFuncDecl() { - // "func" already consumed - pkg, name := p.parseExportedName() - typ := p.parseFunc(nil) - pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) -} - -// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . -// -func (p *parser) parseDecl() { - if p.tok == scanner.Ident { - switch p.lit { - case "import": - p.parseImportDecl() - case "const": - p.parseConstDecl() - case "type": - p.parseTypeDecl() - case "var": - p.parseVarDecl() - case "func": - p.next() // look ahead - if p.tok == '(' { - p.parseMethodDecl() - } else { - p.parseFuncDecl() - } - } - } - p.expect('\n') -} - -// ---------------------------------------------------------------------------- -// Export - -// Export = "PackageClause { Decl } "$$" . -// PackageClause = "package" PackageName [ "safe" ] "\n" . -// -func (p *parser) parseExport() *types.Package { - p.expectKeyword("package") - name := p.parsePackageName() - if p.tok == scanner.Ident && p.lit == "safe" { - // package was compiled with -u option - ignore - p.next() - } - p.expect('\n') - - pkg := p.getPkg(p.id, name) - - for p.tok != '$' && p.tok != scanner.EOF { - p.parseDecl() - } - - if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { - // don't call next()/expect() since reading past the - // export data may cause scanner errors (e.g. NUL chars) - p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) - } - - if n := p.scanner.ErrorCount; n != 0 { - p.errorf("expected no scanner errors, got %d", n) - } - - // Record all locally referenced packages as imports. - var imports []*types.Package - for id, pkg2 := range p.localPkgs { - if pkg2.Name() == "" { - p.errorf("%s package has no name", id) - } - if id == p.id { - continue // avoid self-edge - } - imports = append(imports, pkg2) - } - sort.Sort(byPath(imports)) - pkg.SetImports(imports) - - // package was imported completely and without errors - pkg.MarkComplete() - - return pkg -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go deleted file mode 100644 index be671c79b..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ /dev/null @@ -1,723 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. - -// +build go1.11 - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "go/ast" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "reflect" - "sort" -) - -// Current indexed export format version. Increase with each format change. -// 0: Go1.11 encoding -const iexportVersion = 0 - -// IExportData returns the binary export data for pkg. -// If no file set is provided, position info will be missing. -func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. 
- panic(e) - } - }() - - p := iexporter{ - out: bytes.NewBuffer(nil), - fset: fset, - allPkgs: map[*types.Package]bool{}, - stringIndex: map[string]uint64{}, - declIndex: map[types.Object]uint64{}, - typIndex: map[types.Type]uint64{}, - } - - for i, pt := range predeclared() { - p.typIndex[pt] = uint64(i) - } - if len(p.typIndex) > predeclReserved { - panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) - } - - // Initialize work queue with exported declarations. - scope := pkg.Scope() - for _, name := range scope.Names() { - if ast.IsExported(name) { - p.pushDecl(scope.Lookup(name)) - } - } - - // Loop until no more work. - for !p.declTodo.empty() { - p.doDecl(p.declTodo.popHead()) - } - - // Append indices to data0 section. - dataLen := uint64(p.data0.Len()) - w := p.newWriter() - w.writeIndex(p.declIndex, pkg) - w.flush() - - // Assemble header. - var hdr intWriter - hdr.WriteByte('i') - hdr.uint64(iexportVersion) - hdr.uint64(uint64(p.strings.Len())) - hdr.uint64(dataLen) - - // Flush output. - io.Copy(p.out, &hdr) - io.Copy(p.out, &p.strings) - io.Copy(p.out, &p.data0) - - return p.out.Bytes(), nil -} - -// writeIndex writes out an object index. mainIndex indicates whether -// we're writing out the main index, which is also read by -// non-compiler tools and includes a complete package description -// (i.e., name and height). -func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { - // Build a map from packages to objects from that package. - pkgObjs := map[*types.Package][]types.Object{} - - // For the main index, make sure to include every package that - // we reference, even if we're not exporting (or reexporting) - // any symbols from it. - pkgObjs[localpkg] = nil - for pkg := range w.p.allPkgs { - pkgObjs[pkg] = nil - } - - for obj := range index { - pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) - } - - var pkgs []*types.Package - for pkg, objs := range pkgObjs { - pkgs = append(pkgs, pkg) - - sort.Slice(objs, func(i, j int) bool { - return objs[i].Name() < objs[j].Name() - }) - } - - sort.Slice(pkgs, func(i, j int) bool { - return pkgs[i].Path() < pkgs[j].Path() - }) - - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.string(pkg.Path()) - w.string(pkg.Name()) - w.uint64(uint64(0)) // package height is not needed for go/types - - objs := pkgObjs[pkg] - w.uint64(uint64(len(objs))) - for _, obj := range objs { - w.string(obj.Name()) - w.uint64(index[obj]) - } - } -} - -type iexporter struct { - fset *token.FileSet - out *bytes.Buffer - - // allPkgs tracks all packages that have been referenced by - // the export data, so we can ensure to include them in the - // main index. - allPkgs map[*types.Package]bool - - declTodo objQueue - - strings intWriter - stringIndex map[string]uint64 - - data0 intWriter - declIndex map[types.Object]uint64 - typIndex map[types.Type]uint64 -} - -// stringOff returns the offset of s within the string section. -// If not already present, it's added to the end. -func (p *iexporter) stringOff(s string) uint64 { - off, ok := p.stringIndex[s] - if !ok { - off = uint64(p.strings.Len()) - p.stringIndex[s] = off - - p.strings.uint64(uint64(len(s))) - p.strings.WriteString(s) - } - return off -} - -// pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(obj types.Object) { - // Package unsafe is known to the compiler and predeclared. 
- assert(obj.Pkg() != types.Unsafe) - - if _, ok := p.declIndex[obj]; ok { - return - } - - p.declIndex[obj] = ^uint64(0) // mark n present in work queue - p.declTodo.pushTail(obj) -} - -// exportWriter handles writing out individual data section chunks. -type exportWriter struct { - p *iexporter - - data intWriter - currPkg *types.Package - prevFile string - prevLine int64 -} - -func (p *iexporter) doDecl(obj types.Object) { - w := p.newWriter() - w.setPkg(obj.Pkg(), false) - - switch obj := obj.(type) { - case *types.Var: - w.tag('V') - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - - case *types.Func: - sig, _ := obj.Type().(*types.Signature) - if sig.Recv() != nil { - panic(internalErrorf("unexpected method: %v", sig)) - } - w.tag('F') - w.pos(obj.Pos()) - w.signature(sig) - - case *types.Const: - w.tag('C') - w.pos(obj.Pos()) - w.value(obj.Type(), obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - w.tag('A') - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - break - } - - // Defined type. - w.tag('T') - w.pos(obj.Pos()) - - underlying := obj.Type().Underlying() - w.typ(underlying, obj.Pkg()) - - t := obj.Type() - if types.IsInterface(t) { - break - } - - named, ok := t.(*types.Named) - if !ok { - panic(internalErrorf("%s is not a defined type", t)) - } - - n := named.NumMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := named.Method(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.param(sig.Recv()) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected object: %v", obj)) - } - - p.declIndex[obj] = w.flush() -} - -func (w *exportWriter) tag(tag byte) { - w.data.WriteByte(tag) -} - -func (w *exportWriter) pos(pos token.Pos) { - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - - // When file is the same as the last position (common case), - // we can save a few bytes by delta encoding just the line - // number. - // - // Note: Because data objects may be read out of order (or not - // at all), we can only apply delta encoding within a single - // object. This is handled implicitly by tracking prevFile and - // prevLine as fields of exportWriter. - - if file == w.prevFile { - delta := line - w.prevLine - w.int64(delta) - if delta == deltaNewFile { - w.int64(-1) - } - } else { - w.int64(deltaNewFile) - w.int64(line) // line >= 0 - w.string(file) - w.prevFile = file - } - w.prevLine = line -} - -func (w *exportWriter) pkg(pkg *types.Package) { - // Ensure any referenced packages are declared in the main index. - w.p.allPkgs[pkg] = true - - w.string(pkg.Path()) -} - -func (w *exportWriter) qualifiedIdent(obj types.Object) { - // Ensure any referenced declarations are written out too. 
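// Aside: the pos encoding above is a file/line delta scheme. While the file
// name is unchanged, only a signed line delta is written; a reserved
// sentinel delta switches files by writing an explicit line and file name.
// A worked trace, assuming deltaNewFile = -64 as in this package's sibling
// bimport.go:
//
//	a.go:10  ->  -64, 10, "a.go"   (new file: sentinel, line, name)
//	a.go:12  ->  2                 (same file: delta only)
//	a.go:12  ->  0
//	b.go:3   ->  -64, 3, "b.go"    (file changed again)
//
// The importer's pos (in iimport.go below) reverses this by tracking the
// same prevFile/prevLine pair, so deltas resynchronize per object.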
- w.p.pushDecl(obj) - - w.string(obj.Name()) - w.pkg(obj.Pkg()) -} - -func (w *exportWriter) typ(t types.Type, pkg *types.Package) { - w.data.uint64(w.p.typOff(t, pkg)) -} - -func (p *iexporter) newWriter() *exportWriter { - return &exportWriter{p: p} -} - -func (w *exportWriter) flush() uint64 { - off := uint64(w.p.data0.Len()) - io.Copy(&w.p.data0, &w.data) - return off -} - -func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { - off, ok := p.typIndex[t] - if !ok { - w := p.newWriter() - w.doTyp(t, pkg) - off = predeclReserved + w.flush() - p.typIndex[t] = off - } - return off -} - -func (w *exportWriter) startType(k itag) { - w.data.uint64(uint64(k)) -} - -func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { - switch t := t.(type) { - case *types.Named: - w.startType(definedType) - w.qualifiedIdent(t.Obj()) - - case *types.Pointer: - w.startType(pointerType) - w.typ(t.Elem(), pkg) - - case *types.Slice: - w.startType(sliceType) - w.typ(t.Elem(), pkg) - - case *types.Array: - w.startType(arrayType) - w.uint64(uint64(t.Len())) - w.typ(t.Elem(), pkg) - - case *types.Chan: - w.startType(chanType) - // 1 RecvOnly; 2 SendOnly; 3 SendRecv - var dir uint64 - switch t.Dir() { - case types.RecvOnly: - dir = 1 - case types.SendOnly: - dir = 2 - case types.SendRecv: - dir = 3 - } - w.uint64(dir) - w.typ(t.Elem(), pkg) - - case *types.Map: - w.startType(mapType) - w.typ(t.Key(), pkg) - w.typ(t.Elem(), pkg) - - case *types.Signature: - w.startType(signatureType) - w.setPkg(pkg, true) - w.signature(t) - - case *types.Struct: - w.startType(structType) - w.setPkg(pkg, true) - - n := t.NumFields() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - f := t.Field(i) - w.pos(f.Pos()) - w.string(f.Name()) - w.typ(f.Type(), pkg) - w.bool(f.Embedded()) - w.string(t.Tag(i)) // note (or tag) - } - - case *types.Interface: - w.startType(interfaceType) - w.setPkg(pkg, true) - - n := t.NumEmbeddeds() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - f := t.Embedded(i) - w.pos(f.Obj().Pos()) - w.typ(f.Obj().Type(), f.Obj().Pkg()) - } - - n = t.NumExplicitMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := t.ExplicitMethod(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) - } -} - -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) - } - - w.currPkg = pkg -} - -func (w *exportWriter) signature(sig *types.Signature) { - w.paramList(sig.Params()) - w.paramList(sig.Results()) - if sig.Params().Len() > 0 { - w.bool(sig.Variadic()) - } -} - -func (w *exportWriter) paramList(tup *types.Tuple) { - n := tup.Len() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - w.param(tup.At(i)) - } -} - -func (w *exportWriter) param(obj types.Object) { - w.pos(obj.Pos()) - w.localIdent(obj) - w.typ(obj.Type(), obj.Pkg()) -} - -func (w *exportWriter) value(typ types.Type, v constant.Value) { - w.typ(typ, nil) - - switch v.Kind() { - case constant.Bool: - w.bool(constant.BoolVal(v)) - case constant.Int: - var i big.Int - if i64, exact := constant.Int64Val(v); exact { - i.SetInt64(i64) - } else if ui64, exact := constant.Uint64Val(v); exact { - i.SetUint64(ui64) - } else { - i.SetString(v.ExactString(), 10) - } - w.mpint(&i, typ) - case constant.Float: - f := constantToFloat(v) - w.mpfloat(f, typ) - case constant.Complex: - w.mpfloat(constantToFloat(constant.Real(v)), typ) - w.mpfloat(constantToFloat(constant.Imag(v)), 
typ) - case constant.String: - w.string(constant.StringVal(v)) - case constant.Unknown: - // package contains type errors - default: - panic(internalErrorf("unexpected value %v (%T)", v, v)) - } -} - -// constantToFloat converts a constant.Value with kind constant.Float to a -// big.Float. -func constantToFloat(x constant.Value) *big.Float { - assert(x.Kind() == constant.Float) - // Use the same floating-point precision (512) as cmd/compile - // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). - const mpprec = 512 - var f big.Float - f.SetPrec(mpprec) - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - n := valueToRat(num) - d := valueToRat(denom) - f.SetRat(n.Quo(n, d)) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - _, ok := f.SetString(x.ExactString()) - assert(ok) - } - return &f -} - -// mpint exports a multi-precision integer. -// -// For unsigned types, small values are written out as a single -// byte. Larger values are written out as a length-prefixed big-endian -// byte string, where the length prefix is encoded as its complement. -// For example, bytes 0, 1, and 2 directly represent the integer -// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, -// 2-, and 3-byte big-endian string follow. -// -// Encoding for signed types use the same general approach as for -// unsigned types, except small values use zig-zag encoding and the -// bottom bit of length prefix byte for large values is reserved as a -// sign bit. -// -// The exact boundary between small and large encodings varies -// according to the maximum number of bytes needed to encode a value -// of type typ. As a special case, 8-bit types are always encoded as a -// single byte. -// -// TODO(mdempsky): Is this level of complexity really worthwhile? -func (w *exportWriter) mpint(x *big.Int, typ types.Type) { - basic, ok := typ.Underlying().(*types.Basic) - if !ok { - panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) - } - - signed, maxBytes := intSize(basic) - - negative := x.Sign() < 0 - if !signed && negative { - panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) - } - - b := x.Bytes() - if len(b) > 0 && b[0] == 0 { - panic(internalErrorf("leading zeros")) - } - if uint(len(b)) > maxBytes { - panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) - } - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - // Check if x can use small value encoding. - if len(b) <= 1 { - var ux uint - if len(b) == 1 { - ux = uint(b[0]) - } - if signed { - ux <<= 1 - if negative { - ux-- - } - } - if ux < maxSmall { - w.data.WriteByte(byte(ux)) - return - } - } - - n := 256 - uint(len(b)) - if signed { - n = 256 - 2*uint(len(b)) - if negative { - n |= 1 - } - } - if n < maxSmall || n >= 256 { - panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) - } - - w.data.WriteByte(byte(n)) - w.data.Write(b) -} - -// mpfloat exports a multi-precision floating point number. -// -// The number's value is decomposed into mantissa × 2**exponent, where -// mantissa is an integer. 
The value is written out as mantissa (as a -// multi-precision integer) and then the exponent, except exponent is -// omitted if mantissa is zero. -func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { - if f.IsInf() { - panic("infinite constant") - } - - // Break into f = mant × 2**exp, with 0.5 <= mant < 1. - var mant big.Float - exp := int64(f.MantExp(&mant)) - - // Scale so that mant is an integer. - prec := mant.MinPrec() - mant.SetMantExp(&mant, int(prec)) - exp -= int64(prec) - - manti, acc := mant.Int(nil) - if acc != big.Exact { - panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) - } - w.mpint(manti, typ) - if manti.Sign() != 0 { - w.int64(exp) - } -} - -func (w *exportWriter) bool(b bool) bool { - var x uint64 - if b { - x = 1 - } - w.uint64(x) - return b -} - -func (w *exportWriter) int64(x int64) { w.data.int64(x) } -func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } -func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } - -func (w *exportWriter) localIdent(obj types.Object) { - // Anonymous parameters. - if obj == nil { - w.string("") - return - } - - name := obj.Name() - if name == "_" { - w.string("_") - return - } - - w.string(name) -} - -type intWriter struct { - bytes.Buffer -} - -func (w *intWriter) int64(x int64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutVarint(buf[:], x) - w.Write(buf[:n]) -} - -func (w *intWriter) uint64(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - w.Write(buf[:n]) -} - -func assert(cond bool) { - if !cond { - panic("internal error: assertion failed") - } -} - -// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. - -// objQueue is a FIFO queue of types.Object. The zero value of objQueue is -// a ready-to-use empty queue. -type objQueue struct { - ring []types.Object - head, tail int -} - -// empty returns true if q contains no Nodes. -func (q *objQueue) empty() bool { - return q.head == q.tail -} - -// pushTail appends n to the tail of the queue. -func (q *objQueue) pushTail(obj types.Object) { - if len(q.ring) == 0 { - q.ring = make([]types.Object, 16) - } else if q.head+len(q.ring) == q.tail { - // Grow the ring. - nring := make([]types.Object, len(q.ring)*2) - // Copy the old elements. - part := q.ring[q.head%len(q.ring):] - if q.tail-q.head <= len(part) { - part = part[:q.tail-q.head] - copy(nring, part) - } else { - pos := copy(nring, part) - copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) - } - q.ring, q.head, q.tail = nring, 0, q.tail-q.head - } - - q.ring[q.tail%len(q.ring)] = obj - q.tail++ -} - -// popHead pops a node from the head of the queue. It panics if q is empty. -func (q *objQueue) popHead() types.Object { - if q.empty() { - panic("dequeue empty") - } - obj := q.ring[q.head%len(q.ring)] - q.head++ - return obj -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go deleted file mode 100644 index 3cb7ae5b9..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
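// Aside: before the reader below, a worked example of the mpint encoding
// shared by exporter and importer (see mpint in iexport.go above). For a
// uint64 value (maxBytes = 8, so maxSmall = 256-8 = 248):
//
//	200   ->  0xC8              (200 < 248: one-byte small encoding)
//	1000  ->  0xFE 0x03 0xE8    (2 data bytes: prefix 256-2 = 254)
//
// For an int64 (signed, maxSmall = 256-2*8 = 240), small values are
// zig-zag encoded (ux = 2x for x >= 0, ux = -2x-1 for x < 0):
//
//	3      ->  0x06
//	-3     ->  0x05
//	-1000  ->  0xFD 0x03 0xE8   (prefix 256-2*2 = 252, low bit set for sign)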
- -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "io" - "sort" -) - -type intReader struct { - *bytes.Reader - path string -} - -func (r *intReader) int64() int64 { - i, err := binary.ReadVarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -func (r *intReader) uint64() uint64 { - i, err := binary.ReadUvarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -const predeclReserved = 32 - -type itag uint64 - -const ( - // Types - definedType itag = iota - pointerType - sliceType - arrayType - chanType - mapType - signatureType - structType - interfaceType -) - -// IImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - const currentVersion = 0 - version := -1 - defer func() { - if e := recover(); e != nil { - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - r := &intReader{bytes.NewReader(data), path} - - version = int(r.uint64()) - switch version { - case currentVersion: - default: - errorf("unknown iexport format version %d", version) - } - - sLen := int64(r.uint64()) - dLen := int64(r.uint64()) - - whence, _ := r.Seek(0, io.SeekCurrent) - stringData := data[whence : whence+sLen] - declData := data[whence+sLen : whence+sLen+dLen] - r.Seek(sLen+dLen, io.SeekCurrent) - - p := iimporter{ - ipath: path, - - stringData: stringData, - stringCache: make(map[uint64]string), - pkgCache: make(map[uint64]*types.Package), - - declData: declData, - pkgIndex: make(map[*types.Package]map[string]uint64), - typCache: make(map[uint64]types.Type), - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*token.File), - }, - } - - for i, pt := range predeclared() { - p.typCache[uint64(i)] = pt - } - - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { - pkgPathOff := r.uint64() - pkgPath := p.stringAt(pkgPathOff) - pkgName := p.stringAt(r.uint64()) - _ = r.uint64() // package height; unused by go/types - - if pkgPath == "" { - pkgPath = path - } - pkg := imports[pkgPath] - if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - - p.pkgCache[pkgPathOff] = pkg - - nameIndex := make(map[string]uint64) - for nSyms := r.uint64(); nSyms > 0; nSyms-- { - name := p.stringAt(r.uint64()) - nameIndex[name] = r.uint64() - } - - p.pkgIndex[pkg] = nameIndex - pkgList[i] = pkg - } - var localpkg *types.Package - for _, pkg := range pkgList { - if pkg.Path() == path { - localpkg = pkg - } - } - - names := make([]string, 0, len(p.pkgIndex[localpkg])) - for name := range p.pkgIndex[localpkg] { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - p.doDecl(localpkg, name) - } - - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := 
append(([]*types.Package)(nil), pkgList[1:]...) - sort.Sort(byPath(list)) - localpkg.SetImports(list) - - // package was imported completely and without errors - localpkg.MarkComplete() - - consumed, _ := r.Seek(0, io.SeekCurrent) - return int(consumed), localpkg, nil -} - -type iimporter struct { - ipath string - - stringData []byte - stringCache map[uint64]string - pkgCache map[uint64]*types.Package - - declData []byte - pkgIndex map[*types.Package]map[string]uint64 - typCache map[uint64]types.Type - - fake fakeFileSet - interfaceList []*types.Interface -} - -func (p *iimporter) doDecl(pkg *types.Package, name string) { - // See if we've already imported this declaration. - if obj := pkg.Scope().Lookup(name); obj != nil { - return - } - - off, ok := p.pkgIndex[pkg][name] - if !ok { - errorf("%v.%v not in index", pkg, name) - } - - r := &importReader{p: p, currPkg: pkg} - r.declReader.Reset(p.declData[off:]) - - r.obj(name) -} - -func (p *iimporter) stringAt(off uint64) string { - if s, ok := p.stringCache[off]; ok { - return s - } - - slen, n := binary.Uvarint(p.stringData[off:]) - if n <= 0 { - errorf("varint failed") - } - spos := off + uint64(n) - s := string(p.stringData[spos : spos+slen]) - p.stringCache[off] = s - return s -} - -func (p *iimporter) pkgAt(off uint64) *types.Package { - if pkg, ok := p.pkgCache[off]; ok { - return pkg - } - path := p.stringAt(off) - errorf("missing package %q in %q", path, p.ipath) - return nil -} - -func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { - if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { - return t - } - - if off < predeclReserved { - errorf("predeclared type missing from cache: %v", off) - } - - r := &importReader{p: p} - r.declReader.Reset(p.declData[off-predeclReserved:]) - t := r.doType(base) - - if base == nil || !isInterface(t) { - p.typCache[off] = t - } - return t -} - -type importReader struct { - p *iimporter - declReader bytes.Reader - currPkg *types.Package - prevFile string - prevLine int64 -} - -func (r *importReader) obj(name string) { - tag := r.byte() - pos := r.pos() - - switch tag { - case 'A': - typ := r.typ() - - r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) - - case 'C': - typ, val := r.value() - - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) - - case 'F': - sig := r.signature(nil) - - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) - - case 'T': - // Types can be recursive. We need to setup a stub - // declaration before recursing. 
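// Aside: the stub is what lets self-referential declarations such as
//
//	type T struct{ next *T }
//
// decode: publishing the *types.Named with a nil underlying first means the
// recursive typAt call for the struct body resolves T to the stub instead
// of re-entering this declaration; SetUnderlying then completes the cycle.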
- obj := types.NewTypeName(pos, r.currPkg, name, nil) - named := types.NewNamed(obj, nil, nil) - r.declare(obj) - - underlying := r.p.typAt(r.uint64(), named).Underlying() - named.SetUnderlying(underlying) - - if !isInterface(underlying) { - for n := r.uint64(); n > 0; n-- { - mpos := r.pos() - mname := r.ident() - recv := r.param() - msig := r.signature(recv) - - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) - } - } - - case 'V': - typ := r.typ() - - r.declare(types.NewVar(pos, r.currPkg, name, typ)) - - default: - errorf("unexpected tag: %v", tag) - } -} - -func (r *importReader) declare(obj types.Object) { - obj.Pkg().Scope().Insert(obj) -} - -func (r *importReader) value() (typ types.Type, val constant.Value) { - typ = r.typ() - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - val = constant.MakeBool(r.bool()) - - case types.IsString: - val = constant.MakeString(r.string()) - - case types.IsInteger: - val = r.mpint(b) - - case types.IsFloat: - val = r.mpfloat(b) - - case types.IsComplex: - re := r.mpfloat(b) - im := r.mpfloat(b) - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - default: - if b.Kind() == types.Invalid { - val = constant.MakeUnknown() - return - } - errorf("unexpected type %v", typ) // panics - panic("unreachable") - } - - return -} - -func intSize(b *types.Basic) (signed bool, maxBytes uint) { - if (b.Info() & types.IsUntyped) != 0 { - return true, 64 - } - - switch b.Kind() { - case types.Float32, types.Complex64: - return true, 3 - case types.Float64, types.Complex128: - return true, 7 - } - - signed = (b.Info() & types.IsUnsigned) == 0 - switch b.Kind() { - case types.Int8, types.Uint8: - maxBytes = 1 - case types.Int16, types.Uint16: - maxBytes = 2 - case types.Int32, types.Uint32: - maxBytes = 4 - default: - maxBytes = 8 - } - - return -} - -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - n, _ := r.declReader.ReadByte() - if uint(n) < maxSmall { - v := int64(n) - if signed { - v >>= 1 - if n&1 != 0 { - v = ^v - } - } - return constant.MakeInt64(v) - } - - v := -n - if signed { - v = -(n &^ 1) >> 1 - } - if v < 1 || uint(v) > maxBytes { - errorf("weird decoding: %v, %v => %v", n, signed, v) - } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) - if signed && n&1 != 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x - } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - } - return x -} - -func (r *importReader) ident() string { - return r.string() -} - -func (r *importReader) qualifiedIdent() (*types.Package, string) { - name := r.string() - pkg := r.pkg() - return pkg, name -} - -func (r *importReader) pos() token.Pos { - delta := r.int64() - if delta != deltaNewFile { - r.prevLine += delta - } else if l := 
r.int64(); l == -1 { - r.prevLine += deltaNewFile - } else { - r.prevFile = r.string() - r.prevLine = l - } - - if r.prevFile == "" && r.prevLine == 0 { - return token.NoPos - } - - return r.p.fake.pos(r.prevFile, int(r.prevLine)) -} - -func (r *importReader) typ() types.Type { - return r.p.typAt(r.uint64(), nil) -} - -func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) - return ok -} - -func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } -func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } - -func (r *importReader) doType(base *types.Named) types.Type { - switch k := r.kind(); k { - default: - errorf("unexpected kind tag in %q: %v", r.p.ipath, k) - return nil - - case definedType: - pkg, name := r.qualifiedIdent() - r.p.doDecl(pkg, name) - return pkg.Scope().Lookup(name).(*types.TypeName).Type() - case pointerType: - return types.NewPointer(r.typ()) - case sliceType: - return types.NewSlice(r.typ()) - case arrayType: - n := r.uint64() - return types.NewArray(r.typ(), int64(n)) - case chanType: - dir := chanDir(int(r.uint64())) - return types.NewChan(dir, r.typ()) - case mapType: - return types.NewMap(r.typ(), r.typ()) - case signatureType: - r.currPkg = r.pkg() - return r.signature(nil) - - case structType: - r.currPkg = r.pkg() - - fields := make([]*types.Var, r.uint64()) - tags := make([]string, len(fields)) - for i := range fields { - fpos := r.pos() - fname := r.ident() - ftyp := r.typ() - emb := r.bool() - tag := r.string() - - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) - tags[i] = tag - } - return types.NewStruct(fields, tags) - - case interfaceType: - r.currPkg = r.pkg() - - embeddeds := make([]types.Type, r.uint64()) - for i := range embeddeds { - _ = r.pos() - embeddeds[i] = r.typ() - } - - methods := make([]*types.Func, r.uint64()) - for i := range methods { - mpos := r.pos() - mname := r.ident() - - // TODO(mdempsky): Matches bimport.go, but I - // don't agree with this. - var recv *types.Var - if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) - } - - msig := r.signature(recv) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) - } - - typ := newInterface(methods, embeddeds) - r.p.interfaceList = append(r.p.interfaceList, typ) - return typ - } -} - -func (r *importReader) kind() itag { - return itag(r.uint64()) -} - -func (r *importReader) signature(recv *types.Var) *types.Signature { - params := r.paramList() - results := r.paramList() - variadic := params.Len() > 0 && r.bool() - return types.NewSignature(recv, params, results, variadic) -} - -func (r *importReader) paramList() *types.Tuple { - xs := make([]*types.Var, r.uint64()) - for i := range xs { - xs[i] = r.param() - } - return types.NewTuple(xs...) 
-} - -func (r *importReader) param() *types.Var { - pos := r.pos() - name := r.ident() - typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) -} - -func (r *importReader) bool() bool { - return r.uint64() != 0 -} - -func (r *importReader) int64() int64 { - n, err := binary.ReadVarint(&r.declReader) - if err != nil { - errorf("readVarint: %v", err) - } - return n -} - -func (r *importReader) uint64() uint64 { - n, err := binary.ReadUvarint(&r.declReader) - if err != nil { - errorf("readUvarint: %v", err) - } - return n -} - -func (r *importReader) byte() byte { - x, err := r.declReader.ReadByte() - if err != nil { - errorf("declReader.ReadByte: %v", err) - } - return x -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go deleted file mode 100644 index 463f25227..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go deleted file mode 100644 index ab28b95cb..000000000 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go deleted file mode 100644 index 38f596daf..000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import ( - "go/ast" - "go/types" - - "golang.org/x/tools/go/ast/astutil" -) - -// Callee returns the named target of a function call, if any: -// a function, method, builtin, or variable. -func Callee(info *types.Info, call *ast.CallExpr) types.Object { - var obj types.Object - switch fun := astutil.Unparen(call.Fun).(type) { - case *ast.Ident: - obj = info.Uses[fun] // type, var, builtin, or declared func - case *ast.SelectorExpr: - if sel, ok := info.Selections[fun]; ok { - obj = sel.Obj() // method or field - } else { - obj = info.Uses[fun.Sel] // qualified identifier? - } - } - if _, ok := obj.(*types.TypeName); ok { - return nil // T(x) is a conversion, not a call - } - return obj -} - -// StaticCallee returns the target (function or method) of a static -// function call, if any. It returns nil for calls to builtins. 
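// Aside: typical use of Callee/StaticCallee is to type-check a file and
// then resolve each call expression to its target. A minimal sketch (src
// is the caller's source text; error handling elided):
//
//	fset := token.NewFileSet()
//	f, _ := parser.ParseFile(fset, "x.go", src, 0)
//	info := &types.Info{
//		Uses:       map[*ast.Ident]types.Object{},
//		Selections: map[*ast.SelectorExpr]*types.Selection{},
//	}
//	conf := types.Config{Importer: importer.Default()}
//	conf.Check("p", fset, []*ast.File{f}, info)
//	ast.Inspect(f, func(n ast.Node) bool {
//		if call, ok := n.(*ast.CallExpr); ok {
//			if fn := typeutil.StaticCallee(info, call); fn != nil {
//				fmt.Println("static call to", fn.FullName())
//			}
//		}
//		return true
//	})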
-func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { - if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { - return f - } - return nil -} - -func interfaceMethod(f *types.Func) bool { - recv := f.Type().(*types.Signature).Recv() - return recv != nil && types.IsInterface(recv.Type()) -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go deleted file mode 100644 index 9c441dba9..000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/imports.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import "go/types" - -// Dependencies returns all dependencies of the specified packages. -// -// Dependent packages appear in topological order: if package P imports -// package Q, Q appears earlier than P in the result. -// The algorithm follows import statements in the order they -// appear in the source code, so the result is a total order. -// -func Dependencies(pkgs ...*types.Package) []*types.Package { - var result []*types.Package - seen := make(map[*types.Package]bool) - var visit func(pkgs []*types.Package) - visit = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !seen[p] { - seen[p] = true - visit(p.Imports()) - result = append(result, p) - } - } - } - visit(pkgs) - return result -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go deleted file mode 100644 index c7f754500..000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to interface{} values. -package typeutil // import "golang.org/x/tools/go/types/typeutil" - -import ( - "bytes" - "fmt" - "go/types" - "reflect" -) - -// Map is a hash-table-based mapping from types (types.Type) to -// arbitrary interface{} values. The concrete types that implement -// the Type interface are pointers. Since they are not canonicalized, -// == cannot be used to check for equivalence, and thus we cannot -// simply use a Go map. -// -// Just as with map[K]V, a nil *Map is a valid empty map. -// -// Not thread-safe. -// -type Map struct { - hasher Hasher // shared by many Maps - table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused - length int // number of map entries -} - -// entry is an entry (key/value association) in a hash bucket. -type entry struct { - key types.Type - value interface{} -} - -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. 
-// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -// -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} - -// Delete removes the entry with the given key, if any. -// It returns true if the entry was found. -// -func (m *Map) Delete(key types.Type) bool { - if m != nil && m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - for i, e := range bucket { - if e.key != nil && types.Identical(key, e.key) { - // We can't compact the bucket as it - // would disturb iterators. - bucket[i] = entry{} - m.length-- - return true - } - } - } - return false -} - -// At returns the map entry for the given key. -// The result is nil if the entry is not present. -// -func (m *Map) At(key types.Type) interface{} { - if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { - if e.key != nil && types.Identical(key, e.key) { - return e.value - } - } - } - return nil -} - -// Set sets the map entry for key to val, -// and returns the previous entry, if any. -func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { - if m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - var hole *entry - for i, e := range bucket { - if e.key == nil { - hole = &bucket[i] - } else if types.Identical(key, e.key) { - prev = e.value - bucket[i].value = value - return - } - } - - if hole != nil { - *hole = entry{key, value} // overwrite deleted entry - } else { - m.table[hash] = append(bucket, entry{key, value}) - } - } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) - m.table = map[uint32][]entry{hash: {entry{key, value}}} - } - - m.length++ - return -} - -// Len returns the number of map entries. -func (m *Map) Len() int { - if m != nil { - return m.length - } - return 0 -} - -// Iterate calls function f on each entry in the map in unspecified order. -// -// If f should mutate the map, Iterate provides the same guarantees as -// Go maps: if f deletes a map entry that Iterate has not yet reached, -// f will not be invoked for it, but if f inserts a map entry that -// Iterate has not yet reached, whether or not f will be invoked for -// it is unspecified. -// -func (m *Map) Iterate(f func(key types.Type, value interface{})) { - if m != nil { - for _, bucket := range m.table { - for _, e := range bucket { - if e.key != nil { - f(e.key, e.value) - } - } - } - } -} - -// Keys returns a new slice containing the set of map keys. -// The order is unspecified. -func (m *Map) Keys() []types.Type { - keys := make([]types.Type, 0, m.Len()) - m.Iterate(func(key types.Type, _ interface{}) { - keys = append(keys, key) - }) - return keys -} - -func (m *Map) toString(values bool) string { - if m == nil { - return "{}" - } - var buf bytes.Buffer - fmt.Fprint(&buf, "{") - sep := "" - m.Iterate(func(key types.Type, value interface{}) { - fmt.Fprint(&buf, sep) - sep = ", " - fmt.Fprint(&buf, key) - if values { - fmt.Fprintf(&buf, ": %q", value) - } - }) - fmt.Fprint(&buf, "}") - return buf.String() -} - -// String returns a string representation of the map's entries. -// Values are printed using fmt.Sprintf("%v", v). -// Order is unspecified. 
-// -func (m *Map) String() string { - return m.toString(true) -} - -// KeysString returns a string representation of the map's key set. -// Order is unspecified. -// -func (m *Map) KeysString() string { - return m.toString(false) -} - -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 -} - -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{make(map[types.Type]uint32)} -} - -// Hash computes a hash value for the given type t such that -// Identical(t, t') => Hash(t) == Hash(t'). -func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash -} - -// hashString computes the Fowler–Noll–Vo hash of s. -func hashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { - // See Identical for rationale. - switch t := t.(type) { - case *types.Basic: - return uint32(t.Kind()) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) - - case *types.Struct: - var hash uint32 = 9059 - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - if f.Anonymous() { - hash += 8861 - } - hash += hashString(t.Tag(i)) - hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) - } - return hash - - case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) - - case *types.Signature: - var hash uint32 = 9091 - if t.Variadic() { - hash *= 8863 - } - return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - - case *types.Interface: - var hash uint32 = 9103 - for i, n := 0, t.NumMethods(); i < n; i++ { - // See go/types.identicalMethods for rationale. - // Method order is not significant. - // Ignore m.Pkg(). - m := t.Method(i) - hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) - } - return hash - - case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) - - case *types.Named: - // Not safe with a copying GC; objects may move. - return uint32(reflect.ValueOf(t.Obj()).Pointer()) - - case *types.Tuple: - return h.hashTuple(t) - } - panic(t) -} - -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - var hash uint32 = 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) - } - return hash -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go deleted file mode 100644 index 32084610f..000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements a cache of method sets. 
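// Aside: before the method-set cache below, a short usage sketch for the
// Map/Hasher pair above. Keys are compared with types.Identical, so two
// structurally identical types built independently share one entry, which
// a plain map[types.Type]V (pointer equality) would not guarantee:
//
//	var m typeutil.Map // the zero value is ready to use
//	t1 := types.NewSlice(types.Typ[types.Int])
//	t2 := types.NewSlice(types.Typ[types.Int]) // distinct pointer, identical type
//	m.Set(t1, "slice of int")
//	fmt.Println(m.At(t2)) // "slice of int": t2 hits t1's entry
//	fmt.Println(m.Len())  // 1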
- -package typeutil - -import ( - "go/types" - "sync" -) - -// A MethodSetCache records the method set of each type T for which -// MethodSet(T) is called so that repeat queries are fast. -// The zero value is a ready-to-use cache instance. -type MethodSetCache struct { - mu sync.Mutex - named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N - others map[types.Type]*types.MethodSet // all other types -} - -// MethodSet returns the method set of type T. It is thread-safe. -// -// If cache is nil, this function is equivalent to types.NewMethodSet(T). -// Utility functions can thus expose an optional *MethodSetCache -// parameter to clients that care about performance. -// -func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { - if cache == nil { - return types.NewMethodSet(T) - } - cache.mu.Lock() - defer cache.mu.Unlock() - - switch T := T.(type) { - case *types.Named: - return cache.lookupNamed(T).value - - case *types.Pointer: - if N, ok := T.Elem().(*types.Named); ok { - return cache.lookupNamed(N).pointer - } - } - - // all other types - // (The map uses pointer equivalence, not type identity.) - mset := cache.others[T] - if mset == nil { - mset = types.NewMethodSet(T) - if cache.others == nil { - cache.others = make(map[types.Type]*types.MethodSet) - } - cache.others[T] = mset - } - return mset -} - -func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { - if cache.named == nil { - cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) - } - // Avoid recomputing mset(*T) for each distinct Pointer - // instance whose underlying type is a named type. - msets, ok := cache.named[named] - if !ok { - msets.value = types.NewMethodSet(named) - msets.pointer = types.NewMethodSet(types.NewPointer(named)) - cache.named[named] = msets - } - return msets -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go deleted file mode 100644 index 9849c24ce..000000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -// This file defines utilities for user interfaces that display types. - -import "go/types" - -// IntuitiveMethodSet returns the intuitive method set of a type T, -// which is the set of methods you can call on an addressable value of -// that type. -// -// The result always contains MethodSet(T), and is exactly MethodSet(T) -// for interface types and for pointer-to-concrete types. -// For all other concrete types T, the result additionally -// contains each method belonging to *T if there is no identically -// named method on T itself. -// -// This corresponds to user intuition about method sets; -// this function is intended only for user interfaces. -// -// The order of the result is as for types.MethodSet(T). -// -func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { - isPointerToConcrete := func(T types.Type) bool { - ptr, ok := T.(*types.Pointer) - return ok && !types.IsInterface(ptr.Elem()) - } - - var result []*types.Selection - mset := msets.MethodSet(T) - if types.IsInterface(T) || isPointerToConcrete(T) { - for i, n := 0, mset.Len(); i < n; i++ { - result = append(result, mset.At(i)) - } - } else { - // T is some other concrete type. 
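// (Aside: IntuitiveMethodSet takes an optional cache; a typical call site
// reuses one MethodSetCache across many queries so repeated lookups of the
// same named type are amortized. T here is any types.Type the caller holds:
//
//	var cache typeutil.MethodSetCache // zero value is ready to use
//	sels := typeutil.IntuitiveMethodSet(T, &cache)
//
// Passing nil instead of &cache is also valid and falls back to
// types.NewMethodSet on every call.)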
- // Report methods of T and *T, preferring those of T. - pmset := msets.MethodSet(types.NewPointer(T)) - for i, n := 0, pmset.Len(); i < n; i++ { - meth := pmset.At(i) - if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { - meth = m - } - result = append(result, meth) - } - - } - return result -} diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json new file mode 100644 index 000000000..6b36a9296 --- /dev/null +++ b/vendor/google.golang.org/api/internal/service-account.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "project_id", + "private_key_id": "private_key_id", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n", + "client_email": "xyz@developer.gserviceaccount.com", + "client_id": "123", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go deleted file mode 100644 index 7b27e6b12..000000000 --- a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -/* -Package cloudsql exposes access to Google Cloud SQL databases. - -This package does not work in App Engine "flexible environment". - -This package is intended for MySQL drivers to make App Engine-specific -connections. 
Applications should use this package through database/sql: -Select a pure Go MySQL driver that supports this package, and use sql.Open -with protocol "cloudsql" and an address of the Cloud SQL instance. - -A Go MySQL driver that has been tested to work well with Cloud SQL -is the go-sql-driver: - import "database/sql" - import _ "github.com/go-sql-driver/mysql" - - db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname") - - -Another driver that works well with Cloud SQL is the mymysql driver: - import "database/sql" - import _ "github.com/ziutek/mymysql/godrv" - - db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password") - - -Using either of these drivers, you can perform a standard SQL query. -This example assumes there is a table named 'users' with -columns 'first_name' and 'last_name': - - rows, err := db.Query("SELECT first_name, last_name FROM users") - if err != nil { - log.Errorf(ctx, "db.Query: %v", err) - } - defer rows.Close() - - for rows.Next() { - var firstName string - var lastName string - if err := rows.Scan(&firstName, &lastName); err != nil { - log.Errorf(ctx, "rows.Scan: %v", err) - continue - } - log.Infof(ctx, "First: %v - Last: %v", firstName, lastName) - } - if err := rows.Err(); err != nil { - log.Errorf(ctx, "Row error: %v", err) - } -*/ -package cloudsql - -import ( - "net" -) - -// Dial connects to the named Cloud SQL instance. -func Dial(instance string) (net.Conn, error) { - return connect(instance) -} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go deleted file mode 100644 index af62dba14..000000000 --- a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package cloudsql - -import ( - "net" - - "appengine/cloudsql" -) - -func connect(instance string) (net.Conn, error) { - return cloudsql.Dial(instance) -} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go deleted file mode 100644 index 90fa7b31e..000000000 --- a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
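// Aside: cloudsql_classic.go above and this file are the standard
// build-constraint split: one function name, connect, with a real
// implementation compiled only under the "appengine" tag and an
// error-returning stub under "!appengine", so Dial compiles on every
// platform. The same pattern in miniature (each snippet is its own file;
// the names are illustrative):
//
//	// +build appengine
//
//	package p
//	func dial() (net.Conn, error) { return cloudsql.Dial("instance") }
//
//	// +build !appengine
//
//	package p
//	func dial() (net.Conn, error) { return nil, errors.New("p: unsupported platform") }
//
// (Since Go 1.17 the //go:build form supersedes // +build; this vintage of
// the tree predates it.)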
- -// +build !appengine - -package cloudsql - -import ( - "errors" - "net" -) - -func connect(instance string) (net.Conn, error) { - return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`) -} diff --git a/vendor/honnef.co/go/js/dom/LICENSE b/vendor/honnef.co/go/js/dom/LICENSE deleted file mode 100644 index 676a9feae..000000000 --- a/vendor/honnef.co/go/js/dom/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Dominik Honnef - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/honnef.co/go/js/dom/README.md b/vendor/honnef.co/go/js/dom/README.md deleted file mode 100644 index 3ea280fee..000000000 --- a/vendor/honnef.co/go/js/dom/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# js/dom - -Package dom provides GopherJS bindings for the JavaScript DOM APIs. - -## Install - - go get honnef.co/go/js/dom - -## Documentation - -For documentation, see http://godoc.org/honnef.co/go/js/dom diff --git a/vendor/honnef.co/go/js/dom/dom.go b/vendor/honnef.co/go/js/dom/dom.go deleted file mode 100644 index f84d1399a..000000000 --- a/vendor/honnef.co/go/js/dom/dom.go +++ /dev/null @@ -1,3082 +0,0 @@ -// Package dom provides GopherJS bindings for the JavaScript DOM APIs. -// -// This package is an in progress effort of providing idiomatic Go -// bindings for the DOM, wrapping the JavaScript DOM APIs. The API is -// neither complete nor frozen yet, but a great amount of the DOM is -// already useable. -// -// While the package tries to be idiomatic Go, it also tries to stick -// closely to the JavaScript APIs, so that one does not need to learn -// a new set of APIs if one is already familiar with it. -// -// One decision that hasn't been made yet is what parts exactly should -// be part of this package. It is, for example, possible that the -// canvas APIs will live in a separate package. On the other hand, -// types such as StorageEvent (the event that gets fired when the -// HTML5 storage area changes) will be part of this package, simply -// due to how the DOM is structured – even if the actual storage APIs -// might live in a separate package. This might require special care -// to avoid circular dependencies. -// -// The documentation for some of the identifiers is based on the -// MDN Web Docs by Mozilla Contributors (https://developer.mozilla.org/en-US/docs/Web/API), -// licensed under CC-BY-SA 2.5 (https://creativecommons.org/licenses/by-sa/2.5/). 
-// -// -// Getting started -// -// The usual entry point of using the dom package is by using the -// GetWindow() function which will return a Window, from which you can -// get things such as the current Document. -// -// -// Interfaces -// -// The DOM has a big amount of different element and event types, but -// they all follow three interfaces. All functions that work on or -// return generic elements/events will return one of the three -// interfaces Element, HTMLElement or Event. In these interface values -// there will be concrete implementations, such as -// HTMLParagraphElement or FocusEvent. It's also not unusual that -// values of type Element also implement HTMLElement. In all cases, -// type assertions can be used. -// -// Example: -// el := dom.GetWindow().Document().QuerySelector(".some-element") -// htmlEl := el.(dom.HTMLElement) -// pEl := el.(*dom.HTMLParagraphElement) -// -// -// Live collections -// -// Several functions in the JavaScript DOM return "live" -// collections of elements, that is collections that will be -// automatically updated when elements get removed or added to the -// DOM. Our bindings, however, return static slices of elements that, -// once created, will not automatically reflect updates to the DOM. -// This is primarily done so that slices can actually be used, as -// opposed to a form of iterator, but also because we think that -// magically changing data isn't Go's nature and that snapshots of -// state are a lot easier to reason about. -// -// This does not, however, mean that all objects are snapshots. -// Elements, events and generally objects that aren't slices or maps -// are simple wrappers around JavaScript objects, and as such -// attributes as well as method calls will always return the most -// current data. To reflect this behaviour, these bindings use -// pointers to make the semantics clear. Consider the following -// example: -// -// d := dom.GetWindow().Document() -// e1 := d.GetElementByID("my-element") -// e2 := d.GetElementByID("my-element") -// -// e1.Class().SetString("some-class") -// println(e1.Class().String() == e2.Class().String()) -// -// The above example will print `true`. -// -// -// DOMTokenList -// -// Some objects in the JS API have two versions of attributes, one -// that returns a string and one that returns a DOMTokenList to ease -// manipulation of string-delimited lists. Some other objects only -// provide DOMTokenList, sometimes DOMSettableTokenList. To simplify -// these bindings, only the DOMTokenList variant will be made -// available, by the type TokenList. In cases where the string -// attribute was the only way to completely replace the value, our -// TokenList will provide Set([]string) and SetString(string) methods, -// which will be able to accomplish the same. Additionally, our -// TokenList will provide methods to convert it to strings and slices. -// -// -// Backwards compatibility -// -// This package has a relatively stable API. However, there will be -// backwards incompatible changes from time to time. This is because -// the package isn't complete yet, as well as because the DOM is a -// moving target, and APIs do change sometimes. -// -// While an attempt is made to reduce changing function signatures to -// a minimum, it can't always be guaranteed. Sometimes mistakes in the -// bindings are found that require changing arguments or return -// values. -// -// Interfaces defined in this package may also change on a -// semi-regular basis, as new methods are added to them. 
This happens -// because the bindings aren't complete and can never really be, as -// new features are added to the DOM. -// -// If you depend on none of the APIs changing unexpectedly, you're -// advised to vendor this package. -package dom // import "honnef.co/go/js/dom" - -import ( - "image" - "image/color" - "strings" - "time" - - "github.com/gopherjs/gopherjs/js" -) - -// toString returns the string representation of o. If o is nil or -// undefined, the empty string will be returned instead. -func toString(o *js.Object) string { - if o == nil || o == js.Undefined { - return "" - } - return o.String() -} - -func callRecover(o *js.Object, fn string, args ...interface{}) (err error) { - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr != nil { - err = panicErr - } else { - panic(e) - } - }() - o.Call(fn, args...) - return nil -} - -func elementConstructor(o *js.Object) *js.Object { - if n := o.Get("node"); n != js.Undefined { - // Support elements wrapped in Polymer's DOM APIs. - return n.Get("constructor") - } - return o.Get("constructor") -} - -func arrayToObjects(o *js.Object) []*js.Object { - var out []*js.Object - for i := 0; i < o.Length(); i++ { - out = append(out, o.Index(i)) - } - return out -} - -func nodeListToObjects(o *js.Object) []*js.Object { - if o.Get("constructor") == js.Global.Get("Array") { - // Support Polymer's DOM APIs, which uses Arrays instead of - // NodeLists - return arrayToObjects(o) - } - var out []*js.Object - length := o.Get("length").Int() - for i := 0; i < length; i++ { - out = append(out, o.Call("item", i)) - } - return out -} - -func nodeListToNodes(o *js.Object) []Node { - var out []Node - for _, obj := range nodeListToObjects(o) { - out = append(out, wrapNode(obj)) - } - return out -} - -func nodeListToElements(o *js.Object) []Element { - var out []Element - for _, obj := range nodeListToObjects(o) { - out = append(out, wrapElement(obj)) - } - return out -} - -func nodeListToHTMLElements(o *js.Object) []HTMLElement { - var out []HTMLElement - for _, obj := range nodeListToObjects(o) { - out = append(out, wrapHTMLElement(obj)) - } - return out -} - -func WrapDocument(o *js.Object) Document { - return wrapDocument(o) -} - -func WrapDocumentFragment(o *js.Object) DocumentFragment { - return wrapDocumentFragment(o) -} - -func WrapNode(o *js.Object) Node { - return wrapNode(o) -} - -func WrapElement(o *js.Object) Element { - return wrapElement(o) -} - -func WrapHTMLElement(o *js.Object) HTMLElement { - return wrapHTMLElement(o) -} - -func wrapDocument(o *js.Object) Document { - switch elementConstructor(o) { - case js.Global.Get("HTMLDocument"): - return &htmlDocument{&document{&BasicNode{o}}} - default: - return &document{&BasicNode{o}} - } -} - -func wrapDocumentFragment(o *js.Object) DocumentFragment { - switch elementConstructor(o) { - // TODO: do we have any other stuff we want to check - default: - return &documentFragment{&BasicNode{o}} - } -} - -func wrapNode(o *js.Object) Node { - if o == nil || o == js.Undefined { - return nil - } - switch elementConstructor(o) { - // TODO all the non-element cases - case js.Global.Get("Text"): - return &Text{&BasicNode{o}} - default: - return wrapElement(o) - } -} - -func wrapElement(o *js.Object) Element { - if o == nil || o == js.Undefined { - return nil - } - switch elementConstructor(o) { - // TODO all the non-HTML cases - default: - return wrapHTMLElement(o) - } -} - -func wrapHTMLElement(o *js.Object) HTMLElement { - if o == nil || o == 
js.Undefined { - return nil - } - el := &BasicHTMLElement{&BasicElement{&BasicNode{o}}} - c := elementConstructor(o) - switch c { - case js.Global.Get("HTMLAnchorElement"): - return &HTMLAnchorElement{BasicHTMLElement: el, URLUtils: &URLUtils{Object: o}} - case js.Global.Get("HTMLAppletElement"): - return &HTMLAppletElement{BasicHTMLElement: el} - case js.Global.Get("HTMLAreaElement"): - return &HTMLAreaElement{BasicHTMLElement: el, URLUtils: &URLUtils{Object: o}} - case js.Global.Get("HTMLAudioElement"): - return &HTMLAudioElement{HTMLMediaElement: &HTMLMediaElement{BasicHTMLElement: el}} - case js.Global.Get("HTMLBaseElement"): - return &HTMLBaseElement{BasicHTMLElement: el} - case js.Global.Get("HTMLBodyElement"): - return &HTMLBodyElement{BasicHTMLElement: el} - case js.Global.Get("HTMLBRElement"): - return &HTMLBRElement{BasicHTMLElement: el} - case js.Global.Get("HTMLButtonElement"): - return &HTMLButtonElement{BasicHTMLElement: el} - case js.Global.Get("HTMLCanvasElement"): - return &HTMLCanvasElement{BasicHTMLElement: el} - case js.Global.Get("HTMLDataElement"): - return &HTMLDataElement{BasicHTMLElement: el} - case js.Global.Get("HTMLDataListElement"): - return &HTMLDataListElement{BasicHTMLElement: el} - case js.Global.Get("HTMLDirectoryElement"): - return &HTMLDirectoryElement{BasicHTMLElement: el} - case js.Global.Get("HTMLDivElement"): - return &HTMLDivElement{BasicHTMLElement: el} - case js.Global.Get("HTMLDListElement"): - return &HTMLDListElement{BasicHTMLElement: el} - case js.Global.Get("HTMLEmbedElement"): - return &HTMLEmbedElement{BasicHTMLElement: el} - case js.Global.Get("HTMLFieldSetElement"): - return &HTMLFieldSetElement{BasicHTMLElement: el} - case js.Global.Get("HTMLFontElement"): - return &HTMLFontElement{BasicHTMLElement: el} - case js.Global.Get("HTMLFormElement"): - return &HTMLFormElement{BasicHTMLElement: el} - case js.Global.Get("HTMLFrameElement"): - return &HTMLFrameElement{BasicHTMLElement: el} - case js.Global.Get("HTMLFrameSetElement"): - return &HTMLFrameSetElement{BasicHTMLElement: el} - case js.Global.Get("HTMLHeadElement"): - return &HTMLHeadElement{BasicHTMLElement: el} - case js.Global.Get("HTMLHeadingElement"): - return &HTMLHeadingElement{BasicHTMLElement: el} - case js.Global.Get("HTMLHtmlElement"): - return &HTMLHtmlElement{BasicHTMLElement: el} - case js.Global.Get("HTMLHRElement"): - return &HTMLHRElement{BasicHTMLElement: el} - case js.Global.Get("HTMLIFrameElement"): - return &HTMLIFrameElement{BasicHTMLElement: el} - case js.Global.Get("HTMLImageElement"): - return &HTMLImageElement{BasicHTMLElement: el} - case js.Global.Get("HTMLInputElement"): - return &HTMLInputElement{BasicHTMLElement: el} - case js.Global.Get("HTMLKeygenElement"): - return &HTMLKeygenElement{BasicHTMLElement: el} - case js.Global.Get("HTMLLabelElement"): - return &HTMLLabelElement{BasicHTMLElement: el} - case js.Global.Get("HTMLLegendElement"): - return &HTMLLegendElement{BasicHTMLElement: el} - case js.Global.Get("HTMLLIElement"): - return &HTMLLIElement{BasicHTMLElement: el} - case js.Global.Get("HTMLLinkElement"): - return &HTMLLinkElement{BasicHTMLElement: el} - case js.Global.Get("HTMLMapElement"): - return &HTMLMapElement{BasicHTMLElement: el} - case js.Global.Get("HTMLMediaElement"): - return &HTMLMediaElement{BasicHTMLElement: el} - case js.Global.Get("HTMLMenuElement"): - return &HTMLMenuElement{BasicHTMLElement: el} - case js.Global.Get("HTMLMetaElement"): - return &HTMLMetaElement{BasicHTMLElement: el} - case js.Global.Get("HTMLMeterElement"): - return 
&HTMLMeterElement{BasicHTMLElement: el} - case js.Global.Get("HTMLModElement"): - return &HTMLModElement{BasicHTMLElement: el} - case js.Global.Get("HTMLObjectElement"): - return &HTMLObjectElement{BasicHTMLElement: el} - case js.Global.Get("HTMLOListElement"): - return &HTMLOListElement{BasicHTMLElement: el} - case js.Global.Get("HTMLOptGroupElement"): - return &HTMLOptGroupElement{BasicHTMLElement: el} - case js.Global.Get("HTMLOptionElement"): - return &HTMLOptionElement{BasicHTMLElement: el} - case js.Global.Get("HTMLOutputElement"): - return &HTMLOutputElement{BasicHTMLElement: el} - case js.Global.Get("HTMLParagraphElement"): - return &HTMLParagraphElement{BasicHTMLElement: el} - case js.Global.Get("HTMLParamElement"): - return &HTMLParamElement{BasicHTMLElement: el} - case js.Global.Get("HTMLPreElement"): - return &HTMLPreElement{BasicHTMLElement: el} - case js.Global.Get("HTMLProgressElement"): - return &HTMLProgressElement{BasicHTMLElement: el} - case js.Global.Get("HTMLQuoteElement"): - return &HTMLQuoteElement{BasicHTMLElement: el} - case js.Global.Get("HTMLScriptElement"): - return &HTMLScriptElement{BasicHTMLElement: el} - case js.Global.Get("HTMLSelectElement"): - return &HTMLSelectElement{BasicHTMLElement: el} - case js.Global.Get("HTMLSourceElement"): - return &HTMLSourceElement{BasicHTMLElement: el} - case js.Global.Get("HTMLSpanElement"): - return &HTMLSpanElement{BasicHTMLElement: el} - case js.Global.Get("HTMLStyleElement"): - return &HTMLStyleElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableElement"): - return &HTMLTableElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableCaptionElement"): - return &HTMLTableCaptionElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableCellElement"): - return &HTMLTableCellElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableDataCellElement"): - return &HTMLTableDataCellElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableHeaderCellElement"): - return &HTMLTableHeaderCellElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableColElement"): - return &HTMLTableColElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableRowElement"): - return &HTMLTableRowElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTableSectionElement"): - return &HTMLTableSectionElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTextAreaElement"): - return &HTMLTextAreaElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTimeElement"): - return &HTMLTimeElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTitleElement"): - return &HTMLTitleElement{BasicHTMLElement: el} - case js.Global.Get("HTMLTrackElement"): - return &HTMLTrackElement{BasicHTMLElement: el} - case js.Global.Get("HTMLUListElement"): - return &HTMLUListElement{BasicHTMLElement: el} - case js.Global.Get("HTMLUnknownElement"): - return &HTMLUnknownElement{BasicHTMLElement: el} - case js.Global.Get("HTMLVideoElement"): - return &HTMLVideoElement{HTMLMediaElement: &HTMLMediaElement{BasicHTMLElement: el}} - case js.Global.Get("HTMLElement"): - return el - default: - return el - } -} - -func getForm(o *js.Object) *HTMLFormElement { - form := wrapHTMLElement(o.Get("form")) - if form == nil { - return nil - } - return form.(*HTMLFormElement) -} - -func getLabels(o *js.Object) []*HTMLLabelElement { - labels := nodeListToElements(o.Get("labels")) - out := make([]*HTMLLabelElement, len(labels)) - for i, label := range labels { - out[i] = label.(*HTMLLabelElement) - } - return out -} - -func getOptions(o *js.Object, attr string) 
[]*HTMLOptionElement {
-	options := nodeListToElements(o.Get(attr))
-	out := make([]*HTMLOptionElement, len(options))
-	for i, option := range options {
-		out[i] = option.(*HTMLOptionElement)
-	}
-	return out
-}
-
-func GetWindow() Window {
-	return &window{js.Global}
-}
-
-type TokenList struct {
-	dtl *js.Object // the underlying DOMTokenList
-	o   *js.Object // the object to which the DOMTokenList belongs
-	sa  string     // the name of the corresponding string attribute, empty if there isn't one
-
-	Length int `js:"length"`
-}
-
-func (tl *TokenList) Item(idx int) string {
-	o := tl.dtl.Call("item", idx)
-	return toString(o)
-}
-
-func (tl *TokenList) Contains(token string) bool {
-	return tl.dtl.Call("contains", token).Bool()
-}
-
-func (tl *TokenList) Add(token string) {
-	tl.dtl.Call("add", token)
-}
-
-func (tl *TokenList) Remove(token string) {
-	tl.dtl.Call("remove", token)
-}
-
-func (tl *TokenList) Toggle(token string) {
-	tl.dtl.Call("toggle", token)
-}
-
-func (tl *TokenList) String() string {
-	if tl.sa != "" {
-		return tl.o.Get(tl.sa).String()
-	}
-	if tl.dtl.Get("constructor") == js.Global.Get("DOMSettableTokenList") {
-		return tl.dtl.Get("value").String()
-	}
-	// We could manually construct the string, but I am not aware of
-	// any case where we have neither a string attribute nor
-	// DOMSettableTokenList.
-	return ""
-}
-
-func (tl *TokenList) Slice() []string {
-	var out []string
-	length := tl.dtl.Get("length").Int()
-	for i := 0; i < length; i++ {
-		out = append(out, tl.dtl.Call("item", i).String())
-	}
-	return out
-}
-
-// SetString sets the TokenList's value to the space-separated list of
-// tokens in s.
-func (tl *TokenList) SetString(s string) {
-	if tl.sa != "" {
-		tl.o.Set(tl.sa, s)
-		return
-	}
-	if tl.dtl.Get("constructor") == js.Global.Get("DOMSettableTokenList") {
-		tl.dtl.Set("value", s)
-		return
-	}
-	// This shouldn't be possible
-	panic("no way to SetString on this TokenList")
-}
-
-// Set sets the TokenList's value to the list of tokens in s.
-//
-// Individual tokens in s shouldn't contain spaces.
-func (tl *TokenList) Set(s []string) {
-	tl.SetString(strings.Join(s, " "))
-}
-
-type Document interface {
-	Node
-	ParentNode
-
-	Async() bool
-	SetAsync(bool)
-	Doctype() DocumentType
-	DocumentElement() Element
-	DocumentURI() string
-	Implementation() DOMImplementation
-	LastStyleSheetSet() string
-	PreferredStyleSheetSet() string // TODO correct type?
-	SelectedStyleSheetSet() string  // TODO correct type?
-	StyleSheets() []StyleSheet      // TODO s/StyleSheet/Stylesheet/ ?
-	StyleSheetSets() []StyleSheet   // TODO correct type?
- AdoptNode(node Node) Node - ImportNode(node Node, deep bool) Node - CreateElement(name string) Element - CreateElementNS(namespace, name string) Element - CreateTextNode(s string) *Text - ElementFromPoint(x, y int) Element - EnableStyleSheetsForSet(name string) - GetElementsByClassName(name string) []Element - GetElementsByTagName(name string) []Element - GetElementsByTagNameNS(ns, name string) []Element - GetElementByID(id string) Element - QuerySelector(sel string) Element - QuerySelectorAll(sel string) []Element - - CreateDocumentFragment() DocumentFragment -} - -type DocumentFragment interface { - Node - ParentNode - QuerySelector(sel string) Element - QuerySelectorAll(sel string) []Element - GetElementByID(id string) Element -} - -type HTMLDocument interface { - Document - - ActiveElement() HTMLElement - Body() HTMLElement - Cookie() string - SetCookie(string) - DefaultView() Window - DesignMode() bool - SetDesignMode(bool) - Domain() string - SetDomain(string) - Forms() []*HTMLFormElement - Head() *HTMLHeadElement - Images() []*HTMLImageElement - LastModified() time.Time - Links() []HTMLElement - Location() *Location - Plugins() []*HTMLEmbedElement - ReadyState() string - Referrer() string - Scripts() []*HTMLScriptElement - Title() string - SetTitle(string) - URL() string - - // TODO HTMLDocument methods -} - -type documentFragment struct { - *BasicNode -} - -func (d documentFragment) GetElementByID(id string) Element { - return wrapElement(d.Call("getElementById", id)) -} - -func (d documentFragment) QuerySelector(sel string) Element { - return (&BasicElement{&BasicNode{d.Object}}).QuerySelector(sel) -} - -func (d documentFragment) QuerySelectorAll(sel string) []Element { - return (&BasicElement{&BasicNode{d.Object}}).QuerySelectorAll(sel) -} - -type document struct { - *BasicNode -} - -type htmlDocument struct { - *document -} - -func (d *htmlDocument) ActiveElement() HTMLElement { - return wrapHTMLElement(d.Get("activeElement")) -} - -func (d *htmlDocument) Body() HTMLElement { - return wrapHTMLElement(d.Get("body")) -} - -func (d *htmlDocument) Cookie() string { - return d.Get("cookie").String() -} - -func (d *htmlDocument) SetCookie(s string) { - d.Set("cookie", s) -} - -func (d *htmlDocument) DefaultView() Window { - return &window{d.Get("defaultView")} -} - -func (d *htmlDocument) DesignMode() bool { - s := d.Get("designMode").String() - return s != "off" -} - -func (d *htmlDocument) SetDesignMode(b bool) { - s := "off" - if b { - s = "on" - } - d.Set("designMode", s) -} - -func (d *htmlDocument) Domain() string { - return d.Get("domain").String() -} - -func (d *htmlDocument) SetDomain(s string) { - d.Set("domain", s) -} - -func (d *htmlDocument) Forms() []*HTMLFormElement { - var els []*HTMLFormElement - forms := d.Get("forms") - length := forms.Get("length").Int() - for i := 0; i < length; i++ { - els = append(els, wrapHTMLElement(forms.Call("item", i)).(*HTMLFormElement)) - } - return els -} - -func (d *htmlDocument) Head() *HTMLHeadElement { - head := wrapElement(d.Get("head")) - if head == nil { - return nil - } - return head.(*HTMLHeadElement) -} - -func (d *htmlDocument) Images() []*HTMLImageElement { - var els []*HTMLImageElement - images := d.Get("images") - length := images.Get("length").Int() - for i := 0; i < length; i++ { - els = append(els, wrapHTMLElement(images.Call("item", i)).(*HTMLImageElement)) - } - return els -} - -func (d *htmlDocument) LastModified() time.Time { - return d.Get("lastModified").Interface().(time.Time) -} - -func (d *htmlDocument) 
Links() []HTMLElement { - var els []HTMLElement - links := d.Get("links") - length := links.Get("length").Int() - for i := 0; i < length; i++ { - els = append(els, wrapHTMLElement(links.Call("item", i))) - } - return els -} - -func (d *htmlDocument) Location() *Location { - o := d.Get("location") - return &Location{Object: o, URLUtils: &URLUtils{Object: o}} -} - -func (d *htmlDocument) Plugins() []*HTMLEmbedElement { - var els []*HTMLEmbedElement - forms := d.Get("plugins") - length := forms.Get("length").Int() - for i := 0; i < length; i++ { - els = append(els, wrapHTMLElement(forms.Call("item", i)).(*HTMLEmbedElement)) - } - return els -} - -func (d *htmlDocument) ReadyState() string { - return d.Get("readyState").String() -} - -func (d *htmlDocument) Referrer() string { - return d.Get("referrer").String() -} - -func (d *htmlDocument) Scripts() []*HTMLScriptElement { - var els []*HTMLScriptElement - forms := d.Get("scripts") - length := forms.Get("length").Int() - for i := 0; i < length; i++ { - els = append(els, wrapHTMLElement(forms.Call("item", i)).(*HTMLScriptElement)) - } - return els -} - -func (d *htmlDocument) Title() string { - return d.Get("title").String() -} - -func (d *htmlDocument) SetTitle(s string) { - d.Set("title", s) -} - -func (d *htmlDocument) URL() string { - return d.Get("URL").String() -} - -func (d document) Async() bool { - return d.Get("async").Bool() -} - -func (d document) SetAsync(b bool) { - d.Set("async", b) -} - -func (d document) Doctype() DocumentType { - // FIXME implement - panic("not implemented") -} - -func (d document) DocumentElement() Element { - return wrapElement(d.Get("documentElement")) -} - -func (d document) DocumentURI() string { - return d.Get("documentURI").String() -} - -func (d document) Implementation() DOMImplementation { - // FIXME implement - panic("not implemented") -} - -func (d document) LastStyleSheetSet() string { - return d.Get("lastStyleSheetSet").String() -} - -func (d document) PreferredStyleSheetSet() string { - return d.Get("preferredStyleSheetSet").String() -} - -func (d document) SelectedStyleSheetSet() string { - return d.Get("selectedStyleSheetSet").String() -} - -func (d document) StyleSheets() []StyleSheet { - // FIXME implement - panic("not implemented") -} - -func (d document) StyleSheetSets() []StyleSheet { - // FIXME implement - panic("not implemented") -} - -func (d document) AdoptNode(node Node) Node { - return wrapNode(d.Call("adoptNode", node.Underlying())) -} - -func (d document) ImportNode(node Node, deep bool) Node { - return wrapNode(d.Call("importNode", node.Underlying(), deep)) -} - -func (d document) CreateDocumentFragment() DocumentFragment { - return wrapDocumentFragment(d.Call("createDocumentFragment")) -} - -func (d document) CreateElement(name string) Element { - return wrapElement(d.Call("createElement", name)) -} - -func (d document) CreateElementNS(ns string, name string) Element { - return wrapElement(d.Call("createElementNS", ns, name)) -} - -func (d document) CreateTextNode(s string) *Text { - return wrapNode(d.Call("createTextNode", s)).(*Text) -} - -func (d document) ElementFromPoint(x, y int) Element { - return wrapElement(d.Call("elementFromPoint", x, y)) -} - -func (d document) EnableStyleSheetsForSet(name string) { - d.Call("enableStyleSheetsForSet", name) -} - -func (d document) GetElementsByClassName(name string) []Element { - return (&BasicElement{&BasicNode{d.Object}}).GetElementsByClassName(name) -} - -func (d document) GetElementsByTagName(name string) []Element { - return 
(&BasicElement{&BasicNode{d.Object}}).GetElementsByTagName(name) -} - -func (d document) GetElementsByTagNameNS(ns, name string) []Element { - return (&BasicElement{&BasicNode{d.Object}}).GetElementsByTagNameNS(ns, name) -} - -func (d document) GetElementByID(id string) Element { - return wrapElement(d.Call("getElementById", id)) -} - -func (d document) QuerySelector(sel string) Element { - return (&BasicElement{&BasicNode{d.Object}}).QuerySelector(sel) -} - -func (d document) QuerySelectorAll(sel string) []Element { - return (&BasicElement{&BasicNode{d.Object}}).QuerySelectorAll(sel) -} - -type URLUtils struct { - *js.Object - - Href string `js:"href"` - Protocol string `js:"protocol"` - Host string `js:"host"` - Hostname string `js:"hostname"` - Port string `js:"port"` - Pathname string `js:"pathname"` - Search string `js:"search"` - Hash string `js:"hash"` - Username string `js:"username"` - Password string `js:"password"` - Origin string `js:"origin"` -} - -// TODO Location methods - -type Location struct { - *js.Object - *URLUtils -} - -type HTMLElement interface { - Element - GlobalEventHandlers - - AccessKey() string - Dataset() map[string]string - SetAccessKey(string) - AccessKeyLabel() string - SetAccessKeyLabel(string) - ContentEditable() string - SetContentEditable(string) - IsContentEditable() bool - Dir() string - SetDir(string) - Draggable() bool - SetDraggable(bool) - Lang() string - SetLang(string) - OffsetHeight() float64 - OffsetLeft() float64 - OffsetParent() HTMLElement - OffsetTop() float64 - OffsetWidth() float64 - Style() *CSSStyleDeclaration - Title() string - SetTitle(string) - Blur() - Click() - Focus() -} -type SVGElement interface { - Element - // TODO -} - -type GlobalEventHandlers interface{} - -type Window interface { - EventTarget - - Console() *Console - Document() Document - FrameElement() Element - Location() *Location - Name() string - SetName(string) - InnerHeight() int - InnerWidth() int - Length() int - Opener() Window - OuterHeight() int - OuterWidth() int - ScrollX() int - ScrollY() int - Parent() Window - ScreenX() int - ScreenY() int - ScrollMaxX() int - ScrollMaxY() int - Top() Window - History() History - Navigator() Navigator - Screen() *Screen - Alert(string) - Back() - Blur() - CancelAnimationFrame(int) - ClearInterval(int) - ClearTimeout(int) - Close() - Confirm(string) bool - Focus() - Forward() - GetComputedStyle(el Element, pseudoElt string) *CSSStyleDeclaration - GetSelection() Selection - Home() - MoveBy(dx, dy int) - MoveTo(x, y int) - Open(url, name, features string) Window - OpenDialog(url, name, features string, args []interface{}) Window - PostMessage(message string, target string, transfer []interface{}) - Print() - Prompt(prompt string, initial string) string - RequestAnimationFrame(callback func(time.Duration)) int - ResizeBy(dw, dh int) - ResizeTo(w, h int) - Scroll(x, y int) - ScrollBy(dx, dy int) - ScrollByLines(int) - ScrollTo(x, y int) - SetCursor(name string) - SetInterval(fn func(), delay int) int - SetTimeout(fn func(), delay int) int - Stop() - // TODO constructors -} - -type window struct { - // TODO EventTarget - *js.Object -} - -func (w *window) Console() *Console { - return &Console{w.Get("console")} -} - -func (w *window) Document() Document { - return wrapDocument(w.Get("document")) -} - -func (w *window) FrameElement() Element { - return wrapElement(w.Get("frameElement")) -} - -func (w *window) Location() *Location { - o := w.Get("location") - return &Location{Object: o, URLUtils: &URLUtils{Object: o}} -} - -func 
(w *window) Name() string { - return w.Get("name").String() -} - -func (w *window) SetName(s string) { - w.Set("name", s) -} - -func (w *window) InnerHeight() int { - return w.Get("innerHeight").Int() -} - -func (w *window) InnerWidth() int { - return w.Get("innerWidth").Int() -} - -func (w *window) Length() int { - return w.Get("length").Int() -} - -func (w *window) Opener() Window { - return &window{w.Get("opener")} -} - -func (w *window) OuterHeight() int { - return w.Get("outerHeight").Int() -} - -func (w *window) OuterWidth() int { - return w.Get("outerWidth").Int() -} - -func (w *window) ScrollX() int { - return w.Get("scrollX").Int() -} - -func (w *window) ScrollY() int { - return w.Get("scrollY").Int() -} - -func (w *window) Parent() Window { - return &window{w.Get("parent")} -} - -func (w *window) ScreenX() int { - return w.Get("screenX").Int() -} - -func (w *window) ScreenY() int { - return w.Get("screenY").Int() -} - -func (w *window) ScrollMaxX() int { - return w.Get("scrollMaxX").Int() -} - -func (w *window) ScrollMaxY() int { - return w.Get("scrollMaxY").Int() -} - -func (w *window) Top() Window { - return &window{w.Get("top")} -} - -func (w *window) History() History { - // FIXME implement - return nil -} - -func (w *window) Navigator() Navigator { - // FIXME implement - panic("not implemented") -} - -func (w *window) Screen() *Screen { - return &Screen{Object: w.Get("screen")} -} - -func (w *window) Alert(msg string) { - w.Call("alert", msg) -} - -func (w *window) Back() { - w.Call("back") -} - -func (w *window) Blur() { - w.Call("blur") -} - -func (w *window) ClearInterval(id int) { - w.Call("clearInterval", id) -} - -func (w *window) ClearTimeout(id int) { - w.Call("clearTimeout", id) -} - -func (w *window) Close() { - w.Call("close") -} - -func (w *window) Confirm(prompt string) bool { - return w.Call("confirm", prompt).Bool() -} - -func (w *window) Focus() { - w.Call("focus") -} - -func (w *window) Forward() { - w.Call("forward") -} - -// GetComputedStyle returns the values of all CSS properties of an -// element after applying the active stylesheets. pseudoElt specifies -// the pseudo-element to match. For normal elements, it must be the -// empty string. 
-func (w *window) GetComputedStyle(el Element, pseudoElt string) *CSSStyleDeclaration { - var optArg interface{} - if pseudoElt != "" { - optArg = pseudoElt - } - return &CSSStyleDeclaration{w.Call("getComputedStyle", el.Underlying(), optArg)} -} - -func (w *window) GetSelection() Selection { - // FIXME implement - panic("not implemented") -} - -func (w *window) Home() { - w.Call("home") -} - -func (w *window) MoveBy(dx, dy int) { - w.Call("moveBy", dx, dy) -} - -func (w *window) MoveTo(x, y int) { - w.Call("moveTo", x, y) -} - -func (w *window) Open(url, name, features string) Window { - return &window{w.Call("open", url, name, features)} -} - -func (w *window) OpenDialog(url, name, features string, args []interface{}) Window { - return &window{w.Call("openDialog", url, name, features, args)} -} - -func (w *window) PostMessage(message string, target string, transfer []interface{}) { - w.Call("postMessage", message, target, transfer) -} - -func (w *window) Print() { - w.Call("print") -} - -func (w *window) Prompt(prompt string, initial string) string { - return w.Call("prompt", prompt, initial).String() -} - -func (w *window) ResizeBy(dw, dh int) { - w.Call("resizeBy", dw, dh) -} - -func (w *window) ResizeTo(width, height int) { - w.Call("resizeTo", width, height) -} - -func (w *window) Scroll(x, y int) { - w.Call("scroll", x, y) -} - -func (w *window) ScrollBy(dx, dy int) { - w.Call("scrollBy", dx, dy) -} - -func (w *window) ScrollByLines(i int) { - w.Call("scrollByLines", i) -} - -func (w *window) ScrollTo(x, y int) { - w.Call("scrollTo", x, y) -} - -func (w *window) SetCursor(name string) { - w.Call("setCursor", name) -} - -func (w *window) SetInterval(fn func(), delay int) int { - return w.Call("setInterval", fn, delay).Int() -} - -func (w *window) SetTimeout(fn func(), delay int) int { - return w.Call("setTimeout", fn, delay).Int() -} - -func (w *window) Stop() { - w.Call("stop") -} - -// TODO reuse util.EventTarget - -func (w *window) AddEventListener(typ string, useCapture bool, listener func(Event)) func(o *js.Object) { - wrapper := func(o *js.Object) { listener(wrapEvent(o)) } - w.Call("addEventListener", typ, wrapper, useCapture) - return wrapper -} - -func (w *window) RemoveEventListener(typ string, useCapture bool, listener func(*js.Object)) { - w.Call("removeEventListener", typ, listener, useCapture) -} - -func (w *window) DispatchEvent(event Event) bool { - return w.Call("dispatchEvent", event).Bool() -} - -func wrapDOMHighResTimeStamp(o *js.Object) time.Duration { - return time.Duration(o.Float() * float64(time.Millisecond)) -} - -func (w *window) RequestAnimationFrame(callback func(time.Duration)) int { - wrapper := func(o *js.Object) { callback(wrapDOMHighResTimeStamp(o)) } - return w.Call("requestAnimationFrame", wrapper).Int() -} - -func (w *window) CancelAnimationFrame(requestID int) { - w.Call("cancelAnimationFrame", requestID) -} - -// TODO all the other window methods - -type Selection interface { - // TODO -} - -type Screen struct { - *js.Object - AvailTop int `js:"availTop"` - AvailLeft int `js:"availLeft"` - AvailHeight int `js:"availHeight"` - AvailWidth int `js:"availWidth"` - ColorDepth int `js:"colorDepth"` - Height int `js:"height"` - Left int `js:"left"` - PixelDepth int `js:"pixelDepth"` - Top int `js:"top"` - Width int `js:"width"` -} - -type Navigator interface { - NavigatorID - NavigatorLanguage - NavigatorOnLine - NavigatorGeolocation - // NavigatorPlugins - // NetworkInformation - CookieEnabled() bool - DoNotTrack() string - 
RegisterProtocolHandler(protocol, uri, title string)
-}
-
-type NavigatorID interface {
-	AppName() string
-	AppVersion() string
-	Platform() string
-	Product() string
-	UserAgent() string
-}
-
-type NavigatorLanguage interface {
-	Language() string
-}
-
-type NavigatorOnLine interface {
-	Online() bool
-}
-
-type NavigatorGeolocation interface {
-	Geolocation() Geolocation
-}
-
-type Geolocation interface {
-	// TODO wrap PositionOptions into something that uses the JS
-	// object
-	CurrentPosition(success func(Position), err func(PositionError), opts PositionOptions) Position
-	WatchPosition(success func(Position), err func(PositionError), opts PositionOptions) int
-	ClearWatch(int)
-}
-
-type PositionError struct {
-	*js.Object
-	Code int `js:"code"`
-}
-
-func (err *PositionError) Error() string {
-	return err.Get("message").String()
-}
-
-type PositionOptions struct {
-	EnableHighAccuracy bool
-	Timeout            time.Duration
-	MaximumAge         time.Duration
-}
-
-type Position struct {
-	Coords    *Coordinates
-	Timestamp time.Time
-}
-
-type Coordinates struct {
-	*js.Object
-	Latitude         float64 `js:"latitude"`
-	Longitude        float64 `js:"longitude"`
-	Altitude         float64 `js:"altitude"`
-	Accuracy         float64 `js:"accuracy"`
-	AltitudeAccuracy float64 `js:"altitudeAccuracy"`
-	Heading          float64 `js:"heading"`
-	Speed            float64 `js:"speed"`
-}
-
-type History interface {
-	Length() int
-	State() interface{}
-	Back()
-	Forward()
-	Go(offset int)
-	PushState(state interface{}, title string, url string)
-	ReplaceState(state interface{}, title string, url string)
-}
-
-type Console struct {
-	*js.Object
-	// TODO will replace the js/console package
-}
-
-type SVGDocument interface{}
-type DocumentType interface{}
-type DOMImplementation interface{}
-type StyleSheet interface{}
-type CSSStyleSheet interface{}
-
-type Node interface {
-	EventTarget
-
-	Underlying() *js.Object
-	BaseURI() string
-	ChildNodes() []Node
-	FirstChild() Node
-	LastChild() Node
-	NextSibling() Node
-	NodeName() string
-	NodeType() int
-	NodeValue() string
-	SetNodeValue(string)
-	OwnerDocument() Document
-	ParentNode() Node
-	ParentElement() Element
-	PreviousSibling() Node
-	TextContent() string
-	SetTextContent(string)
-	AppendChild(Node)
-	CloneNode(deep bool) Node
-	CompareDocumentPosition(Node) int
-	Contains(Node) bool
-	HasChildNodes() bool
-	InsertBefore(which Node, before Node)
-	IsDefaultNamespace(string) bool
-	IsEqualNode(Node) bool
-	LookupPrefix() string
-	LookupNamespaceURI(string) string
-	Normalize()
-	RemoveChild(Node)
-	ReplaceChild(newChild, oldChild Node)
-}
-
-// Type BasicNode implements the Node interface and is embedded by
-// concrete node types and element types.
-type BasicNode struct { - *js.Object -} - -func (n *BasicNode) Underlying() *js.Object { - return n.Object -} - -func (n *BasicNode) AddEventListener(typ string, useCapture bool, listener func(Event)) func(*js.Object) { - wrapper := func(o *js.Object) { listener(wrapEvent(o)) } - n.Call("addEventListener", typ, wrapper, useCapture) - return wrapper -} - -func (n *BasicNode) RemoveEventListener(typ string, useCapture bool, listener func(*js.Object)) { - n.Call("removeEventListener", typ, listener, useCapture) -} - -func (n *BasicNode) DispatchEvent(event Event) bool { - return n.Call("dispatchEvent", event).Bool() -} - -func (n *BasicNode) BaseURI() string { - return n.Get("baseURI").String() -} - -func (n *BasicNode) ChildNodes() []Node { - return nodeListToNodes(n.Get("childNodes")) -} - -func (n *BasicNode) FirstChild() Node { - return wrapNode(n.Get("firstChild")) -} - -func (n *BasicNode) LastChild() Node { - return wrapNode(n.Get("lastChild")) -} - -func (n *BasicNode) NextSibling() Node { - return wrapNode(n.Get("nextSibling")) -} - -func (n *BasicNode) NodeName() string { - return n.Get("nodeName").String() -} - -func (n *BasicNode) NodeType() int { - return n.Get("nodeType").Int() -} - -func (n *BasicNode) NodeValue() string { - return toString(n.Get("nodeValue")) -} - -func (n *BasicNode) SetNodeValue(s string) { - n.Set("nodeValue", s) -} - -func (n *BasicNode) OwnerDocument() Document { - // FIXME implement - panic("not implemented") -} - -func (n *BasicNode) ParentNode() Node { - return wrapNode(n.Get("parentNode")) -} - -func (n *BasicNode) ParentElement() Element { - return wrapElement(n.Get("parentElement")) -} - -func (n *BasicNode) PreviousSibling() Node { - return wrapNode(n.Get("previousSibling")) -} - -func (n *BasicNode) TextContent() string { - return toString(n.Get("textContent")) -} - -func (n *BasicNode) SetTextContent(s string) { - n.Set("textContent", s) -} - -func (n *BasicNode) AppendChild(newchild Node) { - n.Call("appendChild", newchild.Underlying()) -} - -func (n *BasicNode) CloneNode(deep bool) Node { - return wrapNode(n.Call("cloneNode", deep)) -} - -const ( - DocumentPositionDisconnected = 1 - DocumentPositionPreceding = 2 - DocumentPositionFollowing = 4 - DocumentPositionContains = 8 - DocumentPositionContainedBy = 16 - DocumentPositionImplementationSpecific = 32 -) - -func (n *BasicNode) CompareDocumentPosition(other Node) int { - return n.Call("compareDocumentPosition", other.Underlying()).Int() -} - -func (n *BasicNode) Contains(other Node) bool { - return n.Call("contains", other.Underlying()).Bool() -} - -func (n *BasicNode) HasChildNodes() bool { - return n.Call("hasChildNodes").Bool() -} - -func (n *BasicNode) InsertBefore(which Node, before Node) { - var o interface{} - if before != nil { - o = before.Underlying() - } - n.Call("insertBefore", which.Underlying(), o) -} - -func (n *BasicNode) IsDefaultNamespace(s string) bool { - return n.Call("isDefaultNamespace", s).Bool() -} - -func (n *BasicNode) IsEqualNode(other Node) bool { - return n.Call("isEqualNode", other.Underlying()).Bool() -} - -func (n *BasicNode) LookupPrefix() string { - return n.Call("lookupPrefix").String() -} - -func (n *BasicNode) LookupNamespaceURI(s string) string { - return toString(n.Call("lookupNamespaceURI", s)) -} - -func (n *BasicNode) Normalize() { - n.Call("normalize") -} - -func (n *BasicNode) RemoveChild(other Node) { - n.Call("removeChild", other.Underlying()) -} - -func (n *BasicNode) ReplaceChild(newChild, oldChild Node) { - n.Call("replaceChild", 
newChild.Underlying(), oldChild.Underlying()) -} - -type Element interface { - Node - ParentNode - ChildNode - - Attributes() map[string]string - Class() *TokenList - ID() string - SetID(string) - TagName() string - GetAttribute(string) string // TODO can attributes only be strings? - GetAttributeNS(ns string, name string) string // can attributes only be strings? - GetBoundingClientRect() ClientRect - GetElementsByClassName(string) []Element - GetElementsByTagName(string) []Element - GetElementsByTagNameNS(ns string, name string) []Element - HasAttribute(string) bool - HasAttributeNS(ns string, name string) bool - QuerySelector(string) Element - QuerySelectorAll(string) []Element - RemoveAttribute(string) - RemoveAttributeNS(ns string, name string) - SetAttribute(name string, value string) - SetAttributeNS(ns string, name string, value string) - InnerHTML() string - SetInnerHTML(string) - OuterHTML() string - SetOuterHTML(string) -} - -type ClientRect struct { - *js.Object - Height float64 `js:"height"` - Width float64 `js:"width"` - Left float64 `js:"left"` - Right float64 `js:"right"` - Top float64 `js:"top"` - Bottom float64 `js:"bottom"` -} - -type ParentNode interface { - // No properties/methods that aren't experimental -} - -type ChildNode interface { - PreviousElementSibling() Element - NextElementSibling() Element -} - -// Type BasicHTMLElement implements the HTMLElement interface and is -// embedded by concrete HTML element types. -type BasicHTMLElement struct { - *BasicElement - // TODO globalEventHandlers -} - -func (e *BasicHTMLElement) AccessKey() string { - return e.Get("accessKey").String() -} - -func (e *BasicHTMLElement) Dataset() map[string]string { - o := e.Get("dataset") - data := map[string]string{} - keys := js.Keys(o) - for _, key := range keys { - data[key] = o.Get(key).String() - } - return data -} - -func (e *BasicHTMLElement) SetAccessKey(s string) { - e.Set("accessKey", s) -} - -func (e *BasicHTMLElement) AccessKeyLabel() string { - return e.Get("accessKeyLabel").String() -} - -func (e *BasicHTMLElement) SetAccessKeyLabel(s string) { - e.Set("accessKeyLabel", s) -} - -func (e *BasicHTMLElement) ContentEditable() string { - return e.Get("contentEditable").String() -} - -func (e *BasicHTMLElement) SetContentEditable(s string) { - e.Set("contentEditable", s) -} - -func (e *BasicHTMLElement) IsContentEditable() bool { - return e.Get("isContentEditable").Bool() -} - -func (e *BasicHTMLElement) Dir() string { - return e.Get("dir").String() -} - -func (e *BasicHTMLElement) SetDir(s string) { - e.Set("dir", s) -} - -func (e *BasicHTMLElement) Draggable() bool { - return e.Get("draggable").Bool() -} - -func (e *BasicHTMLElement) SetDraggable(b bool) { - e.Set("draggable", b) -} - -func (e *BasicHTMLElement) Lang() string { - return e.Get("lang").String() -} - -func (e *BasicHTMLElement) SetLang(s string) { - e.Set("lang", s) -} - -func (e *BasicHTMLElement) OffsetHeight() float64 { - return e.Get("offsetHeight").Float() -} - -func (e *BasicHTMLElement) OffsetLeft() float64 { - return e.Get("offsetLeft").Float() -} - -func (e *BasicHTMLElement) OffsetParent() HTMLElement { - return wrapHTMLElement(e.Get("offsetParent")) -} - -func (e *BasicHTMLElement) OffsetTop() float64 { - return e.Get("offsetTop").Float() -} - -func (e *BasicHTMLElement) OffsetWidth() float64 { - return e.Get("offsetWidth").Float() -} - -func (e *BasicHTMLElement) Style() *CSSStyleDeclaration { - return &CSSStyleDeclaration{e.Get("style")} -} - -func (e *BasicHTMLElement) TabIndex() int { - return 
e.Get("tabIndex").Int() -} - -func (e *BasicHTMLElement) SetTabIndex(i int) { - e.Set("tabIndex", i) -} - -func (e *BasicHTMLElement) Title() string { - return e.Get("title").String() -} - -func (e *BasicHTMLElement) SetTitle(s string) { - e.Set("title", s) -} - -func (e *BasicHTMLElement) Blur() { - e.Call("blur") -} - -func (e *BasicHTMLElement) Click() { - e.Call("click") -} - -func (e *BasicHTMLElement) Focus() { - e.Call("focus") -} - -// Type BasicElement implements the Element interface and is embedded -// by concrete element types and HTML element types. -type BasicElement struct { - *BasicNode -} - -func (e *BasicElement) Attributes() map[string]string { - o := e.Get("attributes") - attrs := map[string]string{} - length := o.Get("length").Int() - for i := 0; i < length; i++ { - item := o.Call("item", i) - attrs[item.Get("name").String()] = item.Get("value").String() - } - return attrs -} - -func (e *BasicElement) GetBoundingClientRect() ClientRect { - obj := e.Call("getBoundingClientRect") - return ClientRect{Object: obj} -} - -func (e *BasicElement) PreviousElementSibling() Element { - return wrapElement(e.Get("previousElementSibling")) -} - -func (e *BasicElement) NextElementSibling() Element { - return wrapElement(e.Get("nextElementSibling")) -} - -func (e *BasicElement) Class() *TokenList { - return &TokenList{dtl: e.Get("classList"), o: e.Object, sa: "className"} -} - -// SetClass sets the element's className attribute to s. Consider -// using the Class method instead. -func (e *BasicElement) SetClass(s string) { - e.Set("className", s) -} - -func (e *BasicElement) ID() string { - return e.Get("id").String() -} - -func (e *BasicElement) SetID(s string) { - e.Set("id", s) -} - -func (e *BasicElement) TagName() string { - return e.Get("tagName").String() -} - -func (e *BasicElement) GetAttribute(name string) string { - return toString(e.Call("getAttribute", name)) -} - -func (e *BasicElement) GetAttributeNS(ns string, name string) string { - return toString(e.Call("getAttributeNS", ns, name)) -} - -func (e *BasicElement) GetElementsByClassName(s string) []Element { - return nodeListToElements(e.Call("getElementsByClassName", s)) -} - -func (e *BasicElement) GetElementsByTagName(s string) []Element { - return nodeListToElements(e.Call("getElementsByTagName", s)) -} - -func (e *BasicElement) GetElementsByTagNameNS(ns string, name string) []Element { - return nodeListToElements(e.Call("getElementsByTagNameNS", ns, name)) -} - -func (e *BasicElement) HasAttribute(s string) bool { - return e.Call("hasAttribute", s).Bool() -} - -func (e *BasicElement) HasAttributeNS(ns string, name string) bool { - return e.Call("hasAttributeNS", ns, name).Bool() -} - -func (e *BasicElement) QuerySelector(s string) Element { - return wrapElement(e.Call("querySelector", s)) -} - -func (e *BasicElement) QuerySelectorAll(s string) []Element { - return nodeListToElements(e.Call("querySelectorAll", s)) -} - -func (e *BasicElement) RemoveAttribute(s string) { - e.Call("removeAttribute", s) -} - -func (e *BasicElement) RemoveAttributeNS(ns string, name string) { - e.Call("removeAttributeNS", ns, name) -} - -func (e *BasicElement) SetAttribute(name string, value string) { - e.Call("setAttribute", name, value) -} - -func (e *BasicElement) SetAttributeNS(ns string, name string, value string) { - e.Call("setAttributeNS", ns, name, value) -} - -func (e *BasicElement) InnerHTML() string { - return e.Get("innerHTML").String() -} - -func (e *BasicElement) SetInnerHTML(s string) { - e.Set("innerHTML", s) -} - -func 
(e *BasicElement) OuterHTML() string { - return e.Get("outerHTML").String() -} - -func (e *BasicElement) SetOuterHTML(s string) { - e.Set("outerHTML", s) -} - -type HTMLAnchorElement struct { - *BasicHTMLElement - *URLUtils - HrefLang string `js:"hreflang"` - Media string `js:"media"` - TabIndex int `js:"tabIndex"` - Target string `js:"target"` - Text string `js:"text"` - Type string `js:"type"` -} - -func (e *HTMLAnchorElement) Rel() *TokenList { - return &TokenList{dtl: e.Get("relList"), o: e.Object, sa: "rel"} -} - -type HTMLAppletElement struct { - *BasicHTMLElement - Alt string `js:"alt"` - Coords string `js:"coords"` - HrefLang string `js:"hreflang"` - Media string `js:"media"` - Search string `js:"search"` - Shape string `js:"shape"` - TabIndex int `js:"tabIndex"` - Target string `js:"target"` - Type string `js:"type"` -} - -func (e *HTMLAppletElement) Rel() *TokenList { - return &TokenList{dtl: e.Get("relList"), o: e.Object, sa: "rel"} -} - -type HTMLAreaElement struct { - *BasicHTMLElement - *URLUtils - Alt string `js:"alt"` - Coords string `js:"coords"` - HrefLang string `js:"hreflang"` - Media string `js:"media"` - Search string `js:"search"` - Shape string `js:"shape"` - TabIndex int `js:"tabIndex"` - Target string `js:"target"` - Type string `js:"type"` -} - -func (e *HTMLAreaElement) Rel() *TokenList { - return &TokenList{dtl: e.Get("relList"), o: e.Object, sa: "rel"} -} - -type HTMLAudioElement struct{ *HTMLMediaElement } - -type HTMLBRElement struct{ *BasicHTMLElement } - -type HTMLBaseElement struct{ *BasicHTMLElement } - -func (e *HTMLBaseElement) Href() string { - return e.Get("href").String() -} - -func (e *HTMLBaseElement) Target() string { - return e.Get("target").String() -} - -type HTMLBodyElement struct{ *BasicHTMLElement } - -type HTMLButtonElement struct { - *BasicHTMLElement - AutoFocus bool `js:"autofocus"` - Disabled bool `js:"disabled"` - FormAction string `js:"formAction"` - FormEncType string `js:"formEncType"` - FormMethod string `js:"formMethod"` - FormNoValidate bool `js:"formNoValidate"` - FormTarget string `js:"formTarget"` - Name string `js:"name"` - TabIndex int `js:"tabIndex"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - Value string `js:"value"` - WillValidate bool `js:"willValidate"` -} - -func (e *HTMLButtonElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLButtonElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -func (e *HTMLButtonElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLButtonElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLButtonElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -type HTMLCanvasElement struct { - *BasicHTMLElement - Height int `js:"height"` - Width int `js:"width"` -} - -type CanvasRenderingContext2D struct { - *js.Object - - // Colors, Styles, and Shadows - - FillStyle string `js:"fillStyle"` - StrokeStyle string `js:"strokeStyle"` - ShadowColor string `js:"shadowColor"` - ShadowBlur int `js:"shadowBlur"` - ShadowOffsetX int `js:"shadowOffsetX"` - ShadowOffsetY int `js:"shadowOffsetY"` - - // Line Styles - - LineCap string `js:"lineCap"` - LineJoin string `js:"lineJoin"` - LineWidth int `js:"lineWidth"` - MiterLimit int `js:"miterLimit"` - - // Text - - Font string `js:"font"` - TextAlign string `js:"textAlign"` - TextBaseline string 
`js:"textBaseline"`
-
-	// Compositing
-
-	GlobalAlpha              float64 `js:"globalAlpha"`
-	GlobalCompositeOperation string  `js:"globalCompositeOperation"`
-}
-
-type ImageData struct {
-	*js.Object
-
-	Width  int        `js:"width"`
-	Height int        `js:"height"`
-	Data   *js.Object `js:"data"`
-}
-
-func (m *ImageData) ColorModel() color.Model { return color.NRGBAModel }
-
-func (m *ImageData) Bounds() image.Rectangle {
-	return image.Rect(0, 0, m.Width, m.Height)
-}
-
-func (m *ImageData) At(x, y int) color.Color {
-	return m.NRGBAAt(x, y)
-}
-
-func (m *ImageData) NRGBAAt(x, y int) color.NRGBA {
-	if x < 0 || x >= m.Width ||
-		y < 0 || y >= m.Height {
-		return color.NRGBA{}
-	}
-	i := (y*m.Width + x) * 4
-	return color.NRGBA{
-		R: uint8(m.Data.Index(i + 0).Int()),
-		G: uint8(m.Data.Index(i + 1).Int()),
-		B: uint8(m.Data.Index(i + 2).Int()),
-		A: uint8(m.Data.Index(i + 3).Int()),
-	}
-}
-
-func (m *ImageData) Set(x, y int, c color.Color) {
-	if x < 0 || x >= m.Width ||
-		y < 0 || y >= m.Height {
-		return
-	}
-	c1 := color.NRGBAModel.Convert(c).(color.NRGBA)
-	i := (y*m.Width + x) * 4
-	m.Data.SetIndex(i+0, c1.R)
-	m.Data.SetIndex(i+1, c1.G)
-	m.Data.SetIndex(i+2, c1.B)
-	m.Data.SetIndex(i+3, c1.A)
-}
-
-func (m *ImageData) SetNRGBA(x, y int, c color.NRGBA) {
-	if x < 0 || x >= m.Width ||
-		y < 0 || y >= m.Height {
-		return
-	}
-	i := (y*m.Width + x) * 4
-	m.Data.SetIndex(i+0, c.R)
-	m.Data.SetIndex(i+1, c.G)
-	m.Data.SetIndex(i+2, c.B)
-	m.Data.SetIndex(i+3, c.A)
-}
-
-// CanvasGradient represents an opaque object describing a gradient.
-// It is returned by the methods CanvasRenderingContext2D.CreateLinearGradient
-// or CanvasRenderingContext2D.CreateRadialGradient.
-//
-// Reference: https://developer.mozilla.org/en-US/docs/Web/API/CanvasGradient.
-type CanvasGradient struct {
-	*js.Object
-}
-
-// AddColorStop adds a new stop, defined by an offset and a color, to the gradient.
-// It panics with *js.Error if the offset is not between 0 and 1, or if the color
-// can't be parsed as a CSS <color> value.
-//
-// Reference: https://developer.mozilla.org/en-US/docs/Web/API/CanvasGradient/addColorStop.
-func (cg *CanvasGradient) AddColorStop(offset float64, color string) {
-	cg.Call("addColorStop", offset, color)
-}
-
-// CanvasPattern represents an opaque object describing a pattern.
-// It is based on an image, a canvas or a video, created by the
-// CanvasRenderingContext2D.CreatePattern method.
-//
-// Reference: https://developer.mozilla.org/en-US/docs/Web/API/CanvasPattern.
-type CanvasPattern struct { - *js.Object -} - -type TextMetrics struct { - *js.Object - - Width float64 `js:"width"` - ActualBoundingBoxLeft float64 `js:"actualBoundingBoxLeft"` - ActualBoundingBoxRight float64 `js:"actualBoundingBoxRight"` - FontBoundingBoxAscent float64 `js:"fontBoundingBoxAscent"` - FontBoundingBoxDescent float64 `js:"fontBoundingBoxDescent"` - ActualBoundingBoxAscent float64 `js:"actualBoundingBoxAscent"` - ActualBoundingBoxDescent float64 `js:"actualBoundingBoxDescent"` - EmHeightAscent float64 `js:"emHeightAscent"` - EmHeightDescent float64 `js:"emHeightDescent"` - HangingBaseline float64 `js:"hangingBaseline"` - AlphabeticBaseline float64 `js:"alphabeticBaseline"` - IdeographicBaseline float64 `js:"ideographicBaseline"` -} - -// Creating canvas 2d context - -func (e *HTMLCanvasElement) GetContext2d() *CanvasRenderingContext2D { - ctx := e.GetContext("2d") - return &CanvasRenderingContext2D{Object: ctx} -} - -func (e *HTMLCanvasElement) GetContext(param string) *js.Object { - return e.Call("getContext", param) -} - -// Drawing Rectangles - -func (ctx *CanvasRenderingContext2D) ClearRect(x, y, width, height float64) { - ctx.Call("clearRect", x, y, width, height) -} - -func (ctx *CanvasRenderingContext2D) FillRect(x, y, width, height float64) { - ctx.Call("fillRect", x, y, width, height) -} - -func (ctx *CanvasRenderingContext2D) StrokeRect(x, y, width, height float64) { - ctx.Call("strokeRect", x, y, width, height) -} - -// Drawing Text - -// FillText fills a given text at the given (x, y) position. -// If the optional maxWidth parameter is not -1, -// the text will be scaled to fit that width. -func (ctx *CanvasRenderingContext2D) FillText(text string, x, y, maxWidth float64) { - if maxWidth == -1 { - ctx.Call("fillText", text, x, y) - return - } - - ctx.Call("fillText", text, x, y, maxWidth) -} - -// StrokeText strokes a given text at the given (x, y) position. -// If the optional maxWidth parameter is not -1, -// the text will be scaled to fit that width. -func (ctx *CanvasRenderingContext2D) StrokeText(text string, x, y, maxWidth float64) { - if maxWidth == -1 { - ctx.Call("strokeText", text, x, y) - return - } - - ctx.Call("strokeText", text, x, y, maxWidth) -} -func (ctx *CanvasRenderingContext2D) MeasureText(text string) *TextMetrics { - textMetrics := ctx.Call("measureText", text) - return &TextMetrics{Object: textMetrics} -} - -// Line styles - -func (ctx *CanvasRenderingContext2D) GetLineDash() []float64 { - var dashes []float64 - for _, dash := range ctx.Call("getLineDash").Interface().([]interface{}) { - dashes = append(dashes, dash.(float64)) - } - return dashes -} - -func (ctx *CanvasRenderingContext2D) SetLineDash(dashes []float64) { - ctx.Call("setLineDash", dashes) -} - -// Gradients and patterns - -// CreateLinearGradient creates a linear gradient along the line given -// by the coordinates represented by the parameters. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/createLinearGradient. -func (ctx *CanvasRenderingContext2D) CreateLinearGradient(x0, y0, x1, y1 float64) *CanvasGradient { - return &CanvasGradient{Object: ctx.Call("createLinearGradient", x0, y0, x1, y1)} -} - -// CreateRadialGradient creates a radial gradient given by the coordinates of the two circles -// represented by the parameters. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/createRadialGradient. 
-func (ctx *CanvasRenderingContext2D) CreateRadialGradient(x0, y0, r0, x1, y1, r1 float64) *CanvasGradient { - return &CanvasGradient{Object: ctx.Call("createRadialGradient", x0, y0, r0, x1, y1, r1)} -} - -// CreatePattern creates a pattern using the specified image (a CanvasImageSource). -// It repeats the source in the directions specified by the repetition argument. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/createPattern. -func (ctx *CanvasRenderingContext2D) CreatePattern(image Element, repetition string) *CanvasPattern { - return &CanvasPattern{Object: ctx.Call("createPattern", image, repetition)} -} - -// Paths - -func (ctx *CanvasRenderingContext2D) BeginPath() { - ctx.Call("beginPath") -} - -func (ctx *CanvasRenderingContext2D) ClosePath() { - ctx.Call("closePath") -} - -func (ctx *CanvasRenderingContext2D) MoveTo(x, y float64) { - ctx.Call("moveTo", x, y) -} - -func (ctx *CanvasRenderingContext2D) LineTo(x, y float64) { - ctx.Call("lineTo", x, y) -} - -func (ctx *CanvasRenderingContext2D) BezierCurveTo(cp1x, cp1y, cp2x, cp2y, x, y float64) { - ctx.Call("bezierCurveTo", cp1x, cp1y, cp2x, cp2y, x, y) -} - -func (ctx *CanvasRenderingContext2D) QuadraticCurveTo(cpx, cpy, x, y float64) { - ctx.Call("quadraticCurveTo", cpx, cpy, x, y) -} - -func (ctx *CanvasRenderingContext2D) Arc(x, y, r, sAngle, eAngle float64, counterclockwise bool) { - ctx.Call("arc", x, y, r, sAngle, eAngle, counterclockwise) -} - -func (ctx *CanvasRenderingContext2D) ArcTo(x1, y1, x2, y2, r float64) { - ctx.Call("arcTo", x1, y1, x2, y2, r) -} - -func (ctx *CanvasRenderingContext2D) Ellipse(x, y, radiusX, radiusY, rotation, startAngle, endAngle float64, anticlockwise bool) { - ctx.Call("ellipse", x, y, radiusX, radiusY, rotation, startAngle, endAngle, anticlockwise) -} - -func (ctx *CanvasRenderingContext2D) Rect(x, y, width, height float64) { - ctx.Call("rect", x, y, width, height) -} - -// Drawing paths - -func (ctx *CanvasRenderingContext2D) Fill() { - ctx.Call("fill") -} - -func (ctx *CanvasRenderingContext2D) Stroke() { - ctx.Call("stroke") -} - -func (ctx *CanvasRenderingContext2D) DrawFocusIfNeeded(element HTMLElement, path *js.Object) { - ctx.Call("drawFocusIfNeeded", element, path) -} - -func (ctx *CanvasRenderingContext2D) ScrollPathIntoView(path *js.Object) { - ctx.Call("scrollPathIntoView", path) -} - -func (ctx *CanvasRenderingContext2D) Clip() { - ctx.Call("clip") -} - -func (ctx *CanvasRenderingContext2D) IsPointInPath(x, y float64) bool { - return ctx.Call("isPointInPath", x, y).Bool() -} - -func (ctx *CanvasRenderingContext2D) IsPointInStroke(path *js.Object, x, y float64) bool { - return ctx.Call("isPointInStroke", path, x, y).Bool() -} - -// Transformations - -func (ctx *CanvasRenderingContext2D) Rotate(angle float64) { - ctx.Call("rotate", angle) -} - -func (ctx *CanvasRenderingContext2D) Scale(scaleWidth, scaleHeight float64) { - ctx.Call("scale", scaleWidth, scaleHeight) -} - -func (ctx *CanvasRenderingContext2D) Translate(x, y float64) { - ctx.Call("translate", x, y) -} - -func (ctx *CanvasRenderingContext2D) Transform(a, b, c, d, e, f float64) { - ctx.Call("transform", a, b, c, d, e, f) -} - -func (ctx *CanvasRenderingContext2D) SetTransform(a, b, c, d, e, f float64) { - ctx.Call("setTransform", a, b, c, d, e, f) -} - -func (ctx *CanvasRenderingContext2D) ResetTransform() { - ctx.Call("resetTransform") -} - -// Drawing images - -func (ctx *CanvasRenderingContext2D) DrawImage(image Element, dx, dy float64) { - ctx.Call("drawImage", image, dx, 
dy) -} - -func (ctx *CanvasRenderingContext2D) DrawImageWithDst(image Element, dx, dy, dWidth, dHeight float64) { - ctx.Call("drawImage", image, dx, dy, dWidth, dHeight) -} - -func (ctx *CanvasRenderingContext2D) DrawImageWithSrcAndDst(image Element, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight float64) { - ctx.Call("drawImage", image, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight) -} - -// Pixel manipulation - -func (ctx *CanvasRenderingContext2D) CreateImageData(width, height int) *ImageData { - return &ImageData{Object: ctx.Call("createImageData", width, height)} -} - -func (ctx *CanvasRenderingContext2D) GetImageData(sx, sy, sw, sh int) *ImageData { - return &ImageData{Object: ctx.Call("getImageData", sx, sy, sw, sh)} -} - -func (ctx *CanvasRenderingContext2D) PutImageData(imageData *ImageData, dx, dy float64) { - ctx.Call("putImageData", imageData, dx, dy) -} - -func (ctx *CanvasRenderingContext2D) PutImageDataDirty(imageData *ImageData, dx, dy float64, dirtyX, dirtyY, dirtyWidth, dirtyHeight int) { - ctx.Call("putImageData", imageData, dx, dy, dirtyX, dirtyY, dirtyWidth, dirtyHeight) -} - -// State - -func (ctx *CanvasRenderingContext2D) Save() { - ctx.Call("save") -} - -func (ctx *CanvasRenderingContext2D) Restore() { - ctx.Call("restore") -} - -// TODO Hit regions: -// addHitRegion -// removeHitRegion -// clearHitRegions - -type HTMLDListElement struct{ *BasicHTMLElement } - -type HTMLDataElement struct { - *BasicHTMLElement - Value string `js:"value"` -} - -type HTMLDataListElement struct{ *BasicHTMLElement } - -func (e *HTMLDataListElement) Options() []*HTMLOptionElement { - return getOptions(e.Object, "options") -} - -type HTMLDirectoryElement struct{ *BasicHTMLElement } -type HTMLDivElement struct{ *BasicHTMLElement } - -type HTMLEmbedElement struct { - *BasicHTMLElement - Src string `js:"src"` - Type string `js:"type"` - Width string `js:"width"` -} - -type HTMLFieldSetElement struct { - *BasicHTMLElement - Disabled bool `js:"disabled"` - Name string `js:"name"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - WillValidate bool `js:"willValidate"` -} - -func (e *HTMLFieldSetElement) Elements() []HTMLElement { - return nodeListToHTMLElements(e.Get("elements")) -} - -func (e *HTMLFieldSetElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLFieldSetElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLFieldSetElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLFieldSetElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -type HTMLFontElement struct{ *BasicHTMLElement } - -type HTMLFormElement struct { - *BasicHTMLElement - AcceptCharset string `js:"acceptCharset"` - Action string `js:"action"` - Autocomplete string `js:"autocomplete"` - Encoding string `js:"encoding"` - Enctype string `js:"enctype"` - Length int `js:"length"` - Method string `js:"method"` - Name string `js:"name"` - NoValidate bool `js:"noValidate"` - Target string `js:"target"` -} - -func (e *HTMLFormElement) Elements() []HTMLElement { - return nodeListToHTMLElements(e.Get("elements")) -} - -func (e *HTMLFormElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLFormElement) Submit() { - e.Call("submit") -} - -func (e *HTMLFormElement) Reset() { - e.Call("reset") -} - -func (e *HTMLFormElement) Item(index int) HTMLElement { - return 
wrapHTMLElement(e.Call("item", index)) -} - -func (e *HTMLFormElement) NamedItem(name string) HTMLElement { - return wrapHTMLElement(e.Call("namedItem", name)) -} - -type HTMLFrameElement struct{ *BasicHTMLElement } -type HTMLFrameSetElement struct{ *BasicHTMLElement } -type HTMLHRElement struct{ *BasicHTMLElement } -type HTMLHeadElement struct{ *BasicHTMLElement } -type HTMLHeadingElement struct{ *BasicHTMLElement } -type HTMLHtmlElement struct{ *BasicHTMLElement } - -type HTMLIFrameElement struct { - *BasicHTMLElement - Width string `js:"width"` - Height string `js:"height"` - Name string `js:"name"` - Src string `js:"src"` - SrcDoc string `js:"srcdoc"` - Seamless bool `js:"seamless"` - // TODO sandbox attribute -} - -func (e *HTMLIFrameElement) ContentDocument() Document { - return wrapDocument(e.Get("contentDocument")) -} - -func (e *HTMLIFrameElement) ContentWindow() Window { - return &window{e.Get("contentWindow")} -} - -type HTMLImageElement struct { - *BasicHTMLElement - Complete bool `js:"complete"` - CrossOrigin string `js:"crossOrigin"` - Height int `js:"height"` - IsMap bool `js:"isMap"` - NaturalHeight int `js:"naturalHeight"` - NaturalWidth int `js:"naturalWidth"` - Src string `js:"src"` - UseMap string `js:"useMap"` - Width int `js:"width"` - // TODO constructor -} - -type HTMLInputElement struct { - *BasicHTMLElement - Accept string `js:"accept"` - Alt string `js:"alt"` - Autocomplete string `js:"autocomplete"` - Autofocus bool `js:"autofocus"` - Checked bool `js:"checked"` - DefaultChecked bool `js:"defaultChecked"` - DefaultValue string `js:"defaultValue"` - DirName string `js:"dirName"` - Disabled bool `js:"disabled"` - FormAction string `js:"formAction"` - FormEncType string `js:"formEncType"` - FormMethod string `js:"formMethod"` - FormNoValidate bool `js:"formNoValidate"` - FormTarget string `js:"formTarget"` - Height string `js:"height"` - Indeterminate bool `js:"indeterminate"` - Max string `js:"max"` - MaxLength int `js:"maxLength"` - Min string `js:"min"` - Multiple bool `js:"multiple"` - Name string `js:"name"` - Pattern string `js:"pattern"` - Placeholder string `js:"placeholder"` - ReadOnly bool `js:"readOnly"` - Required bool `js:"required"` - SelectionDirection string `js:"selectionDirection"` - SelectionEnd int `js:"selectionEnd"` - SelectionStart int `js:"selectionStart"` - Size int `js:"size"` - Src string `js:"src"` - Step string `js:"step"` - TabIndex int `js:"tabIndex"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - Value string `js:"value"` - ValueAsDate time.Time `js:"valueAsDate"` - ValueAsNumber float64 `js:"valueAsNumber"` - Width string `js:"width"` - WillValidate bool `js:"willValidate"` -} - -// File represents files as can be obtained from file choosers or drag -// and drop. The dom package does not define any methods on File nor -// does it provide access to the blob or a way to read it. 
-type File struct { - *js.Object -} - -func (e *HTMLInputElement) Files() []*File { - files := e.Get("files") - out := make([]*File, files.Get("length").Int()) - for i := range out { - out[i] = &File{files.Call("item", i)} - } - return out -} - -func (e *HTMLInputElement) List() *HTMLDataListElement { - list := wrapHTMLElement(e.Get("list")) - if list == nil { - return nil - } - return list.(*HTMLDataListElement) -} - -func (e *HTMLInputElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -func (e *HTMLInputElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLInputElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLInputElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLInputElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -func (e *HTMLInputElement) Select() { - e.Call("select") -} - -func (e *HTMLInputElement) SetSelectionRange(start, end int, direction string) { - e.Call("setSelectionRange", start, end, direction) -} - -func (e *HTMLInputElement) StepDown(n int) error { - return callRecover(e.Object, "stepDown", n) -} - -func (e *HTMLInputElement) StepUp(n int) error { - return callRecover(e.Object, "stepUp", n) -} - -type HTMLKeygenElement struct { - *BasicHTMLElement - Autofocus bool `js:"autofocus"` - Challenge string `js:"challenge"` - Disabled bool `js:"disabled"` - Keytype string `js:"keytype"` - Name string `js:"name"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - WillValidate bool `js:"willValidate"` -} - -func (e *HTMLKeygenElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLKeygenElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -func (e *HTMLKeygenElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLKeygenElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLKeygenElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -type HTMLLIElement struct { - *BasicHTMLElement - Value int `js:"value"` -} - -type HTMLLabelElement struct { - *BasicHTMLElement - For string `js:"htmlFor"` -} - -func (e *HTMLLabelElement) Control() HTMLElement { - return wrapHTMLElement(e.Get("control")) -} - -func (e *HTMLLabelElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -type HTMLLegendElement struct{ *BasicHTMLElement } - -func (e *HTMLLegendElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -type HTMLLinkElement struct { - *BasicHTMLElement - Disabled bool `js:"disabled"` - Href string `js:"href"` - HrefLang string `js:"hrefLang"` - Media string `js:"media"` - Type string `js:"type"` -} - -func (e *HTMLLinkElement) Rel() *TokenList { - return &TokenList{dtl: e.Get("relList"), o: e.Object, sa: "rel"} -} - -func (e *HTMLLinkElement) Sizes() *TokenList { - return &TokenList{dtl: e.Get("sizes"), o: e.Object} -} - -func (e *HTMLLinkElement) Sheet() StyleSheet { - // FIXME implement - panic("not implemented") -} - -type HTMLMapElement struct { - *BasicHTMLElement - Name string `js:"name"` -} - -func (e *HTMLMapElement) Areas() []*HTMLAreaElement { - areas := nodeListToElements(e.Get("areas")) - out := make([]*HTMLAreaElement, len(areas)) - for i, area := range areas { - out[i] = 
area.(*HTMLAreaElement) - } - return out -} - -func (e *HTMLMapElement) Images() []HTMLElement { - return nodeListToHTMLElements(e.Get("areas")) -} - -type HTMLMediaElement struct { - *BasicHTMLElement - Paused bool `js:"paused"` -} - -func (e *HTMLMediaElement) Play() { - e.Call("play") -} - -func (e *HTMLMediaElement) Pause() { - e.Call("pause") -} - -type HTMLMenuElement struct{ *BasicHTMLElement } - -type HTMLMetaElement struct { - *BasicHTMLElement - Content string `js:"content"` - HTTPEquiv string `js:"httpEquiv"` - Name string `js:"name"` -} - -type HTMLMeterElement struct { - *BasicHTMLElement - High float64 `js:"high"` - Low float64 `js:"low"` - Max float64 `js:"max"` - Min float64 `js:"min"` - Optimum float64 `js:"optimum"` -} - -func (e HTMLMeterElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -type HTMLModElement struct { - *BasicHTMLElement - Cite string `js:"cite"` - DateTime string `js:"dateTime"` -} - -type HTMLOListElement struct { - *BasicHTMLElement - Reversed bool `js:"reversed"` - Start int `js:"start"` - Type string `js:"type"` -} - -type HTMLObjectElement struct { - *BasicHTMLElement - Data string `js:"data"` - Height string `js:"height"` - Name string `js:"name"` - TabIndex int `js:"tabIndex"` - Type string `js:"type"` - TypeMustMatch bool `js:"typeMustMatch"` - UseMap string `js:"useMap"` - ValidationMessage string `js:"validationMessage"` - With string `js:"with"` - WillValidate bool `js:"willValidate"` -} - -func (e *HTMLObjectElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLObjectElement) ContentDocument() Document { - return wrapDocument(e.Get("contentDocument")) -} - -func (e *HTMLObjectElement) ContentWindow() Window { - return &window{e.Get("contentWindow")} -} - -func (e *HTMLObjectElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLObjectElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLObjectElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -type HTMLOptGroupElement struct { - *BasicHTMLElement - Disabled bool `js:"disabled"` - Label string `js:"label"` -} - -type HTMLOptionElement struct { - *BasicHTMLElement - DefaultSelected bool `js:"defaultSelected"` - Disabled bool `js:"disabled"` - Index int `js:"index"` - Label string `js:"label"` - Selected bool `js:"selected"` - Text string `js:"text"` - Value string `js:"value"` -} - -func (e *HTMLOptionElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -type HTMLOutputElement struct { - *BasicHTMLElement - DefaultValue string `js:"defaultValue"` - Name string `js:"name"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - Value string `js:"value"` - WillValidate bool `js:"willValidate"` -} - -func (e *HTMLOutputElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLOutputElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -func (e *HTMLOutputElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLOutputElement) For() *TokenList { - return &TokenList{dtl: e.Get("htmlFor"), o: e.Object} -} - -func (e *HTMLOutputElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLOutputElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} 
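Aside: the Validity/CheckValidity/SetCustomValidity trio deleted above is the package's recurring constraint-validation pattern, stamped out identically on every form-associated element. A minimal caller-side sketch, assuming a page with an <input id="email"> (the element ID and message text are illustrative, not from this repository):

	// Illustrative only; dom here is the vendored honnef.co/go/js/dom
	// package being removed in this change.
	d := dom.GetWindow().Document()
	input := d.GetElementByID("email").(*dom.HTMLInputElement)
	if !input.CheckValidity() {
		// The ValidityState flags say why validation failed.
		if input.Validity().ValueMissing {
			input.SetCustomValidity("an email address is required") // hypothetical message
		}
	} else {
		input.SetCustomValidity("") // clear any earlier custom error
	}

A non-empty custom validity string keeps the element invalid until it is cleared, which is why the else branch resets it to "".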
- -type HTMLParagraphElement struct{ *BasicHTMLElement } - -type HTMLParamElement struct { - *BasicHTMLElement - Name string `js:"name"` - Value string `js:"value"` -} - -type HTMLPreElement struct{ *BasicHTMLElement } - -type HTMLProgressElement struct { - *BasicHTMLElement - Max float64 `js:"max"` - Position float64 `js:"position"` - Value float64 `js:"value"` -} - -func (e HTMLProgressElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -type HTMLQuoteElement struct { - *BasicHTMLElement - Cite string `js:"cite"` -} - -type HTMLScriptElement struct { - *BasicHTMLElement - Type string `js:"type"` - Src string `js:"src"` - Charset string `js:"charset"` - Async bool `js:"async"` - Defer bool `js:"defer"` - Text string `js:"text"` -} - -type HTMLSelectElement struct { - *BasicHTMLElement - Autofocus bool `js:"autofocus"` - Disabled bool `js:"disabled"` - Length int `js:"length"` - Multiple bool `js:"multiple"` - Name string `js:"name"` - Required bool `js:"required"` - SelectedIndex int `js:"selectedIndex"` - Size int `js:"size"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - Value string `js:"value"` - WillValidate bool `js:"willValidate"` -} - -func (e *HTMLSelectElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -func (e *HTMLSelectElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLSelectElement) Options() []*HTMLOptionElement { - return getOptions(e.Object, "options") -} - -func (e *HTMLSelectElement) SelectedOptions() []*HTMLOptionElement { - return getOptions(e.Object, "selectedOptions") -} - -func (e *HTMLSelectElement) Item(index int) *HTMLOptionElement { - el := wrapHTMLElement(e.Call("item", index)) - if el == nil { - return nil - } - return el.(*HTMLOptionElement) -} - -func (e *HTMLSelectElement) NamedItem(name string) *HTMLOptionElement { - el := wrapHTMLElement(e.Call("namedItem", name)) - if el == nil { - return nil - } - return el.(*HTMLOptionElement) -} - -// TODO(dominikh): Not implementing Add or Remove for now. For one, -// Add with "before" behaves weird when dealing with optgroups. Also, -// there's already InsertBefore and RemoveChild which can be used -// instead. 
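Aside: a usage sketch for the HTMLSelectElement accessors deleted above, assuming a <select id="tags" multiple> element (the ID is made up for illustration):

	sel := dom.GetWindow().Document().GetElementByID("tags").(*dom.HTMLSelectElement)
	var values []string
	for _, opt := range sel.SelectedOptions() {
		values = append(values, opt.Value)
	}
	// Per the TODO above, option insertion and removal go through the
	// generic node methods (InsertBefore, RemoveChild) rather than
	// dedicated Add/Remove helpers on the select element.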
- -func (e *HTMLSelectElement) Validity() *ValidityState { - return &ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLSelectElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLSelectElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -type HTMLSourceElement struct { - *BasicHTMLElement - Media string `js:"media"` - Src string `js:"src"` - Type string `js:"type"` -} - -type HTMLSpanElement struct{ *BasicHTMLElement } -type HTMLStyleElement struct{ *BasicHTMLElement } -type HTMLTableCaptionElement struct{ *BasicHTMLElement } - -type HTMLTableCellElement struct { - *BasicHTMLElement - ColSpan int `js:"colSpan"` - RowSpan int `js:"rowSpan"` - CellIndex int `js:"cellIndex"` - // TODO headers -} - -type HTMLTableColElement struct { - *BasicHTMLElement - Span int `js:"span"` -} - -type HTMLTableDataCellElement struct{ *BasicHTMLElement } -type HTMLTableElement struct{ *BasicHTMLElement } - -type HTMLTableHeaderCellElement struct { - *BasicHTMLElement - Abbr string `js:"abbr"` - Scope string `js:"scope"` -} - -type HTMLTableRowElement struct { - *BasicHTMLElement - RowIndex int `js:"rowIndex"` - SectionRowIndex int `js:"sectionRowIndex"` -} - -func (e *HTMLTableRowElement) Cells() []*HTMLTableCellElement { - cells := nodeListToElements(e.Get("cells")) - out := make([]*HTMLTableCellElement, len(cells)) - for i, cell := range cells { - out[i] = cell.(*HTMLTableCellElement) - } - return out -} - -func (e *HTMLTableRowElement) InsertCell(index int) *HTMLTableCellElement { - return wrapHTMLElement(e.Call("insertCell", index)).(*HTMLTableCellElement) -} - -func (e *HTMLTableRowElement) DeleteCell(index int) { - // FIXME exception handling/check that index is in bounds - e.Call("deleteCell", index) -} - -type HTMLTableSectionElement struct{ *BasicHTMLElement } - -func (e *HTMLTableSectionElement) Rows() []*HTMLTableRowElement { - rows := nodeListToElements(e.Get("rows")) - out := make([]*HTMLTableRowElement, len(rows)) - for i, row := range rows { - out[i] = row.(*HTMLTableRowElement) - } - return out -} - -func (e *HTMLTableSectionElement) DeleteRow(index int) { - // FIXME exception handling/check that index is in bounds - e.Call("deleteRow", index) -} - -func (e *HTMLTableSectionElement) InsertRow(index int) *HTMLTableRowElement { - return wrapHTMLElement(e.Call("insertRow", index)).(*HTMLTableRowElement) -} - -type HTMLTextAreaElement struct { - *BasicHTMLElement - Autocomplete string `js:"autocomplete"` - Autofocus bool `js:"autofocus"` - Cols int `js:"cols"` - DefaultValue string `js:"defaultValue"` - DirName string `js:"dirName"` - Disabled bool `js:"disabled"` - MaxLength int `js:"maxLength"` - Name string `js:"name"` - Placeholder string `js:"placeholder"` - ReadOnly bool `js:"readOnly"` - Required bool `js:"required"` - Rows int `js:"rows"` - SelectionDirection string `js:"selectionDirection"` - SelectionStart int `js:"selectionStart"` - SelectionEnd int `js:"selectionEnd"` - TabIndex int `js:"tabIndex"` - TextLength int `js:"textLength"` - Type string `js:"type"` - ValidationMessage string `js:"validationMessage"` - Value string `js:"value"` - WillValidate bool `js:"willValidate"` - Wrap string `js:"wrap"` -} - -func (e *HTMLTextAreaElement) Form() *HTMLFormElement { - return getForm(e.Object) -} - -func (e *HTMLTextAreaElement) Labels() []*HTMLLabelElement { - return getLabels(e.Object) -} - -func (e *HTMLTextAreaElement) Validity() *ValidityState { - // TODO replace with a field once GopherJS supports that - return 
&ValidityState{Object: e.Get("validity")} -} - -func (e *HTMLTextAreaElement) CheckValidity() bool { - return e.Call("checkValidity").Bool() -} - -func (e *HTMLTextAreaElement) SetCustomValidity(s string) { - e.Call("setCustomValidity", s) -} - -func (e *HTMLTextAreaElement) Select() { - e.Call("select") -} - -func (e *HTMLTextAreaElement) SetSelectionRange(start, end int, direction string) { - e.Call("setSelectionRange", start, end, direction) -} - -type HTMLTimeElement struct { - *BasicHTMLElement - DateTime string `js:"dateTime"` -} - -type HTMLTitleElement struct { - *BasicHTMLElement - Text string `js:"text"` -} - -// TextTrack represents text track data for elements. It does -// not currently provide any methods or attributes and it hasn't been -// decided yet whether they will be added to this package or a -// separate package. -type TextTrack struct{ *js.Object } - -type HTMLTrackElement struct { - *BasicHTMLElement - Kind string `js:"kind"` - Src string `js:"src"` - Srclang string `js:"srclang"` - Label string `js:"label"` - Default bool `js:"default"` - ReadyState int `js:"readyState"` -} - -func (e *HTMLTrackElement) Track() *TextTrack { - return &TextTrack{e.Get("track")} -} - -type HTMLUListElement struct{ *BasicHTMLElement } -type HTMLUnknownElement struct{ *BasicHTMLElement } - -type HTMLVideoElement struct{ *HTMLMediaElement } - -type ValidityState struct { - *js.Object - CustomError bool `js:"customError"` - PatternMismatch bool `js:"patternMismatch"` - RangeOverflow bool `js:"rangeOverflow"` - RangeUnderflow bool `js:"rangeUnderflow"` - StepMismatch bool `js:"stepMismatch"` - TooLong bool `js:"tooLong"` - TypeMismatch bool `js:"typeMismatch"` - Valid bool `js:"valid"` - ValueMissing bool `js:"valueMissing"` -} - -type CSSStyleDeclaration struct{ *js.Object } - -func (css *CSSStyleDeclaration) ToMap() map[string]string { - m := make(map[string]string) - N := css.Get("length").Int() - for i := 0; i < N; i++ { - name := css.Call("item", i).String() - value := css.Call("getPropertyValue", name).String() - m[name] = value - } - - return m -} - -func (css *CSSStyleDeclaration) RemoveProperty(name string) { - css.Call("removeProperty", name) -} - -func (css *CSSStyleDeclaration) GetPropertyValue(name string) string { - return toString(css.Call("getPropertyValue", name)) -} - -func (css *CSSStyleDeclaration) GetPropertyPriority(name string) string { - return toString(css.Call("getPropertyPriority", name)) -} - -func (css *CSSStyleDeclaration) SetProperty(name, value, priority string) { - css.Call("setProperty", name, value, priority) -} - -func (css *CSSStyleDeclaration) Index(idx int) string { - return css.Call("index", idx).String() -} - -func (css *CSSStyleDeclaration) Length() int { - return css.Get("length").Int() -} - -type Text struct { - *BasicNode -} diff --git a/vendor/honnef.co/go/js/dom/events.go b/vendor/honnef.co/go/js/dom/events.go deleted file mode 100644 index b168e97b8..000000000 --- a/vendor/honnef.co/go/js/dom/events.go +++ /dev/null @@ -1,412 +0,0 @@ -package dom - -import ( - "time" - - "github.com/gopherjs/gopherjs/js" -) - -func WrapEvent(o *js.Object) Event { - return wrapEvent(o) -} - -func wrapEvent(o *js.Object) Event { - if o == nil || o == js.Undefined { - return nil - } - ev := &BasicEvent{o} - c := o.Get("constructor") - switch c { - case js.Global.Get("AnimationEvent"): - return &AnimationEvent{ev} - case js.Global.Get("AudioProcessingEvent"): - return &AudioProcessingEvent{ev} - case js.Global.Get("BeforeInputEvent"): - return 
&BeforeInputEvent{ev} - case js.Global.Get("BeforeUnloadEvent"): - return &BeforeUnloadEvent{ev} - case js.Global.Get("BlobEvent"): - return &BlobEvent{ev} - case js.Global.Get("ClipboardEvent"): - return &ClipboardEvent{ev} - case js.Global.Get("CloseEvent"): - return &CloseEvent{BasicEvent: ev} - case js.Global.Get("CompositionEvent"): - return &CompositionEvent{ev} - case js.Global.Get("CSSFontFaceLoadEvent"): - return &CSSFontFaceLoadEvent{ev} - case js.Global.Get("CustomEvent"): - return &CustomEvent{ev} - case js.Global.Get("DeviceLightEvent"): - return &DeviceLightEvent{ev} - case js.Global.Get("DeviceMotionEvent"): - return &DeviceMotionEvent{ev} - case js.Global.Get("DeviceOrientationEvent"): - return &DeviceOrientationEvent{ev} - case js.Global.Get("DeviceProximityEvent"): - return &DeviceProximityEvent{ev} - case js.Global.Get("DOMTransactionEvent"): - return &DOMTransactionEvent{ev} - case js.Global.Get("DragEvent"): - return &DragEvent{ev} - case js.Global.Get("EditingBeforeInputEvent"): - return &EditingBeforeInputEvent{ev} - case js.Global.Get("ErrorEvent"): - return &ErrorEvent{ev} - case js.Global.Get("FocusEvent"): - return &FocusEvent{ev} - case js.Global.Get("GamepadEvent"): - return &GamepadEvent{ev} - case js.Global.Get("HashChangeEvent"): - return &HashChangeEvent{ev} - case js.Global.Get("IDBVersionChangeEvent"): - return &IDBVersionChangeEvent{ev} - case js.Global.Get("KeyboardEvent"): - return &KeyboardEvent{BasicEvent: ev} - case js.Global.Get("MediaStreamEvent"): - return &MediaStreamEvent{ev} - case js.Global.Get("MessageEvent"): - return &MessageEvent{BasicEvent: ev} - case js.Global.Get("MouseEvent"): - return &MouseEvent{UIEvent: &UIEvent{ev}} - case js.Global.Get("MutationEvent"): - return &MutationEvent{ev} - case js.Global.Get("OfflineAudioCompletionEvent"): - return &OfflineAudioCompletionEvent{ev} - case js.Global.Get("PageTransitionEvent"): - return &PageTransitionEvent{ev} - case js.Global.Get("PointerEvent"): - return &PointerEvent{ev} - case js.Global.Get("PopStateEvent"): - return &PopStateEvent{ev} - case js.Global.Get("ProgressEvent"): - return &ProgressEvent{ev} - case js.Global.Get("RelatedEvent"): - return &RelatedEvent{ev} - case js.Global.Get("RTCPeerConnectionIceEvent"): - return &RTCPeerConnectionIceEvent{ev} - case js.Global.Get("SensorEvent"): - return &SensorEvent{ev} - case js.Global.Get("StorageEvent"): - return &StorageEvent{ev} - case js.Global.Get("SVGEvent"): - return &SVGEvent{ev} - case js.Global.Get("SVGZoomEvent"): - return &SVGZoomEvent{ev} - case js.Global.Get("TimeEvent"): - return &TimeEvent{ev} - case js.Global.Get("TouchEvent"): - return &TouchEvent{BasicEvent: ev} - case js.Global.Get("TrackEvent"): - return &TrackEvent{ev} - case js.Global.Get("TransitionEvent"): - return &TransitionEvent{ev} - case js.Global.Get("UIEvent"): - return &UIEvent{ev} - case js.Global.Get("UserProximityEvent"): - return &UserProximityEvent{ev} - case js.Global.Get("WheelEvent"): - return &WheelEvent{BasicEvent: ev} - default: - return ev - } -} - -const ( - EvPhaseNone = 0 - EvPhaseCapturing = 1 - EvPhaseAtTarget = 2 - EvPhaseBubbling = 3 -) - -type Event interface { - Bubbles() bool - Cancelable() bool - CurrentTarget() Element - DefaultPrevented() bool - EventPhase() int - Target() Element - Timestamp() time.Time - Type() string - PreventDefault() - StopImmediatePropagation() - StopPropagation() - Underlying() *js.Object -} - -// Type BasicEvent implements the Event interface and is embedded by -// concrete event types. 
-type BasicEvent struct{ *js.Object } - -type EventOptions struct { - Bubbles bool - Cancelable bool -} - -func CreateEvent(typ string, opts EventOptions) *BasicEvent { - var event = js.Global.Get("Event").New(typ, js.M{ - "bubbles": opts.Bubbles, - "cancelable": opts.Cancelable, - }) - return &BasicEvent{event} -} - -func (ev *BasicEvent) Bubbles() bool { - return ev.Get("bubbles").Bool() -} - -func (ev *BasicEvent) Cancelable() bool { - return ev.Get("cancelable").Bool() -} - -func (ev *BasicEvent) CurrentTarget() Element { - return wrapElement(ev.Get("currentTarget")) -} - -func (ev *BasicEvent) DefaultPrevented() bool { - return ev.Get("defaultPrevented").Bool() -} - -func (ev *BasicEvent) EventPhase() int { - return ev.Get("eventPhase").Int() -} - -func (ev *BasicEvent) Target() Element { - return wrapElement(ev.Get("target")) -} - -func (ev *BasicEvent) Timestamp() time.Time { - ms := ev.Get("timeStamp").Int() - s := ms / 1000 - ns := (ms % 1000 * 1e6) - return time.Unix(int64(s), int64(ns)) -} - -func (ev *BasicEvent) Type() string { - return ev.Get("type").String() -} - -func (ev *BasicEvent) PreventDefault() { - ev.Call("preventDefault") -} - -func (ev *BasicEvent) StopImmediatePropagation() { - ev.Call("stopImmediatePropagation") -} - -func (ev *BasicEvent) StopPropagation() { - ev.Call("stopPropagation") -} - -func (ev *BasicEvent) Underlying() *js.Object { - return ev.Object -} - -type AnimationEvent struct{ *BasicEvent } -type AudioProcessingEvent struct{ *BasicEvent } -type BeforeInputEvent struct{ *BasicEvent } -type BeforeUnloadEvent struct{ *BasicEvent } -type BlobEvent struct{ *BasicEvent } -type ClipboardEvent struct{ *BasicEvent } - -type CloseEvent struct { - *BasicEvent - Code int `js:"code"` - Reason string `js:"reason"` - WasClean bool `js:"wasClean"` -} - -type CompositionEvent struct{ *BasicEvent } -type CSSFontFaceLoadEvent struct{ *BasicEvent } -type CustomEvent struct{ *BasicEvent } -type DeviceLightEvent struct{ *BasicEvent } -type DeviceMotionEvent struct{ *BasicEvent } -type DeviceOrientationEvent struct{ *BasicEvent } -type DeviceProximityEvent struct{ *BasicEvent } -type DOMTransactionEvent struct{ *BasicEvent } -type DragEvent struct{ *BasicEvent } -type EditingBeforeInputEvent struct{ *BasicEvent } -type ErrorEvent struct{ *BasicEvent } - -type FocusEvent struct{ *BasicEvent } - -func (ev *FocusEvent) RelatedTarget() Element { - return wrapElement(ev.Get("relatedTarget")) -} - -type GamepadEvent struct{ *BasicEvent } -type HashChangeEvent struct{ *BasicEvent } -type IDBVersionChangeEvent struct{ *BasicEvent } - -const ( - KeyLocationStandard = 0 - KeyLocationLeft = 1 - KeyLocationRight = 2 - KeyLocationNumpad = 3 -) - -type KeyboardEvent struct { - *BasicEvent - AltKey bool `js:"altKey"` - CharCode int `js:"charCode"` - CtrlKey bool `js:"ctrlKey"` - Key string `js:"key"` - KeyIdentifier string `js:"keyIdentifier"` - KeyCode int `js:"keyCode"` - Locale string `js:"locale"` - Location int `js:"location"` - KeyLocation int `js:"keyLocation"` - MetaKey bool `js:"metaKey"` - Repeat bool `js:"repeat"` - ShiftKey bool `js:"shiftKey"` -} - -func (ev *KeyboardEvent) ModifierState(mod string) bool { - return ev.Call("getModifierState", mod).Bool() -} - -type MediaStreamEvent struct{ *BasicEvent } - -type MessageEvent struct { - *BasicEvent - Data *js.Object `js:"data"` -} - -type MouseEvent struct { - *UIEvent - AltKey bool `js:"altKey"` - Button int `js:"button"` - ClientX int `js:"clientX"` - ClientY int `js:"clientY"` - CtrlKey bool `js:"ctrlKey"` - MetaKey 
bool `js:"metaKey"` - MovementX int `js:"movementX"` - MovementY int `js:"movementY"` - ScreenX int `js:"screenX"` - ScreenY int `js:"screenY"` - ShiftKey bool `js:"shiftKey"` -} - -func (ev *MouseEvent) RelatedTarget() Element { - return wrapElement(ev.Get("relatedTarget")) -} - -func (ev *MouseEvent) ModifierState(mod string) bool { - return ev.Call("getModifierState", mod).Bool() -} - -type MutationEvent struct{ *BasicEvent } -type OfflineAudioCompletionEvent struct{ *BasicEvent } -type PageTransitionEvent struct{ *BasicEvent } -type PointerEvent struct{ *BasicEvent } -type PopStateEvent struct{ *BasicEvent } -type ProgressEvent struct{ *BasicEvent } -type RelatedEvent struct{ *BasicEvent } -type RTCPeerConnectionIceEvent struct{ *BasicEvent } -type SensorEvent struct{ *BasicEvent } -type StorageEvent struct{ *BasicEvent } -type SVGEvent struct{ *BasicEvent } -type SVGZoomEvent struct{ *BasicEvent } -type TimeEvent struct{ *BasicEvent } - -// TouchEvent represents an event sent when the state of contacts with a touch-sensitive -// surface changes. This surface can be a touch screen or trackpad, for example. The event -// can describe one or more points of contact with the screen and includes support for -// detecting movement, addition and removal of contact points, and so forth. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/TouchEvent. -type TouchEvent struct { - *BasicEvent - AltKey bool `js:"altKey"` - CtrlKey bool `js:"ctrlKey"` - MetaKey bool `js:"metaKey"` - ShiftKey bool `js:"shiftKey"` -} - -// ChangedTouches lists all individual points of contact whose states changed between -// the previous touch event and this one. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/TouchEvent/changedTouches. -func (ev *TouchEvent) ChangedTouches() []*Touch { - return touchListToTouches(ev.Get("changedTouches")) -} - -// TargetTouches lists all points of contact that are both currently in contact with the -// touch surface and were also started on the same element that is the target of the event. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/TouchEvent/targetTouches. -func (ev *TouchEvent) TargetTouches() []*Touch { - return touchListToTouches(ev.Get("targetTouches")) -} - -// Touches lists all current points of contact with the surface, regardless of target -// or changed status. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/TouchEvent/touches. -func (ev *TouchEvent) Touches() []*Touch { - return touchListToTouches(ev.Get("touches")) -} - -func touchListToTouches(tl *js.Object) []*Touch { - out := make([]*Touch, tl.Length()) - for i := range out { - out[i] = &Touch{Object: tl.Index(i)} - } - return out -} - -// Touch represents a single contact point on a touch-sensitive device. The contact point -// is commonly a finger or stylus and the device may be a touchscreen or trackpad. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/Touch. 
-type Touch struct { - *js.Object - Identifier int `js:"identifier"` - ScreenX float64 `js:"screenX"` - ScreenY float64 `js:"screenY"` - ClientX float64 `js:"clientX"` - ClientY float64 `js:"clientY"` - PageX float64 `js:"pageX"` - PageY float64 `js:"pageY"` - RadiusX float64 `js:"radiusX"` - RadiusY float64 `js:"radiusY"` - RotationAngle float64 `js:"rotationAngle"` - Force float64 `js:"force"` -} - -// Target returns the Element on which the touch point started when it was first placed -// on the surface, even if the touch point has since moved outside the interactive area -// of that element or even been removed from the document. -// -// Reference: https://developer.mozilla.org/en-US/docs/Web/API/Touch/target. -func (t *Touch) Target() Element { - return wrapElement(t.Get("target")) -} - -type TrackEvent struct{ *BasicEvent } -type TransitionEvent struct{ *BasicEvent } -type UIEvent struct{ *BasicEvent } -type UserProximityEvent struct{ *BasicEvent } - -const ( - DeltaPixel = 0 - DeltaLine = 1 - DeltaPage = 2 -) - -type WheelEvent struct { - *BasicEvent - DeltaX float64 `js:"deltaX"` - DeltaY float64 `js:"deltaY"` - DeltaZ float64 `js:"deltaZ"` - DeltaMode int `js:"deltaMode"` -} - -type EventTarget interface { - // AddEventListener adds a new event listener and returns the - // wrapper function it generated. If using RemoveEventListener, - // that wrapper has to be used. - AddEventListener(typ string, useCapture bool, listener func(Event)) func(*js.Object) - RemoveEventListener(typ string, useCapture bool, listener func(*js.Object)) - DispatchEvent(event Event) bool -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9dce3e330..740757844 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,26 +1,374 @@ -# github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c -github.com/gopherjs/gopherjs/compiler -github.com/gopherjs/gopherjs/compiler/analysis -github.com/gopherjs/gopherjs/compiler/astutil -github.com/gopherjs/gopherjs/compiler/filter -github.com/gopherjs/gopherjs/compiler/prelude -github.com/gopherjs/gopherjs/compiler/typesutil +# bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898 +bazil.org/fuse +bazil.org/fuse/fs +bazil.org/fuse/fuseutil +bazil.org/fuse/syscallx +# cloud.google.com/go v0.39.0 +cloud.google.com/go/compute/metadata +cloud.google.com/go/datastore +cloud.google.com/go/iam +cloud.google.com/go/internal +cloud.google.com/go/internal/fields +cloud.google.com/go/internal/optional +cloud.google.com/go/internal/trace +cloud.google.com/go/internal/version +cloud.google.com/go/logging +cloud.google.com/go/logging/apiv2 +cloud.google.com/go/logging/internal +cloud.google.com/go/storage +# github.com/FiloSottile/b2 v0.0.0-20170207175032-b197f7a2c317 +github.com/FiloSottile/b2 +# github.com/aws/aws-sdk-go v1.34.0 +github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/arn +github.com/aws/aws-sdk-go/aws/awserr +github.com/aws/aws-sdk-go/aws/awsutil +github.com/aws/aws-sdk-go/aws/client +github.com/aws/aws-sdk-go/aws/client/metadata +github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds +github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/csm +github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request 
+github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/context +github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/s3err +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/internal/strings +github.com/aws/aws-sdk-go/internal/sync/singleflight +github.com/aws/aws-sdk-go/private/checksum +github.com/aws/aws-sdk-go/private/protocol +github.com/aws/aws-sdk-go/private/protocol/eventstream +github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil +github.com/aws/aws-sdk-go/private/protocol/rest +github.com/aws/aws-sdk-go/private/protocol/restxml +github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/s3/internal/arn +github.com/aws/aws-sdk-go/service/s3/s3iface +github.com/aws/aws-sdk-go/service/s3/s3manager +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/bradfitz/latlong v0.0.0-20140711231157-b74550508561 +github.com/bradfitz/latlong +# github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f +github.com/cznic/fileutil +# github.com/cznic/internal v0.0.0-20170905175358-4747030f7cf2 +github.com/cznic/internal/buffer +github.com/cznic/internal/file +github.com/cznic/internal/slice +# github.com/cznic/kv v0.0.0-20170515202733-892ccf731fb7 +github.com/cznic/kv +# github.com/cznic/lldb v1.1.0 +github.com/cznic/lldb +# github.com/cznic/mathutil v0.0.0-20180214153908-5455a562bccb +github.com/cznic/mathutil +# github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 +github.com/cznic/sortutil +# github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc +github.com/cznic/zappy +# github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 +github.com/edsrzf/mmap-go +# github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 +github.com/garyburd/go-oauth/oauth +# github.com/go-sql-driver/mysql v1.5.0 +github.com/go-sql-driver/mysql +# github.com/golang/protobuf v1.3.1 +github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty +github.com/golang/protobuf/ptypes/struct +github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/ptypes/wrappers +# github.com/golang/snappy v0.0.0-20170215233205-553a64147049 +github.com/golang/snappy +# github.com/googleapis/gax-go/v2 v2.0.4 +github.com/googleapis/gax-go/v2 +# github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 github.com/gopherjs/gopherjs/js -# github.com/gopherjs/jsbuiltin v0.0.0-20180426082241-50091555e127 -github.com/gopherjs/jsbuiltin -# github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 -github.com/neelance/astrewrite -# golang.org/x/tools v0.0.0-20190529203303-fb6c8ffd2207 -golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/types/typeutil -golang.org/x/tools/go/internal/gcimporter -golang.org/x/tools/go/ast/astutil -# honnef.co/go/js/dom v0.0.0-20180323154144-6da835bec70f -honnef.co/go/js/dom -# myitcv.io 
v0.0.0-20190513145353-42487431adac -myitcv.io/react -myitcv.io/react/internal/bundle -myitcv.io/react/internal/core -myitcv.io/react/internal/dev -myitcv.io/react/internal/preact -myitcv.io/react/internal/prod +# github.com/gopherjs/jquery v0.0.0-20180404123100-3ba2b901425e +github.com/gopherjs/jquery +# github.com/gorilla/websocket v1.4.0 +github.com/gorilla/websocket +# github.com/hashicorp/golang-lru v0.5.1 +github.com/hashicorp/golang-lru/simplelru +# github.com/hjfreyer/taglib-go v0.0.0-20151027170453-0ef8bba9c41b +github.com/hjfreyer/taglib-go/taglib +github.com/hjfreyer/taglib-go/taglib/id3 +# github.com/jmespath/go-jmespath v0.3.0 +github.com/jmespath/go-jmespath +# github.com/kr/fs v0.1.0 +github.com/kr/fs +# github.com/lib/pq v1.1.1 +github.com/lib/pq +github.com/lib/pq/oid +github.com/lib/pq/scram +# github.com/mailgun/mailgun-go v0.0.0-20171127222028-17e8bd11e87c +github.com/mailgun/mailgun-go +# github.com/mattn/go-mastodon v0.0.5-0.20190517015615-8f6192e26b66 +github.com/mattn/go-mastodon +# github.com/mattn/go-sqlite3 v1.6.0 +github.com/mattn/go-sqlite3 +# github.com/miekg/dns v0.0.0-20161003181808-3f1f7c8ec9ea +github.com/miekg/dns +# github.com/nf/cr2 v0.0.0-20140528043846-05d46fef4f2f +github.com/nf/cr2 +# github.com/pkg/errors v0.9.1 +github.com/pkg/errors +# github.com/pkg/sftp v0.0.0-20180419200840-5bf2a174b604 +github.com/pkg/sftp +# github.com/plaid/plaid-go v0.0.0-20161222051224-02b6af68061b +github.com/plaid/plaid-go/plaid +# github.com/russross/blackfriday v2.0.0+incompatible +github.com/russross/blackfriday +# github.com/rwcarlsen/goexif v0.0.0-20180518182100-8d986c03457a +github.com/rwcarlsen/goexif/exif +github.com/rwcarlsen/goexif/tiff +# github.com/shurcooL/sanitized_anchor_name v0.0.0-20150515002706-11a20b799bf2 +github.com/shurcooL/sanitized_anchor_name +# github.com/syndtr/goleveldb v0.0.0-20180608030153-db3ee9ee8931 +github.com/syndtr/goleveldb/leveldb +github.com/syndtr/goleveldb/leveldb/cache +github.com/syndtr/goleveldb/leveldb/comparer +github.com/syndtr/goleveldb/leveldb/errors +github.com/syndtr/goleveldb/leveldb/filter +github.com/syndtr/goleveldb/leveldb/iterator +github.com/syndtr/goleveldb/leveldb/journal +github.com/syndtr/goleveldb/leveldb/memdb +github.com/syndtr/goleveldb/leveldb/opt +github.com/syndtr/goleveldb/leveldb/storage +github.com/syndtr/goleveldb/leveldb/table +github.com/syndtr/goleveldb/leveldb/util +# github.com/tgulacsi/picago v0.0.0-20171229130838-9e1ac2306c70 +github.com/tgulacsi/picago +# github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 +github.com/tomnomnom/linkheader +# go.opencensus.io v0.21.0 +go.opencensus.io +go.opencensus.io/internal +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata +go.opencensus.io/metric/metricproducer +go.opencensus.io/plugin/ocgrpc +go.opencensus.io/plugin/ochttp +go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/resource +go.opencensus.io/stats +go.opencensus.io/stats/internal +go.opencensus.io/stats/view +go.opencensus.io/tag +go.opencensus.io/trace +go.opencensus.io/trace/internal +go.opencensus.io/trace/propagation +go.opencensus.io/trace/tracestate +# go4.org v0.0.0-20190218023631-ce4c26f7be8e +go4.org/cloud/cloudlaunch +go4.org/cloud/google/gceutil +go4.org/cloud/google/gcsutil +go4.org/ctxutil +go4.org/errorutil +go4.org/fault +go4.org/jsonconfig +go4.org/legal +go4.org/lock +go4.org/media/heif +go4.org/media/heif/bmff +go4.org/net/throttle +go4.org/oauthutil +go4.org/readerutil +go4.org/rollsum +go4.org/strutil +go4.org/syncutil 
+go4.org/syncutil/singleflight +go4.org/types +go4.org/wkfs +go4.org/wkfs/gcs +go4.org/writerutil +# golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f +golang.org/x/crypto/acme +golang.org/x/crypto/acme/autocert +golang.org/x/crypto/cast5 +golang.org/x/crypto/curve25519 +golang.org/x/crypto/ed25519 +golang.org/x/crypto/ed25519/internal/edwards25519 +golang.org/x/crypto/internal/chacha20 +golang.org/x/crypto/internal/subtle +golang.org/x/crypto/nacl/secretbox +golang.org/x/crypto/openpgp +golang.org/x/crypto/openpgp/armor +golang.org/x/crypto/openpgp/elgamal +golang.org/x/crypto/openpgp/errors +golang.org/x/crypto/openpgp/packet +golang.org/x/crypto/openpgp/s2k +golang.org/x/crypto/pbkdf2 +golang.org/x/crypto/poly1305 +golang.org/x/crypto/salsa20/salsa +golang.org/x/crypto/scrypt +golang.org/x/crypto/ssh +# golang.org/x/image v0.0.0-20190523035834-f03afa92d3ff +golang.org/x/image/draw +golang.org/x/image/math/f64 +golang.org/x/image/tiff +golang.org/x/image/tiff/lzw +# golang.org/x/net v0.0.0-20200202094626-16171245cfb2 +golang.org/x/net/context +golang.org/x/net/context/ctxhttp +golang.org/x/net/html +golang.org/x/net/html/atom +golang.org/x/net/html/charset +golang.org/x/net/http/httpguts +golang.org/x/net/http2 +golang.org/x/net/http2/hpack +golang.org/x/net/idna +golang.org/x/net/internal/timeseries +golang.org/x/net/trace +golang.org/x/net/xsrftoken +# golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0 +golang.org/x/oauth2 +golang.org/x/oauth2/google +golang.org/x/oauth2/internal +golang.org/x/oauth2/jws +golang.org/x/oauth2/jwt +# golang.org/x/sync v0.0.0-20190423024810-112230192c58 +golang.org/x/sync/errgroup +golang.org/x/sync/semaphore +# golang.org/x/sys v0.0.0-20190528183647-3626398d7749 +golang.org/x/sys/cpu +golang.org/x/sys/unix +golang.org/x/sys/windows +# golang.org/x/text v0.3.2 +golang.org/x/text/encoding +golang.org/x/text/encoding/charmap +golang.org/x/text/encoding/htmlindex +golang.org/x/text/encoding/internal +golang.org/x/text/encoding/internal/identifier +golang.org/x/text/encoding/japanese +golang.org/x/text/encoding/korean +golang.org/x/text/encoding/simplifiedchinese +golang.org/x/text/encoding/traditionalchinese +golang.org/x/text/encoding/unicode +golang.org/x/text/internal/language +golang.org/x/text/internal/language/compact +golang.org/x/text/internal/tag +golang.org/x/text/internal/utf8internal +golang.org/x/text/language +golang.org/x/text/runes +golang.org/x/text/secure/bidirule +golang.org/x/text/transform +golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm +# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 +golang.org/x/time/rate +# google.golang.org/api v0.5.0 +google.golang.org/api/cloudresourcemanager/v1 +google.golang.org/api/compute/v1 +google.golang.org/api/drive/v2 +google.golang.org/api/drive/v3 +google.golang.org/api/gensupport +google.golang.org/api/googleapi +google.golang.org/api/googleapi/internal/uritemplates +google.golang.org/api/googleapi/transport +google.golang.org/api/internal +google.golang.org/api/iterator +google.golang.org/api/option +google.golang.org/api/servicemanagement/v1 +google.golang.org/api/sqladmin/v1beta4 +google.golang.org/api/storage/v1 +google.golang.org/api/support/bundler +google.golang.org/api/transport +google.golang.org/api/transport/grpc +google.golang.org/api/transport/http +google.golang.org/api/transport/http/internal/propagation +# google.golang.org/appengine v1.6.0 +google.golang.org/appengine +google.golang.org/appengine/internal 
+google.golang.org/appengine/internal/app_identity +google.golang.org/appengine/internal/base +google.golang.org/appengine/internal/datastore +google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules +google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/socket +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket +google.golang.org/appengine/urlfetch +# google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/distribution +google.golang.org/genproto/googleapis/api/label +google.golang.org/genproto/googleapis/api/metric +google.golang.org/genproto/googleapis/api/monitoredres +google.golang.org/genproto/googleapis/datastore/v1 +google.golang.org/genproto/googleapis/iam/v1 +google.golang.org/genproto/googleapis/logging/type +google.golang.org/genproto/googleapis/logging/v2 +google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/type/expr +google.golang.org/genproto/googleapis/type/latlng +google.golang.org/genproto/protobuf/field_mask +# google.golang.org/grpc v1.21.0 +google.golang.org/grpc +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/credentials/oauth +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/naming +google.golang.org/grpc/peer +google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap +# gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 +gopkg.in/mgo.v2 +gopkg.in/mgo.v2/bson +gopkg.in/mgo.v2/internal/json +gopkg.in/mgo.v2/internal/sasl +gopkg.in/mgo.v2/internal/scram +# rsc.io/pdf v0.0.0-20170302045715-1d34785eb915 +rsc.io/pdf +# rsc.io/qr v0.1.0 +rsc.io/qr +rsc.io/qr/coding +rsc.io/qr/gf256 diff --git a/vendor/myitcv.io/LICENSE b/vendor/myitcv.io/LICENSE deleted file mode 100644 index da07b6310..000000000 --- a/vendor/myitcv.io/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and 
this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/fsnotify/fsnotify/AUTHORS b/vendor/myitcv.io/_vendor/src/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41c5..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/myitcv.io/_vendor/src/github.com/fsnotify/fsnotify/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index f21e54080..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/AUTHORS b/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/CONTRIBUTORS b/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- diff --git a/vendor/myitcv.io/_vendor/src/github.com/gopherjs/gopherjs/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/gopherjs/gopherjs/LICENSE deleted file mode 100644 index d496fef10..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/gopherjs/gopherjs/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Richard Musiol. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/gopherjs/jsbuiltin/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/gopherjs/jsbuiltin/LICENSE deleted file mode 100644 index 1eebfb119..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/gopherjs/jsbuiltin/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2015 Richard Musiol. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/LICENSE deleted file mode 100644 index c07a9311f..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/LICENSE +++ /dev/null @@ -1,3 +0,0 @@ -This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication -license. Its contents can be found at: -http://creativecommons.org/publicdomain/zero/1.0 diff --git a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkFile/file1 b/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkFile/file1 deleted file mode 120000 index 6e09272b6..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkFile/file1 +++ /dev/null @@ -1 +0,0 @@ -../symlinkSrc/file1 \ No newline at end of file diff --git a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkParent/symlinkTarget b/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkParent/symlinkTarget deleted file mode 120000 index ccb1f73af..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkParent/symlinkTarget +++ /dev/null @@ -1 +0,0 @@ -../symlinkSrc/ \ No newline at end of file diff --git a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkRecursiveParent/symlinkTarget b/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkRecursiveParent/symlinkTarget deleted file mode 120000 index 2ee9371dc..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/jteeuwen/go-bindata/testdata/symlinkRecursiveParent/symlinkTarget +++ /dev/null @@ -1 +0,0 @@ -../symlinkRecursiveParent/ \ No newline at end of file diff --git a/vendor/myitcv.io/_vendor/src/github.com/kisielk/gotool/LEGAL b/vendor/myitcv.io/_vendor/src/github.com/kisielk/gotool/LEGAL deleted file mode 100644 index 72b859cd6..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/kisielk/gotool/LEGAL +++ /dev/null @@ -1,32 +0,0 @@ -All the files in this distribution are covered under either the MIT -license (see the file LICENSE) except some files mentioned below. - -match.go, match_test.go: - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/kisielk/gotool/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/kisielk/gotool/LICENSE deleted file mode 100644 index 1cbf651e2..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/kisielk/gotool/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Kamil Kisiel - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/kr/fs/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/kr/fs/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/kr/fs/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/kr/pretty/License b/vendor/myitcv.io/_vendor/src/github.com/kr/pretty/License deleted file mode 100644 index 05c783ccf..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/kr/pretty/License +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/kr/pty/License b/vendor/myitcv.io/_vendor/src/github.com/kr/pty/License deleted file mode 100644 index 6b7558b6b..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/kr/pty/License +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2011 Keith Rarick - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall -be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/myitcv.io/_vendor/src/github.com/kr/text/License b/vendor/myitcv.io/_vendor/src/github.com/kr/text/License deleted file mode 100644 index 480a32805..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/kr/text/License +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/neelance/sourcemap/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/neelance/sourcemap/LICENSE deleted file mode 100644 index eb7eccc1b..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/neelance/sourcemap/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2014 Richard Musiol. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/github.com/rogpeppe/rog-go/go9p/AUTHORS b/vendor/myitcv.io/_vendor/src/github.com/rogpeppe/rog-go/go9p/AUTHORS deleted file mode 100644 index 9d63b6432..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/rogpeppe/rog-go/go9p/AUTHORS +++ /dev/null @@ -1,13 +0,0 @@ -# This is the official list of Go9p authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. 
- -# Names should be added to this file as -# Name or Organization <email address> -# The email address is not required for organizations. - -# Please keep the list sorted. - -Andrey Mirtchovski -Latchesar Ionkov -Roger Peppe diff --git a/vendor/myitcv.io/_vendor/src/github.com/rogpeppe/rog-go/go9p/CONTRIBUTORS b/vendor/myitcv.io/_vendor/src/github.com/rogpeppe/rog-go/go9p/CONTRIBUTORS deleted file mode 100644 index 5ac1f7a57..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/rogpeppe/rog-go/go9p/CONTRIBUTORS +++ /dev/null @@ -1,17 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Go9p repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# XXX more bumph here? -# Names should be added to this file like so: -# Name <email address> - -# Please keep the list sorted. - -Andrey Mirtchovski -Latchesar Ionkov -Roger Peppe diff --git a/vendor/myitcv.io/_vendor/src/github.com/russross/blackfriday/LICENSE.txt b/vendor/myitcv.io/_vendor/src/github.com/russross/blackfriday/LICENSE.txt deleted file mode 100644 index 2885af360..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/russross/blackfriday/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/myitcv.io/_vendor/src/github.com/sclevine/agouti/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/sclevine/agouti/LICENSE deleted file mode 100644 index 939f054ff..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/sclevine/agouti/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Stephen Levine - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/LICENSE.txt b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e266..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_agpl.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_agpl.go deleted file mode 100644 index bc22e9732..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_agpl.go +++ /dev/null @@ -1,683 +0,0 @@ -package cmd - -func initAgpl() { - Licenses["agpl"] = License{ - Name: "GNU Affero General Public License", - PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"}, - Header: ` -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>.`, - Text: ` GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price.
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. 
You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. 
In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. 
- - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. 
- - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see <http://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -<http://www.gnu.org/licenses/>. -`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_apache_2.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_apache_2.go deleted file mode 100644 index 38393d541..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_apache_2.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>.
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initApache2() { - Licenses["apache"] = License{ - Name: "Apache 2.0", - PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"}, - Header: ` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.`, - Text: ` - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go deleted file mode 100644 index 4a847e04a..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initBsdClause2() { - Licenses["freebsd"] = License{ - Name: "Simplified BSD License", - PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd", - "2 clause bsd", "simplified bsd license"}, - Header: `All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.`, - Text: `{{ .copyright }} -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go deleted file mode 100644 index c7476b31f..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initBsdClause3() { - Licenses["bsd"] = License{ - Name: "NewBSD", - PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"}, - Header: `All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.`, - Text: `{{ .copyright }} -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go deleted file mode 100644 index 03e05b3a7..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initGpl2() { - Licenses["gpl2"] = License{ - Name: "GNU General Public License 2.0", - PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"}, - Header: ` -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>.`, - Text: ` GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price.
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. 
- -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. 
(This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. 
- -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type 'show c' for details. - -The hypothetical commands 'show w' and 'show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than 'show w' and 'show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - 'Gnomovision' (which makes passes at compilers) written by James Hacker. - - <signature of Ty Coon>, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License.
-`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go deleted file mode 100644 index ce07679c7..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go +++ /dev/null @@ -1,711 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initGpl3() { - Licenses["gpl3"] = License{ - Name: "GNU General Public License 3.0", - PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"}, - Header: ` -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>.`, - Text: ` GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received.
You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. 
You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. 
In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <http://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type 'show c' for details. - -The hypothetical commands 'show w' and 'show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<http://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<http://www.gnu.org/philosophy/why-not-lgpl.html>. -`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_lgpl.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_lgpl.go deleted file mode 100644 index 0f8b96cad..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_lgpl.go +++ /dev/null @@ -1,186 +0,0 @@ -package cmd - -func initLgpl() { - Licenses["lgpl"] = License{ - Name: "GNU Lesser General Public License", - PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"}, - Header: ` -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. 
- -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>.`, - Text: ` GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library.`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_mit.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_mit.go deleted file mode 100644 index bd2d0c4fa..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/license_mit.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initMit() { - Licenses["mit"] = License{ - Name: "MIT License", - PossibleMatches: []string{"mit"}, - Header: ` -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE.`, - Text: `The MIT License (MIT) - -{{ .copyright }} - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -`, - } -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/licenses.go b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/licenses.go deleted file mode 100644 index a070134dd..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/licenses.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright © 2015 Steve Francia <spf@spf13.com>. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -import ( - "strings" - "time" - - "github.com/spf13/viper" -) - -// Licenses contains all possible licenses a user can choose from. -var Licenses = make(map[string]License) - -// License represents a software license agreement, containing the Name of -// the license, its possible matches (on the command line as given to cobra), -// the header to be used with each file on the file's creating, and the text -// of the license -type License struct { - Name string // The type of license in use - PossibleMatches []string // Similar names to guess - Text string // License text data - Header string // License header for source files -} - -func init() { - // Allows a user to not use a license. - Licenses["none"] = License{"None", []string{"none", "false"}, "", ""} - - initApache2() - initMit() - initBsdClause3() - initBsdClause2() - initGpl2() - initGpl3() - initLgpl() - initAgpl() -} - -// getLicense returns license specified by user in flag or in config. -// If user didn't specify the license, it returns Apache License 2.0. -// -// TODO: Inspect project for existing license -func getLicense() License { - // If explicitly flagged, use that. 
- if userLicense != "" { - return findLicense(userLicense) - } - - // If user wants to have custom license, use that. - if viper.IsSet("license.header") || viper.IsSet("license.text") { - return License{Header: viper.GetString("license.header"), - Text: viper.GetString("license.text")} - } - - // If user wants to have built-in license, use that. - if viper.IsSet("license") { - return findLicense(viper.GetString("license")) - } - - // If user didn't set any license, use Apache 2.0 by default. - return Licenses["apache"] -} - -func copyrightLine() string { - author := viper.GetString("author") - - year := viper.GetString("year") // For tests. - if year == "" { - year = time.Now().Format("2006") - } - - return "Copyright © " + year + " " + author -} - -// findLicense looks for License object of built-in licenses. -// If it didn't find license, then the app will be terminated and -// error will be printed. -func findLicense(name string) License { - found := matchLicense(name) - if found == "" { - er("unknown license: " + name) - } - return Licenses[found] -} - -// matchLicense compares the given a license name -// to PossibleMatches of all built-in licenses. -// It returns blank string, if name is blank string or it didn't find -// then appropriate match to name. -func matchLicense(name string) string { - if name == "" { - return "" - } - - for key, lic := range Licenses { - for _, match := range lic.PossibleMatches { - if strings.EqualFold(name, match) { - return key - } - } - } - - return "" -} diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden b/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden deleted file mode 100644 index d64569567..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/myitcv.io/_vendor/src/github.com/spf13/pflag/LICENSE b/vendor/myitcv.io/_vendor/src/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea..000000000 --- a/vendor/myitcv.io/_vendor/src/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/AUTHORS b/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba0..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/CONTRIBUTORS b/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/PATENTS b/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/net/AUTHORS b/vendor/myitcv.io/_vendor/src/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/net/CONTRIBUTORS b/vendor/myitcv.io/_vendor/src/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/net/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/net/PATENTS b/vendor/myitcv.io/_vendor/src/golang.org/x/net/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/AUTHORS b/vendor/myitcv.io/_vendor/src/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/CONTRIBUTORS b/vendor/myitcv.io/_vendor/src/golang.org/x/sys/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/sys/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/PATENTS b/vendor/myitcv.io/_vendor/src/golang.org/x/sys/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/sys/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/AUTHORS b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/CONTRIBUTORS b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/PATENTS b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/cmd/getgo/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/cmd/getgo/LICENSE deleted file mode 100644 index 32017f8fa..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/cmd/getgo/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2017 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/moduleloader/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/moduleloader/LICENSE deleted file mode 100644 index 1723a2247..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/moduleloader/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013-2016 Guy Bedford, Luke Hoban, Addy Osmani - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/typescript/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/typescript/LICENSE deleted file mode 100644 index e7259f843..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/typescript/LICENSE +++ /dev/null @@ -1,55 +0,0 @@ -Apache License - -Version 2.0, January 2004 - -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
- -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of this License; and - -You must cause any modified files to carry prominent notices stating that You changed the files; and - -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/webcomponents/LICENSE b/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/webcomponents/LICENSE deleted file mode 100644 index e648283b4..000000000 --- a/vendor/myitcv.io/_vendor/src/golang.org/x/tools/third_party/webcomponents/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015 The Polymer Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/myitcv.io/_vendor/src/gopkg.in/check.v1/LICENSE b/vendor/myitcv.io/_vendor/src/gopkg.in/check.v1/LICENSE deleted file mode 100644 index 545cf2d33..000000000 --- a/vendor/myitcv.io/_vendor/src/gopkg.in/check.v1/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/gopkg.in/fsnotify/fsnotify.v1/AUTHORS b/vendor/myitcv.io/_vendor/src/gopkg.in/fsnotify/fsnotify.v1/AUTHORS deleted file mode 100644 index 5ab5d41c5..000000000 --- a/vendor/myitcv.io/_vendor/src/gopkg.in/fsnotify/fsnotify.v1/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/myitcv.io/_vendor/src/gopkg.in/fsnotify/fsnotify.v1/LICENSE b/vendor/myitcv.io/_vendor/src/gopkg.in/fsnotify/fsnotify.v1/LICENSE deleted file mode 100644 index f21e54080..000000000 --- a/vendor/myitcv.io/_vendor/src/gopkg.in/fsnotify/fsnotify.v1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/_vendor/src/honnef.co/go/js/dom/LICENSE b/vendor/myitcv.io/_vendor/src/honnef.co/go/js/dom/LICENSE deleted file mode 100644 index 676a9feae..000000000 --- a/vendor/myitcv.io/_vendor/src/honnef.co/go/js/dom/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Dominik Honnef - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/honnef.co/go/js/util/LICENSE b/vendor/myitcv.io/_vendor/src/honnef.co/go/js/util/LICENSE deleted file mode 100644 index 676a9feae..000000000 --- a/vendor/myitcv.io/_vendor/src/honnef.co/go/js/util/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Dominik Honnef - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/honnef.co/go/js/xhr/LICENSE b/vendor/myitcv.io/_vendor/src/honnef.co/go/js/xhr/LICENSE deleted file mode 100644 index 676a9feae..000000000 --- a/vendor/myitcv.io/_vendor/src/honnef.co/go/js/xhr/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Dominik Honnef - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/myitcv.io/_vendor/src/mvdan.cc/sh/LICENSE b/vendor/myitcv.io/_vendor/src/mvdan.cc/sh/LICENSE deleted file mode 100644 index 2a5268e5f..000000000 --- a/vendor/myitcv.io/_vendor/src/mvdan.cc/sh/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2016, Daniel Martí. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/cmd/gai/LICENSE b/vendor/myitcv.io/cmd/gai/LICENSE deleted file mode 100644 index da07b6310..000000000 --- a/vendor/myitcv.io/cmd/gai/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/myitcv.io/cmd/gai/gotype/LICENSE b/vendor/myitcv.io/cmd/gai/gotype/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/myitcv.io/cmd/gai/gotype/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/myitcv.io/cmd/gg/LICENSE b/vendor/myitcv.io/cmd/gg/LICENSE deleted file mode 100644 index da07b6310..000000000 --- a/vendor/myitcv.io/cmd/gg/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/myitcv.io/cmd/gitgodoc/LICENSE b/vendor/myitcv.io/cmd/gitgodoc/LICENSE deleted file mode 100644 index da07b6310..000000000 --- a/vendor/myitcv.io/cmd/gitgodoc/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/myitcv.io/cmd/modpub/LICENSE b/vendor/myitcv.io/cmd/modpub/LICENSE deleted file mode 100644 index 20513b6a5..000000000 --- a/vendor/myitcv.io/cmd/modpub/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2016 Paul Jolly. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/myitcv.io/gogenerate/LICENSE b/vendor/myitcv.io/gogenerate/LICENSE deleted file mode 100644 index da07b6310..000000000 --- a/vendor/myitcv.io/gogenerate/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/myitcv.io/gogenerate/_testFiles/licenseFile.txt b/vendor/myitcv.io/gogenerate/_testFiles/licenseFile.txt deleted file mode 100644 index 50949ffd4..000000000 --- a/vendor/myitcv.io/gogenerate/_testFiles/licenseFile.txt +++ /dev/null @@ -1,2 +0,0 @@ -Copyright (c) Bananaman 2016 -Line 2 diff --git a/vendor/myitcv.io/gogenerate/coregogenerate.go b/vendor/myitcv.io/gogenerate/coregogenerate.go deleted file mode 100644 index 2fbced99d..000000000 --- a/vendor/myitcv.io/gogenerate/coregogenerate.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk), all rights reserved. -// Use of this document is governed by a license found in the LICENSE document. - -package gogenerate - -import ( - "bufio" - "bytes" - "fmt" - "go/build" - "io" - "os" - "path/filepath" - "strconv" - "strings" -) - -// The following code is largely adapted from cmd/go. We therefore reproduce -// its license here. - -// Copyright (c) 2009 The Go Authors. All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: - -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. - -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A generator represents the state of a single Go source file -// being scanned for generator commands. -type generator struct { - f func(line int, dirArgs []string) error - r io.Reader - dir string // full rooted directory of file. - file string // base name of file. - pkg string - commands map[string][]string - lineNum int // current line number. - env []string -} - -// DirFunc runs f on each go generate directive (as defined by -// go generate -help) found in the absolute-named file that is part -// of package pkg -func DirFunc(pkg string, dir, file string, f func(line int, dirArgs []string) error) error { - fi, err := os.Open(filepath.Join(dir, file)) - if err != nil { - return err - } - defer fi.Close() - - g := &generator{ - f: f, - pkg: pkg, - commands: make(map[string][]string), - dir: dir, - file: file, - r: fi, - } - - return g.matches() -} - -// matches scans the current file for go:generate directives, calling g.f on each one. -func (g *generator) matches() (err error) { - // Processing below here calls g.errorf on failure, which panics with an error. - // If we encounter an error, we abort the package. - defer func() { - e := recover() - if e, isErr := e.(error); isErr { - err = e - } - }() - - // Scan for lines that start "//go:generate". - // Can't use bufio.Scanner because it can't handle long lines, - // which are likely to appear when using generate. - input := bufio.NewReader(g.r) - // One line per loop. - for { - g.lineNum++ // 1-indexed. - var buf []byte - buf, err = input.ReadSlice('\n') - if err == bufio.ErrBufferFull { - // Line too long - consume and ignore. - if isGoGenerate(buf) { - g.errorf("directive too long") - } - for err == bufio.ErrBufferFull { - _, err = input.ReadSlice('\n') - } - if err != nil { - break - } - continue - } - - if err != nil { - // Check for marker at EOF without final \n.
- if err == io.EOF && isGoGenerate(buf) { - err = io.ErrUnexpectedEOF - } - break - } - - if !isGoGenerate(buf) { - continue - } - - g.setEnv() - words := g.split(string(buf)) - if len(words) == 0 { - g.errorf("no arguments to directive") - } - if words[0] == "-command" { - g.setShorthand(words) - continue - } - - err := g.f(g.lineNum, words) - if err != nil { - g.errorf("callback error: %v", err) - } - } - if err != nil && err != io.EOF { - g.errorf("error reading") - } - - return nil -} - -func isGoGenerate(buf []byte) bool { - return bytes.HasPrefix(buf, []byte(GoGeneratePrefix+" ")) || bytes.HasPrefix(buf, []byte(GoGeneratePrefix+"\t")) -} - -// setEnv sets the extra environment variables used when executing a -// single go:generate command. -func (g *generator) setEnv() { - g.env = []string{ - "GOARCH=" + build.Default.GOARCH, - "GOOS=" + build.Default.GOOS, - "GOFILE=" + g.file, - "GOLINE=" + strconv.Itoa(g.lineNum), - "GOPACKAGE=" + g.pkg, - "DOLLAR=" + "$", - } -} - -// split breaks the line into words, evaluating quoted -// strings and evaluating environment variables. -// The initial //go:generate element is present in line. -func (g *generator) split(line string) []string { - // Parse line, obeying quoted strings. - var words []string - line = line[len("//go:generate ") : len(line)-1] // Drop preamble and final newline. - // There may still be a carriage return. - if len(line) > 0 && line[len(line)-1] == '\r' { - line = line[:len(line)-1] - } - // One (possibly quoted) word per iteration. -Words: - for { - line = strings.TrimLeft(line, " \t") - if len(line) == 0 { - break - } - if line[0] == '"' { - for i := 1; i < len(line); i++ { - c := line[i] // Only looking for ASCII so this is OK. - switch c { - case '\\': - if i+1 == len(line) { - g.errorf("bad backslash") - } - i++ // Absorb next byte (If it's a multibyte we'll get an error in Unquote). - case '"': - word, err := strconv.Unquote(line[0 : i+1]) - if err != nil { - g.errorf("bad quoted string") - } - words = append(words, word) - line = line[i+1:] - // Check the next character is space or end of line. - if len(line) > 0 && line[0] != ' ' && line[0] != '\t' { - g.errorf("expect space after quoted argument") - } - continue Words - } - } - g.errorf("mismatched quoted string") - } - i := strings.IndexAny(line, " \t") - if i < 0 { - i = len(line) - } - words = append(words, line[0:i]) - line = line[i:] - } - // Substitute command if required. - if len(words) > 0 && g.commands[words[0]] != nil { - // Replace 0th word by command substitution. - words = append(g.commands[words[0]], words[1:]...) - } - // Substitute environment variables. - for i, word := range words { - words[i] = os.Expand(word, g.expandVar) - } - return words -} - -// errorf logs an error message prefixed with the file and line number. -// It then exits the program (with exit status 1) because generation stops -// at the first error. -func (g *generator) errorf(format string, args ...interface{}) { - panic(fmt.Errorf("%s:%d: %s", filepath.Join(g.dir, g.file), g.lineNum, fmt.Sprintf(format, args...))) -} - -// expandVar expands the $XXX invocation in word. It is called -// by os.Expand. -func (g *generator) expandVar(word string) string { - w := word + "=" - for _, e := range g.env { - if strings.HasPrefix(e, w) { - return e[len(w):] - } - } - return os.Getenv(word) -} - -// setShorthand installs a new shorthand as defined by a -command directive. -func (g *generator) setShorthand(words []string) { - // Create command shorthand. 
- if len(words) == 1 { - g.errorf("no command specified for -command") - } - command := words[1] - if g.commands[command] != nil { - g.errorf("command %q multiply defined", command) - } - g.commands[command] = words[2:len(words):len(words)] // force later append to make copy -} diff --git a/vendor/myitcv.io/gogenerate/gogenerate.go b/vendor/myitcv.io/gogenerate/gogenerate.go deleted file mode 100644 index 6f3f50f78..000000000 --- a/vendor/myitcv.io/gogenerate/gogenerate.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk), all rights reserved. -// Use of this document is governed by a license found in the LICENSE document. - -// Package gogenerate exposes some of the unexported internals of the go generate command as a convenience -// for the authors of go generate generators. See https://github.com/myitcv/gogenerate/wiki/Go-Generate-Notes -// for further notes on such generators. It also exposes some convenience functions that might be useful -// to authors of generators. -// -package gogenerate // import "myitcv.io/gogenerate" - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/build" - "os" - "path/filepath" - "strings" -) - -// These constants correspond in name and value to the details given in -// go generate --help -const ( - GOARCH = "GOARCH" - GOFILE = "GOFILE" - GOLINE = "GOLINE" - GOOS = "GOOS" - GOPACKAGE = "GOPACKAGE" - GOPATH = "GOPATH" - - GoGeneratePrefix = "//go:generate" -) - -const ( - // genStr is the string used in the prefix of generated files - genStr = "gen" - - // sep is the separator used between the parts of a file name; the prefix used to identify - // generated files, the name (body) and the suffix used to identify the generator - sep = "_" - - // genFilePrefix is the prefix used on all generated files (which strictly speaking - // is limited to Go files as far as this definition is concerned, but in practice need not be) - genFilePrefix = genStr + sep -) - -const ( - // FlagLog is the name of the common flag shared between go generate generators - // to control logging verbosity. - FlagLog = "gglog" - - // FlagOutPkgPrefix is the prefix used for flags generated by OutPkgFlag. - FlagOutPkgPrefix = "outpkg:" - - // FlagLicenseFile is the name of the common flag shared between go generate generators - // to provide a license header file.
- FlagLicenseFile = "licenseFile" -) - -type LogLevel string - -// The various log levels supported by the flag returned by LogFlag() -const ( - LogInfo LogLevel = "info" - LogWarning LogLevel = "warning" - LogError LogLevel = "error" - LogFatal LogLevel = "fatal" -) - -// FileIsGenerated determines whether the Go file located at path is generated or not -// and if it is generated returns the base name of the generator that generated it -func FileIsGenerated(path string) (string, bool) { - fn := filepath.Base(path) - - if !strings.HasPrefix(fn, genFilePrefix) || !strings.HasSuffix(fn, ".go") { - return "", false - } - - fn = strings.TrimPrefix(fn, genFilePrefix) - fn = strings.TrimSuffix(fn, ".go") - - if strings.HasSuffix(fn, "_test") { - fn = strings.TrimSuffix(fn, "_test") - } - - // deals with the edge case gen_.go or gen__test.go - if fn == "" { - return "", false - } - - parts := strings.Split(fn, sep) - - return parts[len(parts)-1], true -} - -// FileGeneratedBy returns true if the base name of the supplied path is a Go file that -// would have been generated by the supplied cmd -func FileGeneratedBy(path string, cmd string) bool { - cmd = filepath.Base(cmd) - - c, ok := FileIsGenerated(path) - - return ok && c == cmd -} - -// NameFileFromFile uses the provided filename as a template and returns a generated filename consistent with -// the provided command -func NameFileFromFile(name string, cmd string) (string, bool) { - dir := filepath.Dir(name) - name = filepath.Base(name) - - if !strings.HasSuffix(name, ".go") { - return "", false - } - - name = strings.TrimSuffix(name, ".go") - cmd = filepath.Base(cmd) - - var res string - - if strings.HasSuffix(name, "_test") { - name = strings.TrimSuffix(name, "_test") - res = NameTestFile(name, cmd) - } else { - res = NameFile(name, cmd) - } - - return filepath.Join(dir, res), true -} - -func nameBase(name string, cmd string) string { - res := genStr - - if name != "" { - res += sep + name - } - - res += sep + cmd - - return res -} - -// NameFile returns a file name that conforms with the pattern associated with -// files generated by the provided command -func NameFile(name string, cmd string) string { - cmd = filepath.Base(cmd) - - return nameBase(name, cmd) + ".go" -} - -// NameTestFile returns a file name that conforms with the pattern associated -// with files generated by the provided command -func NameTestFile(name string, cmd string) string { - cmd = filepath.Base(cmd) - - return nameBase(name, cmd) + "_test.go" -} - -type outputs []string - -func (o *outputs) String() string { - return fmt.Sprint(*o) -} - -func (o *outputs) Set(value string) error { - *o = append(*o, value) - return nil -} - -var outputsSet = make(map[string]bool) - -// OutPkgFlag defines a new flag "outpkg:"+key that can accept a list of -// package specifications that represent output targets above and beyond the -// default of self.
-func OutPkgFlag(key string) *outputs { - if outputsSet[key] { - panic(fmt.Errorf("already defined outpkg flag for key %q", key)) - } - - // safe doing this because we should be in init phase - outputsSet[key] = true - - var res outputs - - flag.Var(&res, "outpkg:"+key, "list of output packages in addition to self") - - return &res -} - -// LogFlag defines a command line string flag named according to the constant -// FlagLog and returns a pointer to the string the flag sets -func LogFlag() *string { - return flag.String(FlagLog, string(LogFatal), "log level; one of info, warning, error, fatal") -} - -// LicenseFileFlag defines a command line string flag named according to the -// constant FlagLicenseFile and returns a pointer to the string that flag sets -func LicenseFileFlag() *string { - return flag.String(FlagLicenseFile, "", "file that contains a license header to be inserted at the top of each generated file") -} - -// CommentLicenseHeader is a convenience function to be used in conjunction -// with LicenseFileFlag; if a filename is provided it reads the contents of the -// file and returns a line-commented transformation of the contents with a -// final blank newline -func CommentLicenseHeader(file *string) (string, error) { - if file == nil || *file == "" { - return "", nil - } - - fi, err := os.Open(*file) - if err != nil { - return "", fmt.Errorf("could not open file %q: %v", *file, err) - } - - res := bytes.NewBuffer(nil) - - lastLineEmpty := false - scanner := bufio.NewScanner(fi) - - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - lastLineEmpty = line == "" - fmt.Fprintln(res, "//", line) - } - - if err := scanner.Err(); err != nil { - return "", fmt.Errorf("failed to scan file %v: %v", *file, err) - } - - // ensure we have a space before package - if !lastLineEmpty { - fmt.Fprintln(res) - } - - return res.String(), nil -} - -// DefaultLogLevel is provided simply as a convenience along with LogFlag to ensure a default LogLevel -// in a flag variable. This is necessary because of the interplay between go generate argument parsing -// and the advice given for log levels via gg. -func DefaultLogLevel(f *string, ll LogLevel) { - if f != nil && *f == "" { - *f = string(ll) - } -} - -// FilesContainingCmd returns a map of Go file name (defined by go list as -// GoFiles + CgoFiles + TestGoFiles + XTestGoFiles) in the directory dir, to a -// count of the number of times directive command appears in that file (after -// quote and variable expansion as described by go generate -help). When -// comparing commands, the filepath.Base of each is compared. The file names -// will, by definition, be relative to dir -func FilesContainingCmd(dir string, command string) (map[string]int, error) { - - command = filepath.Base(strings.TrimSpace(command)) - if command == "" { - return nil, nil - } - - pkg, err := build.ImportDir(dir, build.IgnoreVendor) - if err != nil { - return nil, err - } - - // GoFiles+CgoFiles+TestGoFiles+XTestGoFiles per go list - // these are all relative to path - gofiles := make([]string, 0, len(pkg.GoFiles)+len(pkg.CgoFiles)+len(pkg.TestGoFiles)+len(pkg.XTestGoFiles)) - gofiles = append(gofiles, pkg.GoFiles...) - gofiles = append(gofiles, pkg.CgoFiles...) - gofiles = append(gofiles, pkg.TestGoFiles...) - gofiles = append(gofiles, pkg.XTestGoFiles...)
-
-    matchMap := make(map[string]int)
-
-    for _, f := range gofiles {
-        checkMatch := func(line int, args []string) error {
-            if filepath.Base(args[0]) == command {
-                matchMap[f]++
-            }
-
-            return nil
-        }
-
-        err = DirFunc(pkg.ImportPath, dir, f, checkMatch)
-        if err != nil {
-            return nil, err
-        }
-    }
-
-    return matchMap, nil
-}
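The helpers deleted above all encode a single file-naming convention for generated code. As a rough standalone sketch of the round-trip (the constants genStr, sep and genFilePrefix are not shown in this diff, so the concrete values below are assumptions inferred from how the code uses them):

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed values: the deleted code references genStr, sep and genFilePrefix
// without showing their definitions; "gen" and "_" fit the observed pattern
// (e.g. gen_view_reactGen.go) but are a guess.
const (
	genStr        = "gen"
	sep           = "_"
	genFilePrefix = genStr + sep
)

// nameFile mirrors the deleted NameFile/nameBase pair.
func nameFile(name, cmd string) string {
	res := genStr
	if name != "" {
		res += sep + name
	}
	return res + sep + cmd + ".go"
}

// generatorOf mirrors the core of the deleted FileIsGenerated: the generator
// is the component after the last separator.
func generatorOf(fn string) (string, bool) {
	if !strings.HasPrefix(fn, genFilePrefix) || !strings.HasSuffix(fn, ".go") {
		return "", false
	}
	fn = strings.TrimSuffix(strings.TrimPrefix(fn, genFilePrefix), ".go")
	fn = strings.TrimSuffix(fn, "_test")
	if fn == "" {
		return "", false
	}
	parts := strings.Split(fn, sep)
	return parts[len(parts)-1], true
}

func main() {
	f := nameFile("view", "reactGen")
	fmt.Println(f) // gen_view_reactGen.go

	g, ok := generatorOf(f)
	fmt.Println(g, ok) // reactGen true
}
```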
diff --git a/vendor/myitcv.io/gopherize.me/client/artwork b/vendor/myitcv.io/gopherize.me/client/artwork
deleted file mode 120000
index 1eae04007..000000000
--- a/vendor/myitcv.io/gopherize.me/client/artwork
+++ /dev/null
@@ -1 +0,0 @@
-../artwork
\ No newline at end of file
diff --git a/vendor/myitcv.io/highlightjs/LICENSE b/vendor/myitcv.io/highlightjs/LICENSE
deleted file mode 100644
index 20513b6a5..000000000
--- a/vendor/myitcv.io/highlightjs/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2016 Paul Jolly. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/myitcv.io/hybridimporter/LICENSE b/vendor/myitcv.io/hybridimporter/LICENSE
deleted file mode 100644
index da07b6310..000000000
--- a/vendor/myitcv.io/hybridimporter/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016 Paul Jolly (paul@myitcv.org.uk)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/myitcv.io/immutable/LICENSE b/vendor/myitcv.io/immutable/LICENSE
deleted file mode 100644
index 20513b6a5..000000000
--- a/vendor/myitcv.io/immutable/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2016 Paul Jolly. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/myitcv.io/immutable/cmd/immutableGen/internal/coretest/license.txt b/vendor/myitcv.io/immutable/cmd/immutableGen/internal/coretest/license.txt
deleted file mode 100644
index 22318c346..000000000
--- a/vendor/myitcv.io/immutable/cmd/immutableGen/internal/coretest/license.txt
+++ /dev/null
@@ -1 +0,0 @@
-My favourite license
diff --git a/vendor/myitcv.io/immutable/example/license_header.txt b/vendor/myitcv.io/immutable/example/license_header.txt
deleted file mode 100644
index 1e897d30a..000000000
--- a/vendor/myitcv.io/immutable/example/license_header.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Copyright (c) 2016 Paul Jolly , all rights reserved.
-Use of this document is governed by a license found in the LICENSE document.
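The license_header.txt file deleted just above is exactly the kind of input the CommentLicenseHeader helper (removed earlier in this diff) consumes. A minimal runnable sketch of that transformation, extracted from the deleted code and fed the header text shown above:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// commentHeader mirrors the core of the deleted CommentLicenseHeader: each
// line is turned into a // comment, and a trailing blank line is added so a
// package clause can follow directly.
func commentHeader(contents string) string {
	res := &bytes.Buffer{}
	lastLineEmpty := false
	scanner := bufio.NewScanner(strings.NewReader(contents))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		lastLineEmpty = line == ""
		fmt.Fprintln(res, "//", line)
	}
	if !lastLineEmpty {
		fmt.Fprintln(res) // blank line before the package clause
	}
	return res.String()
}

func main() {
	fmt.Print(commentHeader("Copyright (c) 2016 Paul Jolly , all rights reserved.\nUse of this document is governed by a license found in the LICENSE document."))
	// Output:
	// // Copyright (c) 2016 Paul Jolly , all rights reserved.
	// // Use of this document is governed by a license found in the LICENSE document.
	//
}
```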
diff --git a/vendor/myitcv.io/react/.bin_deps b/vendor/myitcv.io/react/.bin_deps
deleted file mode 100644
index 5eb6914a3..000000000
--- a/vendor/myitcv.io/react/.bin_deps
+++ /dev/null
@@ -1,5 +0,0 @@
-myitcv.io/react/cmd/coreGen
-myitcv.io/react/cmd/cssGen
-myitcv.io/react/cmd/reactGen
-myitcv.io/react/cmd/reactVet
-myitcv.io/react/cmd/stateGen
diff --git a/vendor/myitcv.io/react/.ggconfig.json b/vendor/myitcv.io/react/.ggconfig.json
deleted file mode 100644
index 0581266cd..000000000
--- a/vendor/myitcv.io/react/.ggconfig.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-    "Untyped": [
-        "myitcv.io/react/cmd/reactGen",
-        "myitcv.io/react/cmd/coreGen",
-        "myitcv.io/react/cmd/cssGen",
-        "myitcv.io/react/cmd/stateGen",
-        "myitcv.io/sorter/cmd/sortGen",
-        "myitcv.io/immutable/cmd/immutableGen"
-    ]
-}
diff --git a/vendor/myitcv.io/react/.gitattributes b/vendor/myitcv.io/react/.gitattributes
deleted file mode 100644
index eea25391c..000000000
--- a/vendor/myitcv.io/react/.gitattributes
+++ /dev/null
@@ -1,4 +0,0 @@
-examples/sites/common/* linguist-vendored
-examples/sites/common/* linguist-generated=true
-*.inc.js linguist-vendored
-*.inc.js linguist-generated=true
diff --git a/vendor/myitcv.io/react/.vendored_bin_deps b/vendor/myitcv.io/react/.vendored_bin_deps
deleted file mode 100644
index bf502a2dc..000000000
--- a/vendor/myitcv.io/react/.vendored_bin_deps
+++ /dev/null
@@ -1,8 +0,0 @@
-github.com/gopherjs/gopherjs
-github.com/jteeuwen/go-bindata/go-bindata
-golang.org/x/tools/cmd/goimports
-golang.org/x/tools/cmd/stringer
-myitcv.io/cmd/gjbt
-myitcv.io/immutable/cmd/immutableGen
-myitcv.io/immutable/cmd/immutableVet
-myitcv.io/sorter/cmd/sortGen
diff --git a/vendor/myitcv.io/react/LICENSE b/vendor/myitcv.io/react/LICENSE
deleted file mode 100644
index 20513b6a5..000000000
--- a/vendor/myitcv.io/react/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2016 Paul Jolly. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
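The outputs type removed earlier in this diff is a standard flag.Value accumulator: registering it with flag.Var means the flag may be repeated, with each occurrence appending to the slice. A runnable sketch of the same pattern (the flag key and values here are illustrative; the real code registers under "outpkg:"+key on the global flag set):

```go
package main

import (
	"flag"
	"fmt"
)

// outputs collects every occurrence of a repeatable string flag.
type outputs []string

func (o *outputs) String() string { return fmt.Sprint(*o) }

func (o *outputs) Set(value string) error {
	*o = append(*o, value)
	return nil
}

func main() {
	var res outputs
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&res, "outpkg:reactGen", "list of output packages in addition to self")

	// A driver such as gg might plausibly invoke:
	//   cmd -outpkg:reactGen=./a -outpkg:reactGen=./b
	fs.Parse([]string{"-outpkg:reactGen=./a", "-outpkg:reactGen=./b"})
	fmt.Println(res) // [./a ./b]
}
```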
diff --git a/vendor/myitcv.io/react/README.md b/vendor/myitcv.io/react/README.md
deleted file mode 100644
index 21d85d880..000000000
--- a/vendor/myitcv.io/react/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-## `react`
-
-Package react is a set of GopherJS bindings for Facebook's React, a Javascript library for building user interfaces.
-
-```
-go get -u myitcv.io/react
-```
-
-
-### Running the tests
-
-As you can see in [`.travis.yml`](.travis.yml), the CI tests consist of running:
-
-```bash
-./_scripts/run_tests.sh
-```
-
-followed by ensuring that `git` is clean. This ensures that "current" generated files are committed to the repo.
-
-### Docs
-
-Please start reading [here](_doc/README.md).
diff --git a/vendor/myitcv.io/react/cmd/reactGen/comp_gen.go b/vendor/myitcv.io/react/cmd/reactGen/comp_gen.go
deleted file mode 100644
index 568aed84d..000000000
--- a/vendor/myitcv.io/react/cmd/reactGen/comp_gen.go
+++ /dev/null
@@ -1,399 +0,0 @@
-package main
-
-import (
-    "fmt"
-    "go/ast"
-    "go/format"
-    "io/ioutil"
-    "strings"
-    "unicode"
-    "unicode/utf8"
-
-    "myitcv.io/gogenerate"
-)
-
-type compGen struct {
-    *coreGen
-
-    Recv string
-    Name string
-
-    HasState                     bool
-    HasProps                     bool
-    HasGetInitState              bool
-    HasComponentWillReceiveProps bool
-
-    PropsHasEquals bool
-    StateHasEquals bool
-
-    RendersSlice bool
-
-    RendersMethods []RendersMethod
-}
-
-type RendersMethod struct {
-    Name string
-    Type string
-}
-
-func (g *gen) genComp(defName string) {
-    name := strings.TrimSuffix(defName, compDefSuffix)
-
-    if g.isReactCore {
-        panic(fmt.Errorf("don't yet know how to generate core components like %v", name))
-    }
-
-    r, _ := utf8.DecodeRuneInString(name)
-
-    cg := &compGen{
-        coreGen: newCoreGen(g),
-        Name:    name,
-        Recv:    string(unicode.ToLower(r)),
-    }
-
-    _, hasState := g.types[name+stateTypeSuffix]
-
-    _, hasPropsTmpl := g.types[propsTypeTmplPrefix+name+propsTypeSuffix]
-    _, hasPropsType := g.types[name+propsTypeSuffix]
-
-    hasProps := hasPropsTmpl || hasPropsType
-
-    cg.HasState = hasState
-    cg.HasProps = hasProps
-
-    for _, ff := range g.nonPointMeths[defName] {
-        m := ff.fn
-
-        // gather Renders* methods
-        func() {
-            if !strings.HasPrefix(m.Name.Name, "Renders") {
-                return
-            }
-
-            if m.Type.Params == nil || len(m.Type.Params.List) != 1 {
-                return
-            }
-
-            if m.Type.Results != nil && len(m.Type.Results.List) != 0 {
-                return
-            }
-
-            ap := m.Type.Params.List[0]
-
-            cg.RendersMethods = append(cg.RendersMethods, RendersMethod{
-                Name: m.Name.Name,
-                Type: astNodeString(ap.Type),
-            })
-        }()
-
-        func() {
-            if m.Name.Name != "Render" {
-                return
-            }
-
-            if m.Type.Params != nil && len(m.Type.Params.List) != 0 {
-                return
-            }
-
-            if m.Type.Results == nil || len(m.Type.Results.List) != 1 {
-                return
-            }
-
-            rp := m.Type.Results.List[0]
-
-            at, ok := rp.Type.(*ast.ArrayType)
-            if !ok {
-                return
-            }
-
-            if at.Len == nil {
-                cg.RendersSlice = true
-            }
-        }()
-    }
-
-    if hasState {
-        for _, ff := range g.nonPointMeths[defName] {
-            m := ff.fn
-
-            if m.Name.Name != getInitialState {
-                continue
-            }
-
-            if m.Type.Params != nil && len(m.Type.Params.List) > 0 {
-                continue
-            }
-
-            if m.Type.Results != nil && len(m.Type.Results.List) != 1 {
-                continue
-            }
-
-            rp := m.Type.Results.List[0]
-
-            id, ok := rp.Type.(*ast.Ident)
-            if !ok {
-                continue
-            }
-
-            if id.Name == name+stateTypeSuffix {
-                cg.HasGetInitState = true
-                break
-            }
-        }
-
-        for _, ff := range g.nonPointMeths[name+stateTypeSuffix] {
-            m := ff.fn
-
-            if m.Name.Name != equals {
-                continue
-            }
-
-            if m.Type.Params != nil && len(m.Type.Params.List) != 1 {
-                continue
-            }
-
-            if m.Type.Results != nil && len(m.Type.Results.List) != 1 {
-                continue
-            }
-
-            {
-                v := m.Type.Params.List[0]
-
-                id, ok := v.Type.(*ast.Ident)
-                if !ok {
-                    continue
-                }
-
-                if id.Name != name+stateTypeSuffix {
-                    continue
-                }
-            }
-
-            {
-                v := m.Type.Results.List[0]
-
-                id, ok := v.Type.(*ast.Ident)
-                if !ok {
-                    continue
-                }
-
-                if id.Name != "bool" {
-                    continue
-                }
-            }
-
-            cg.StateHasEquals = true
-        }
-    }
-
-    if hasProps {
-        for _, ff := range g.nonPointMeths[defName] {
-            m := ff.fn
-
-            if m.Name.Name != componentWillReceiveProps {
-                continue
-            }
-
-            if m.Type.Params != nil && len(m.Type.Params.List) != 1 {
-                continue
-            }
-
-            if m.Type.Results != nil && len(m.Type.Results.List) != 0 {
-                continue
-            }
-
-            p := m.Type.Params.List[0]
-
-            id, ok := p.Type.(*ast.Ident)
-            if !ok {
-                continue
-            }
-
-            if id.Name == name+propsTypeSuffix {
-                cg.HasComponentWillReceiveProps = true
-                break
-            }
-        }
-
-        for _, ff := range g.nonPointMeths[name+propsTypeSuffix] {
-            m := ff.fn
-
-            if m.Name.Name != equals {
-                continue
-            }
-
-            if m.Type.Params != nil && len(m.Type.Params.List) != 1 {
-                continue
-            }
-
-            if m.Type.Results != nil && len(m.Type.Results.List) != 1 {
-                continue
-            }
-
-            {
-                v := m.Type.Params.List[0]
-
-                id, ok := v.Type.(*ast.Ident)
-                if !ok {
-                    continue
-                }
-
-                if id.Name != name+propsTypeSuffix {
-                    continue
-                }
-            }
-
-            {
-                v := m.Type.Results.List[0]
-
-                id, ok := v.Type.(*ast.Ident)
-                if !ok {
-                    continue
-                }
-
-                if id.Name != "bool" {
-                    continue
-                }
-            }
-
-            cg.PropsHasEquals = true
-        }
-    }
-
-    cg.pf("// Code generated by %v. DO NOT EDIT.\n", reactGenCmd)
-    cg.pln()
-    cg.pf("package %v\n", cg.pkg)
-
-    cg.pf("import \"%v\"\n", reactPkg)
-    cg.pln()
-
-    cg.pt(`
-{{ $cg := . }}
-
-type {{.Name}}Elem struct {
-    react.Element
-}
-
-{{range $rm := .RendersMethods }}
-func ({{$cg.Recv}} *{{$cg.Name}}Elem) {{$rm.Name}}({{$rm.Type}}) {}
-{{end}}
-
-{{if .RendersMethods}}
-func ({{$cg.Recv}} *{{$cg.Name}}Elem) noop() {
-    var v {{.Name}}Def
-    r := v.Render()
-
-    {{range $rm := .RendersMethods }}
-    v.{{$rm.Name}}(r)
-    {{end -}}
-}
-{{end}}
-
-func build{{.Name}}(cd react.ComponentDef) react.Component {
-    return {{.Name}}Def{ComponentDef: cd}
-}
-
-func build{{.Name}}Elem({{if .HasProps}}props {{.Name}}Props,{{end}} children ...react.Element) *{{.Name}}Elem {
-    return &{{.Name}}Elem{
-        Element: react.CreateElement(build{{.Name}}, {{if .HasProps}}props{{else}}nil{{end}}, children...),
-    }
-}
-
-func ({{.Recv}} {{.Name}}Def) RendersElement() react.Element {
-    {{if .RendersSlice -}}
-    rr := {{.Recv}}.Render()
-    elems := make([]react.Element, 0, len(rr))
-    for _, r := range rr {
-        elems = append(elems, r)
-    }
-    return react.Fragment(elems...)
-    {{else -}}
-    return {{.Recv}}.Render()
-    {{end -}}
-}
-
-{{if .HasState}}
-// SetState is an auto-generated proxy to update the state for the
-// {{.Name}} component. SetState does not immediately mutate {{.Recv}}.State()
-// but creates a pending state transition.
-func ({{.Recv}} {{.Name}}Def) SetState(state {{.Name}}State) {
-    {{.Recv}}.ComponentDef.SetState(state)
-}
-
-// State is an auto-generated proxy to return the current state in use for the
-// render of the {{.Name}} component
-func ({{.Recv}} {{.Name}}Def) State() {{.Name}}State {
-    return {{.Recv}}.ComponentDef.State().({{.Name}}State)
-}
-
-// IsState is an auto-generated definition so that {{.Name}}State implements
-// the myitcv.io/react.State interface.
-func ({{.Recv}} {{.Name}}State) IsState() {}
-
-var _ react.State = {{.Name}}State{}
-
-// GetInitialStateIntf is an auto-generated proxy to GetInitialState
-func ({{.Recv}} {{.Name}}Def) GetInitialStateIntf() react.State {
-{{if .HasGetInitState -}}
-    return {{.Recv}}.GetInitialState()
-{{else -}}
-    return {{.Name}}State{}
-{{end -}}
-}
-
-func ({{.Recv}} {{.Name}}State) EqualsIntf(val react.State) bool {
-    {{if .StateHasEquals -}}
-    return {{.Recv}}.Equals(val.({{.Name}}State))
-    {{else -}}
-    return {{.Recv}} == val.({{.Name}}State)
-    {{end -}}
-}
-{{end}}
-
-{{if .HasProps}}
-// IsProps is an auto-generated definition so that {{.Name}}Props implements
-// the myitcv.io/react.Props interface.
-func ({{.Recv}} {{.Name}}Props) IsProps() {}
-
-// Props is an auto-generated proxy to the current props of {{.Name}}
-func ({{.Recv}} {{.Name}}Def) Props() {{.Name}}Props {
-    uprops := {{.Recv}}.ComponentDef.Props()
-    return uprops.({{.Name}}Props)
-}
-
-{{if .HasComponentWillReceiveProps}}
-// ComponentWillReceivePropsIntf is an auto-generated proxy to
-// ComponentWillReceiveProps
-func ({{.Recv}} {{.Name}}Def) ComponentWillReceivePropsIntf(val interface{}) {
-    ourProps := val.({{.Name}}Props)
-    {{.Recv}}.ComponentWillReceiveProps(ourProps)
-}
-{{end}}
-
-func ({{.Recv}} {{.Name}}Props) EqualsIntf(val react.Props) bool {
-    {{if .PropsHasEquals -}}
-    return {{.Recv}}.Equals(val.({{.Name}}Props))
-    {{else -}}
-    return {{.Recv}} == val.({{.Name}}Props)
-    {{end -}}
-}
-
-var _ react.Props = {{.Name}}Props{}
-{{end}}
-    `, cg)
-
-    ofName := gogenerate.NameFile(name, reactGenCmd)
-    toWrite := cg.buf.Bytes()
-
-    out, err := format.Source(toWrite)
-    if err == nil {
-        toWrite = out
-    }
-
-    if err := ioutil.WriteFile(ofName, toWrite, 0644); err != nil {
-        fatalf("could not write %v: %v", ofName, err)
-    }
-}
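To make the template above concrete: applied to a hypothetical component Foo with props, no state and a plain (non-slice) Render, the deleted generator would emit code along these lines. FooDef, FooProps and the Render body are illustrative user-side code; only the shape of the generated half follows from the template itself.

```go
package foo

import "myitcv.io/react"

// User-written side (illustrative only):
type FooDef struct {
	react.ComponentDef
}

type FooProps struct {
	Name string
}

func (f FooDef) Render() react.Element {
	return nil // a real component returns an element tree here
}

// Generated side, per the template above (hypothetical expansion):
type FooElem struct {
	react.Element
}

func buildFoo(cd react.ComponentDef) react.Component {
	return FooDef{ComponentDef: cd}
}

func buildFooElem(props FooProps, children ...react.Element) *FooElem {
	return &FooElem{
		Element: react.CreateElement(buildFoo, props, children...),
	}
}

func (f FooDef) RendersElement() react.Element {
	return f.Render()
}

// IsProps marks FooProps as implementing the myitcv.io/react.Props interface.
func (f FooProps) IsProps() {}

// Props returns the props in use for the current render of Foo.
func (f FooDef) Props() FooProps {
	return f.ComponentDef.Props().(FooProps)
}

// With no user-defined Equals method, the template falls back to ==.
func (f FooProps) EqualsIntf(val react.Props) bool {
	return f == val.(FooProps)
}

var _ react.Props = FooProps{}
```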
diff --git a/vendor/myitcv.io/react/cmd/reactGen/core_gen.go b/vendor/myitcv.io/react/cmd/reactGen/core_gen.go
deleted file mode 100644
index a696d9c1e..000000000
--- a/vendor/myitcv.io/react/cmd/reactGen/core_gen.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package main
-
-import (
-    "bytes"
-    "fmt"
-    "strings"
-    "text/template"
-)
-
-type coreGen struct {
-    *gen
-
-    buf *bytes.Buffer
-}
-
-func newCoreGen(g *gen) *coreGen {
-    return &coreGen{
-        gen: g,
-        buf: bytes.NewBuffer(nil),
-    }
-}
-
-func (c *coreGen) pf(format string, vals ...interface{}) {
-    fmt.Fprintf(c.buf, format, vals...)
-}
-
-func (c *coreGen) pln(vals ...interface{}) {
-    fmt.Fprintln(c.buf, vals...)
-}
-
-func (c *coreGen) pt(tmpl string, val interface{}) {
-    // Most templates are, for convenience, defined inline as raw string
-    // literals that open with ` on one line and start the template body on
-    // the next (for readability), so strip the first leading \n if present.
-    tmpl = strings.TrimPrefix(tmpl, "\n")
-
-    t := template.New("tmp")
-
-    _, err := t.Parse(tmpl)
-    if err != nil {
-        fatalf("unable to parse template: %v", err)
-    }
-
-    err = t.Execute(c.buf, val)
-    if err != nil {
-        fatalf("cannot execute template: %v", err)
-    }
-}
diff --git a/vendor/myitcv.io/react/cmd/reactGen/flag.go b/vendor/myitcv.io/react/cmd/reactGen/flag.go
deleted file mode 100644
index 56f4f439b..000000000
--- a/vendor/myitcv.io/react/cmd/reactGen/flag.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package main
-
-import (
-    "flag"
-    "fmt"
-    "os"
-
-    "myitcv.io/gogenerate"
-)
-
-var (
-    fLicenseFile = gogenerate.LicenseFileFlag()
-    fGoGenLog    = gogenerate.LogFlag()
-    fInit        initFlag
-)
-
-type initFlag struct {
-    val *string
-}
-
-func (f *initFlag) String() string {
-    return "(does not have a default value)"
-}
-
-func (f *initFlag) Set(s string) error {
-    f.val = &s
-    return nil
-}
-
-func init() {
-    flag.Var(&fInit, "init", "create a GopherJS React application using the specified template (see below)")
-}
-
-func usage() {
-    f := func(format string, args ...interface{}) {
-        fmt.Fprintf(os.Stderr, format, args...)
-    }
-
-    l := func(args ...interface{}) {
-        fmt.Fprintln(os.Stderr, args...)
-    }
-
-    l("Usage:")
-    f("\t%v [-init