Add Support for Go Modules, Remove dep dependencies (#141)

- migrates dependency management from dep to Go modules.
- removes dep's `Gopkg.lock` and `Gopkg.toml`, and adds the Go modules `go.mod` and `go.sum` files
- replaces `dep ensure` with `go mod verify` in `.travis.yml`
- updates vendored dependencies using the `go mod tidy`, `go mod verify` and `go mod vendor` commands (see the sketch after this list)
- replaces `github.com/google/go-github/github` with `github.com/google/go-github/v27/github` for Go module support in `pkg/controller/upgrade.go`
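For context, a dep-to-modules migration of this shape usually comes down to the short command sequence sketched below. This is a sketch of the typical steps, not a record of the exact invocations used for this commit; the module path is the one declared in the new `go.mod`.

```sh
# Create go.mod with the module path declared below
go mod init github.com/infracloudio/botkube

# Resolve requirements from the existing imports and drop unused ones
go mod tidy

# Check that downloaded modules have not been modified
go mod verify

# Re-populate ./vendor from the module cache so vendored builds keep working
go mod vendor
```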
coder 2019-08-05 14:55:46 +05:30 committed by Prasad Ghangal
parent 96fa505b59
commit 5013c3b736
1565 changed files with 224642 additions and 177929 deletions

View File

@@ -1,13 +1,12 @@
language: go
go:
- 1.12
- '1.12'
services:
- docker
install:
- go get -u github.com/golang/dep/cmd/dep
- go get -u golang.org/x/lint/golint
- curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
- chmod +x get_helm.sh
@@ -17,7 +16,7 @@ before_script:
- hack/verify-gofmt.sh
- hack/verify-govet.sh
- hack/verify-golint.sh
- dep ensure
- go mod verify
- helm lint helm/botkube
script:

620
Gopkg.lock generated
View File

@@ -1,620 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
digest = "1:e95c8ee2ac122e3b55a7bef845c4c65bbd240685da1dbb48aee2d6854e16c758"
name = "github.com/alecthomas/log4go"
packages = ["."]
pruneopts = "UT"
revision = "d146e6b86faab5f7e4d135cc63b749d74bf0d4a6"
[[projects]]
digest = "1:6b21090f60571b20b3ddc2c8e48547dffcf409498ed6002c2cada023725ed377"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "782f4967f2dc4564575ca782fe2d04090b5faca8"
[[projects]]
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
digest = "1:b7a8552c62868d867795b63eaf4f45d3e92d36db82b428e680b9c95a8c33e5b1"
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys",
]
pruneopts = "UT"
revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
version = "v0.5"
[[projects]]
digest = "1:2edd2416f89b4e841df0e4a78802ce14d2bc7ad79eba1a45986e39f0f8cb7d87"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "UT"
revision = "44145f04b68cf362d9c4df2182967c2275eaefed"
[[projects]]
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df"
name = "github.com/google/btree"
packages = ["."]
pruneopts = "UT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
digest = "1:dc8f33fafa8072398474e182c9c7dfcd6f1b7c688d78c779f7d19a18720ea5dc"
name = "github.com/google/go-github"
packages = ["github"]
pruneopts = "UT"
revision = "445d3bd422a8e75398ca4c1a6a287d05056f4068"
version = "v26.0.8"
[[projects]]
digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690"
name = "github.com/google/go-querystring"
packages = ["query"]
pruneopts = "UT"
revision = "44c6ddd0a2342c386950e880b658017258da92fc"
version = "v1.0.0"
[[projects]]
digest = "1:41bfd4219241b7f7d6e6fdb13fc712576f1337e68e6b895136283b76928fdd66"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "UT"
revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c"
[[projects]]
digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b"
name = "github.com/google/uuid"
packages = ["."]
pruneopts = "UT"
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
version = "v1.1.1"
[[projects]]
digest = "1:75eb87381d25cc75212f52358df9c3a2719584eaa9685cd510ce28699122f39d"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions",
]
pruneopts = "UT"
revision = "0c5108395e2debce0d731cf0287ddf7242066aba"
[[projects]]
digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = "UT"
revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d"
version = "v1.4.0"
[[projects]]
digest = "1:878f0defa9b853f9acfaf4a162ba450a89d0050eff084f9fe7f5bd15948f172a"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = "UT"
revision = "787624de3eb7bd915c329cba748687a3b22666a6"
[[projects]]
digest = "1:3f90d23757c18b1e07bf11494dbe737ee2c44d881c0f41e681611abdadad62fa"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "UT"
revision = "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
[[projects]]
digest = "1:3e260afa138eab6492b531a3b3d10ab4cb70512d423faa78b8949dec76e66a21"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "UT"
revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
version = "v0.3.5"
[[projects]]
digest = "1:eaefc85d32c03e5f0c2b88ea2f79fce3d993e2c78316d21319575dd4ea9153ca"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "UT"
revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
version = "1.1.4"
[[projects]]
digest = "1:0a69a1c0db3591fcefb47f115b224592c8dfa4368b7ba9fae509d5e16cdc95c8"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = "UT"
revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
version = "v1.0.1"
[[projects]]
branch = "master"
digest = "1:b8774f0cf1c719f5253e92f1fc7880b4186046064c3c23ead99ab568028a6510"
name = "github.com/mailru/easyjson"
packages = [
".",
"buffer",
"jlexer",
"jwriter",
]
pruneopts = "UT"
revision = "1de009706dbeb9d05f18586f0735fcdb7c524481"
[[projects]]
digest = "1:b0654f4992357c0494eadd434722dc67072bf81476784f47a048c58d1f9099a4"
name = "github.com/mattermost/mattermost-server"
packages = [
"model",
"utils/markdown",
]
pruneopts = "UT"
revision = "416c7523c3097af6759b7c40075df51d44624672"
version = "v4.9.4"
[[projects]]
digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = "UT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:c56ad36f5722eb07926c979d5e80676ee007a9e39e7808577b9d87ec92b00460"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = "UT"
revision = "94122c33edd36123c84d5368cfb2b69df93a0ec8"
version = "v1.0.1"
[[projects]]
digest = "1:07140002dbf37da92090f731b46fa47be4820b82fe5c14a035203b0e813d0ec2"
name = "github.com/nicksnyder/go-i18n"
packages = [
"i18n",
"i18n/bundle",
"i18n/language",
"i18n/translation",
]
pruneopts = "UT"
revision = "0dc1626d56435e9d605a29875701721c54bc9bbd"
version = "v1.10.0"
[[projects]]
digest = "1:95d38d218bf2290987c6b0e885a9f0f2d3d3239235acaddca01c3fe36e5e5566"
name = "github.com/nlopes/slack"
packages = [
".",
"slackutilsx",
]
pruneopts = "UT"
revision = "b9033a72a20bf84563485e86a2adbea4bf265804"
version = "v0.4.0"
[[projects]]
digest = "1:d1da5ee400cc3c39cf7d920c2db4b04d9407e24a917bf8b99a183aa024081871"
name = "github.com/olivere/elastic"
packages = [
".",
"config",
"uritemplates",
]
pruneopts = "UT"
revision = "407a1d940dcc89e279c579eb393d0bdde8fd9ac4"
version = "v6.2.16"
[[projects]]
digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9"
name = "github.com/pborman/uuid"
packages = ["."]
pruneopts = "UT"
revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
version = "v1.2"
[[projects]]
digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = "UT"
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = "UT"
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:0e7775ebbcf00d8dd28ac663614af924411c868dca3d5aa762af0fae3808d852"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = "UT"
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:69b1cc331fca23d702bd72f860c6a647afd0aa9fcbc1d0659b1365e26546dd70"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = "UT"
revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
version = "v1.2.0"
[[projects]]
digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
digest = "1:25f2747b063c0a656195ef85413cef8c9f2bbe128deab7d39563a6ca1e536070"
name = "github.com/stretchr/testify"
packages = ["assert"]
pruneopts = "UT"
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
version = "v1.1.4"
[[projects]]
branch = "master"
digest = "1:ead00edfeb6c6d5709cafa8b8ca88eabe6be6cbd050fa8adc377358003cc8957"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"cast5",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"ssh/terminal",
]
pruneopts = "UT"
revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"
[[projects]]
branch = "release-branch.go1.10"
digest = "1:bdc99a0ff03b87c7fe65dce8cfd5f7db86d65446850b2c655db7bc740f16aded"
name = "golang.org/x/net"
packages = [
"context",
"http2",
"http2/hpack",
"idna",
"lex/httplex",
]
pruneopts = "UT"
revision = "0ed95abb35c445290478a5348a7b38bb154135fd"
[[projects]]
digest = "1:9359217acc6040b4be710ce34473acef28023ad39bfafecea34ffaea7f1e1890"
name = "golang.org/x/oauth2"
packages = [
".",
"internal",
]
pruneopts = "UT"
revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4"
[[projects]]
branch = "master"
digest = "1:3d5e79e10549fd9119cbefd614b6d351ef5bd0be2f2b103a4199788e784cbc68"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "UT"
revision = "b4a75ba826a64a70990f11a225237acd6ef35c9f"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
digest = "1:d37b0ef2944431fe9e8ef35c6fffc8990d9e2ca300588df94a6890f3649ae365"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "UT"
revision = "f51c12702a4d776e4c1fa9b0fabab841babae631"
[[projects]]
digest = "1:6f3bd49ddf2e104e52062774d797714371fac1b8bddfd8e124ce78e6b2264a10"
name = "google.golang.org/appengine"
packages = [
"internal",
"internal/base",
"internal/datastore",
"internal/log",
"internal/remote_api",
"internal/urlfetch",
"urlfetch",
]
pruneopts = "UT"
revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
version = "v1.4.0"
[[projects]]
digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "UT"
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
[[projects]]
digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[[projects]]
branch = "master"
digest = "1:5d9a48e3144dfc06d3c5218b38164102f62300e83ffdd9a2d87b3c0b4135612f"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"autoscaling/v2beta2",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"coordination/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = "UT"
revision = "173ce66c1e39d1d0f56e0b3347ff2988068aecd0"
[[projects]]
digest = "1:2a883fd9627f43bbc104dc01b3bdf5956b7b36fbeb1f4957acdad7f778c576aa"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/naming",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/reflect",
]
pruneopts = "UT"
revision = "98853ca904e81a25e2000cae7f077dc30f81b85f"
[[projects]]
digest = "1:5f64f1dfa0d1a59ea1b4830786dababffbd0ba4e6fb4c9eb4cdfb649bdd207a8"
name = "k8s.io/client-go"
packages = [
"discovery",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta2",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/coordination/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"rest",
"rest/watch",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/retry",
]
pruneopts = "UT"
revision = "1638f8970cefaa404ff3a62950f88b08292b2696"
version = "v9.0.0"
[[projects]]
digest = "1:e2999bf1bb6eddc2a6aa03fe5e6629120a53088926520ca3b4765f77d7ff7eab"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "UT"
revision = "8139d8cb77af419532b33dfa7dd09fbc5f1d344f"
[[projects]]
digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849"
name = "sigs.k8s.io/yaml"
packages = ["."]
pruneopts = "UT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
version = "v1.1.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/fsnotify/fsnotify",
"github.com/google/go-github/github",
"github.com/mattermost/mattermost-server/model",
"github.com/nlopes/slack",
"github.com/olivere/elastic",
"github.com/sirupsen/logrus",
"github.com/stretchr/testify/assert",
"gopkg.in/yaml.v2",
"k8s.io/api/apps/v1beta1",
"k8s.io/api/batch/v1",
"k8s.io/api/core/v1",
"k8s.io/api/extensions/v1beta1",
"k8s.io/api/rbac/v1",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/fields",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/rest",
"k8s.io/client-go/tools/cache",
"k8s.io/client-go/tools/clientcmd",
]
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -1,35 +0,0 @@
[[constraint]]
name = "github.com/nlopes/slack"
version = "0.4.0"
[[constraint]]
name = "github.com/sirupsen/logrus"
version = "1.2.0"
[[constraint]]
name = "gopkg.in/yaml.v2"
version = "2.2.2"
[[constraint]]
branch = "master"
name = "k8s.io/api"
[[constraint]]
name = "k8s.io/client-go"
version = "9.0.0"
[prune]
go-tests = true
unused-packages = true
[[constraint]]
name = "github.com/olivere/elastic"
version = "6.0.0"
[[constraint]]
name = "github.com/mattermost/mattermost-server"
version = "4.9.4"
[[constraint]]
name = "github.com/fsnotify/fsnotify"
version = "1.4.7"

55
go.mod Normal file
View File

@@ -0,0 +1,55 @@
module github.com/infracloudio/botkube
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/Masterminds/squirrel v1.1.0 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8 // indirect
github.com/fortytw2/leaktest v1.3.0 // indirect
github.com/fsnotify/fsnotify v1.4.7
github.com/go-gorp/gorp v2.0.0+incompatible // indirect
github.com/go-ldap/ldap v3.0.3+incompatible // indirect
github.com/go-redis/redis v6.15.2+incompatible // indirect
github.com/go-sql-driver/mysql v1.4.1 // indirect
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
github.com/golang/protobuf v1.3.2 // indirect
github.com/google/go-github/v27 v27.0.4
github.com/googleapis/gnostic v0.3.0 // indirect
github.com/gorilla/websocket v1.4.0 // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/imdario/mergo v0.3.7 // indirect
github.com/json-iterator/go v1.1.7 // indirect
github.com/lib/pq v1.2.0 // indirect
github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5 // indirect
github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018 // indirect
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect
github.com/mattermost/gorp v2.0.0+incompatible // indirect
github.com/mattermost/mattermost-server v5.11.1+incompatible
github.com/mattn/go-sqlite3 v1.11.0 // indirect
github.com/nicksnyder/go-i18n v1.10.1 // indirect
github.com/nlopes/slack v0.4.0
github.com/olivere/elastic v6.2.21+incompatible
github.com/pborman/uuid v1.2.0 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/stretchr/objx v0.2.0 // indirect
github.com/ziutek/mymysql v1.5.4 // indirect
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0 // indirect
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
google.golang.org/appengine v1.6.1 // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.2.2
k8s.io/api v0.0.0-20190802020852-fde72677e822
k8s.io/apimachinery v0.0.0-20190731142807-035e418f1ad9
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
k8s.io/klog v0.3.3 // indirect
k8s.io/utils v0.0.0-20190801114015-581e00157fb1 // indirect
)
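Most of the entries above carry a `// indirect` marker: they are transitive requirements that must be listed in the main module, largely because dependencies such as `mattermost-server` predate Go modules and publish no `go.mod` of their own. A quick way to trace any one of them (the module path below is just an example):

```sh
# Show the import chain that pulls in an indirect requirement
go mod why -m github.com/BurntSushi/toml

# Or dump the full module requirement graph
go mod graph
```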

260
go.sum Normal file
View File

@@ -0,0 +1,260 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/squirrel v1.1.0 h1:baP1qLdoQCeTw3ifCdOq2dkYc6vGcmRdaociKLbEJXs=
github.com/Masterminds/squirrel v1.1.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8 h1:6muCmMJat6z7qptVrIf/+OWPxsjAfvhw5/6t+FwEkgg=
github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8/go.mod h1:nYia/MIs9OyvXXYboPmNOj0gVWo97Wx0sde+ZuKkoM4=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gorp/gorp v2.0.0+incompatible h1:dIQPsBtl6/H1MjVseWuWPXa7ET4p6Dve4j3Hg+UjqYw=
github.com/go-gorp/gorp v2.0.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E=
github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=
github.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-github/v27 v27.0.4 h1:N/EEqsvJLgqTbepTiMBz+12KhwLovv6YvwpRezd+4Fg=
github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5 h1:AsEBgzv3DhuYHI/GiQh2HxvTP71HCCE9E/tzGUzGdtU=
github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5/go.mod h1:c2mYKRyMb1BPkO5St0c/ps62L4S0W2NAkaTXj9qEI+0=
github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018 h1:MNApn+Z+fIT4NPZopPfCc1obT6aY3SVM6DOctz1A9ZU=
github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018/go.mod h1:sFlOUpQL1YcjhFVXhg1CG8ZASEs/Mf1oVb6H75JL/zg=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattermost/gorp v2.0.0+incompatible h1:/aLYsuWjk4kSVP3P3kn3zTGNUQxaCB6DjTejmwddKEs=
github.com/mattermost/gorp v2.0.0+incompatible/go.mod h1:0kX1qa3DOpaPJyOdMLeo7TcBN0QmUszj9a/VygOhDe0=
github.com/mattermost/mattermost-server v5.11.1+incompatible h1:LPzKY0+2Tic/ik67qIg6VrydRCgxNXZQXOeaiJ2rMBY=
github.com/mattermost/mattermost-server v5.11.1+incompatible/go.mod h1:5L6MjAec+XXQwMIt791Ganu45GKsSiM+I0tLR9wUj8Y=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nicksnyder/go-i18n v1.10.1 h1:isfg77E/aCD7+0lD/D00ebR2MV5vgeQ276WYyDaCRQc=
github.com/nicksnyder/go-i18n v1.10.1/go.mod h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4=
github.com/nlopes/slack v0.4.0 h1:OVnHm7lv5gGT5gkcHsZAyw++oHVFihbjWbL3UceUpiA=
github.com/nlopes/slack v0.4.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM=
github.com/olivere/elastic v6.2.21+incompatible h1:QnTuofzxOCV5FrYLywjkMxOmOWhAeild1VXxKRksK9Y=
github.com/olivere/elastic v6.2.21+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
k8s.io/api v0.0.0-20190802020852-fde72677e822 h1:1cICZffeuIWfUkAP8YcMNIyG5A5bW5wObpIA/j2nzVE=
k8s.io/api v0.0.0-20190802020852-fde72677e822/go.mod h1:1KBIEXI7qpsZM6s0w+RthBLGH1IYg7JQlerAl+wN1lw=
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
k8s.io/apimachinery v0.0.0-20190731142807-035e418f1ad9 h1:RedGIevS9N0IS0QVL+kWS8ZNBQVXh7nioDippOKAuak=
k8s.io/apimachinery v0.0.0-20190731142807-035e418f1ad9/go.mod h1:+ntn62igV2hyNj7/0brOvXSMONE2KxcePkSxK7/9FFQ=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c=
k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE=
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
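The lines above are the module checksums: one `h1:` hash per module source tree and one per `go.mod` file. They are verified automatically whenever modules are downloaded, and `go mod verify` (now run in CI in place of `dep ensure`) additionally confirms that the local module cache has not been modified since download; a clean run prints:

```sh
$ go mod verify
all modules verified
```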

View File

@@ -189,7 +189,7 @@ func (b *mmBot) getTeam() *model.Team {
// Check if botkube user exists in Mattermost
func (b *mmBot) getUser() *model.User {
users, resp := client.AutocompleteUsersInTeam(b.getTeam().Id, BotName, "")
users, resp := client.AutocompleteUsersInTeam(b.getTeam().Id, BotName, 1, "")
if resp.Error != nil {
logging.Logger.Fatal("There was a problem finding Mattermost user ", BotName, "\nError: ", resp.Error)
}

View File

@@ -6,7 +6,7 @@ import (
"os"
"time"
"github.com/google/go-github/github"
"github.com/google/go-github/v27/github"
"github.com/infracloudio/botkube/pkg/config"
log "github.com/infracloudio/botkube/pkg/logging"
)
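The `/v27` element in the new import path is how Go modules select a major version of `go-github`. As an illustration only, and not the actual contents of `pkg/controller/upgrade.go`, a minimal use of the versioned module to look up the latest release of a repository could look like this:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v27/github"
)

func main() {
	// An unauthenticated client is enough for public release metadata.
	client := github.NewClient(nil)

	// Owner and repo are illustrative placeholders.
	release, _, err := client.Repositories.GetLatestRelease(context.Background(), "infracloudio", "botkube")
	if err != nil {
		fmt.Println("failed to fetch latest release:", err)
		return
	}
	fmt.Println("latest release tag:", release.GetTagName())
}
```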

View File

@@ -1,2 +0,0 @@
*.sw[op]
.DS_Store

View File

@@ -1,13 +0,0 @@
Copyright (c) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,14 +0,0 @@
# This is an unmaintained fork, left only so it doesn't break imports.
Please see http://log4go.googlecode.com/
Installation:
- Run `goinstall log4go.googlecode.com/hg`
Usage:
- Add the following import:
import l4g "log4go.googlecode.com/hg"
Acknowledgements:
- pomack
For providing awesome patches to bring log4go up to the latest Go spec

View File

@@ -1,296 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
package log4go
import (
"encoding/xml"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
type xmlProperty struct {
Name string `xml:"name,attr"`
Value string `xml:",chardata"`
}
type xmlFilter struct {
Enabled string `xml:"enabled,attr"`
Tag string `xml:"tag"`
Level string `xml:"level"`
Type string `xml:"type"`
Property []xmlProperty `xml:"property"`
}
type xmlLoggerConfig struct {
Filter []xmlFilter `xml:"filter"`
}
// Load XML configuration; see examples/example.xml for documentation
func (log Logger) LoadConfiguration(filename string) {
log.Close()
// Open the configuration file
fd, err := os.Open(filename)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not open %q for reading: %s\n", filename, err)
os.Exit(1)
}
contents, err := ioutil.ReadAll(fd)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not read %q: %s\n", filename, err)
os.Exit(1)
}
xc := new(xmlLoggerConfig)
if err := xml.Unmarshal(contents, xc); err != nil {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not parse XML configuration in %q: %s\n", filename, err)
os.Exit(1)
}
for _, xmlfilt := range xc.Filter {
var filt LogWriter
var lvl Level
bad, good, enabled := false, true, false
// Check required children
if len(xmlfilt.Enabled) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required attribute %s for filter missing in %s\n", "enabled", filename)
bad = true
} else {
enabled = xmlfilt.Enabled != "false"
}
if len(xmlfilt.Tag) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "tag", filename)
bad = true
}
if len(xmlfilt.Type) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "type", filename)
bad = true
}
if len(xmlfilt.Level) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "level", filename)
bad = true
}
switch xmlfilt.Level {
case "FINEST":
lvl = FINEST
case "FINE":
lvl = FINE
case "DEBUG":
lvl = DEBUG
case "TRACE":
lvl = TRACE
case "INFO":
lvl = INFO
case "WARNING":
lvl = WARNING
case "ERROR":
lvl = ERROR
case "CRITICAL":
lvl = CRITICAL
default:
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter has unknown value in %s: %s\n", "level", filename, xmlfilt.Level)
bad = true
}
// Just so all of the required attributes are errored at the same time if missing
if bad {
os.Exit(1)
}
switch xmlfilt.Type {
case "console":
filt, good = xmlToConsoleLogWriter(filename, xmlfilt.Property, enabled)
case "file":
filt, good = xmlToFileLogWriter(filename, xmlfilt.Property, enabled)
case "xml":
filt, good = xmlToXMLLogWriter(filename, xmlfilt.Property, enabled)
case "socket":
filt, good = xmlToSocketLogWriter(filename, xmlfilt.Property, enabled)
default:
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not load XML configuration in %s: unknown filter type \"%s\"\n", filename, xmlfilt.Type)
os.Exit(1)
}
// Just so all of the required params are errored at the same time if wrong
if !good {
os.Exit(1)
}
// If we're disabled (syntax and correctness checks only), don't add to logger
if !enabled {
continue
}
log[xmlfilt.Tag] = &Filter{lvl, filt}
}
}
func xmlToConsoleLogWriter(filename string, props []xmlProperty, enabled bool) (*ConsoleLogWriter, bool) {
format := "[%D %T] [%L] (%S) %M"
// Parse properties
for _, prop := range props {
switch prop.Name {
case "format":
format = strings.Trim(prop.Value, " \r\n")
default:
fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for console filter in %s\n", prop.Name, filename)
}
}
// If it's disabled, we're just checking syntax
if !enabled {
return nil, true
}
clw := NewConsoleLogWriter()
clw.SetFormat(format)
return clw, true
}
// Parse a number with K/M/G suffixes based on thousands (1000) or 2^10 (1024)
func strToNumSuffix(str string, mult int) int {
num := 1
if len(str) > 1 {
switch str[len(str)-1] {
case 'G', 'g':
num *= mult
fallthrough
case 'M', 'm':
num *= mult
fallthrough
case 'K', 'k':
num *= mult
str = str[0 : len(str)-1]
}
}
parsed, _ := strconv.Atoi(str)
return parsed * num
}
func xmlToFileLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) {
file := ""
format := "[%D %T] [%L] (%S) %M"
maxlines := 0
maxsize := 0
daily := false
rotate := false
// Parse properties
for _, prop := range props {
switch prop.Name {
case "filename":
file = strings.Trim(prop.Value, " \r\n")
case "format":
format = strings.Trim(prop.Value, " \r\n")
case "maxlines":
maxlines = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000)
case "maxsize":
maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024)
case "daily":
daily = strings.Trim(prop.Value, " \r\n") != "false"
case "rotate":
rotate = strings.Trim(prop.Value, " \r\n") != "false"
default:
fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename)
}
}
// Check properties
if len(file) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "filename", filename)
return nil, false
}
// If it's disabled, we're just checking syntax
if !enabled {
return nil, true
}
flw := NewFileLogWriter(file, rotate)
flw.SetFormat(format)
flw.SetRotateLines(maxlines)
flw.SetRotateSize(maxsize)
flw.SetRotateDaily(daily)
return flw, true
}
func xmlToXMLLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) {
file := ""
maxrecords := 0
maxsize := 0
daily := false
rotate := false
// Parse properties
for _, prop := range props {
switch prop.Name {
case "filename":
file = strings.Trim(prop.Value, " \r\n")
case "maxrecords":
maxrecords = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000)
case "maxsize":
maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024)
case "daily":
daily = strings.Trim(prop.Value, " \r\n") != "false"
case "rotate":
rotate = strings.Trim(prop.Value, " \r\n") != "false"
default:
fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for xml filter in %s\n", prop.Name, filename)
}
}
// Check properties
if len(file) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for xml filter missing in %s\n", "filename", filename)
return nil, false
}
// If it's disabled, we're just checking syntax
if !enabled {
return nil, true
}
xlw := NewXMLLogWriter(file, rotate)
xlw.SetRotateLines(maxrecords)
xlw.SetRotateSize(maxsize)
xlw.SetRotateDaily(daily)
return xlw, true
}
func xmlToSocketLogWriter(filename string, props []xmlProperty, enabled bool) (SocketLogWriter, bool) {
endpoint := ""
protocol := "udp"
// Parse properties
for _, prop := range props {
switch prop.Name {
case "endpoint":
endpoint = strings.Trim(prop.Value, " \r\n")
case "protocol":
protocol = strings.Trim(prop.Value, " \r\n")
default:
fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename)
}
}
// Check properties
if len(endpoint) == 0 {
fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "endpoint", filename)
return nil, false
}
// If it's disabled, we're just checking syntax
if !enabled {
return nil, true
}
return NewSocketLogWriter(protocol, endpoint), true
}

View File

@@ -1,264 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
package log4go
import (
"fmt"
"os"
"time"
)
// This log writer sends output to a file
type FileLogWriter struct {
rec chan *LogRecord
rot chan bool
// The opened file
filename string
file *os.File
// The logging format
format string
// File header/trailer
header, trailer string
// Rotate at linecount
maxlines int
maxlines_curlines int
// Rotate at size
maxsize int
maxsize_cursize int
// Rotate daily
daily bool
daily_opendate int
// Keep old logfiles (.001, .002, etc)
rotate bool
maxbackup int
}
// This is the FileLogWriter's output method
func (w *FileLogWriter) LogWrite(rec *LogRecord) {
w.rec <- rec
}
func (w *FileLogWriter) Close() {
close(w.rec)
w.file.Sync()
}
// NewFileLogWriter creates a new LogWriter which writes to the given file and
// has rotation enabled if rotate is true.
//
// If rotate is true, any time a new log file is opened, the old one is renamed
// with a .### extension to preserve it. The various Set* methods can be used
// to configure log rotation based on lines, size, and daily.
//
// The standard log-line format is:
// [%D %T] [%L] (%S) %M
func NewFileLogWriter(fname string, rotate bool) *FileLogWriter {
w := &FileLogWriter{
rec: make(chan *LogRecord, LogBufferLength),
rot: make(chan bool),
filename: fname,
format: "[%D %T] [%L] (%S) %M",
rotate: rotate,
maxbackup: 999,
}
// open the file for the first time
if err := w.intRotate(); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
return nil
}
go func() {
defer func() {
if w.file != nil {
fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()}))
w.file.Close()
}
}()
for {
select {
case <-w.rot:
if err := w.intRotate(); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
return
}
case rec, ok := <-w.rec:
if !ok {
return
}
now := time.Now()
if (w.maxlines > 0 && w.maxlines_curlines >= w.maxlines) ||
(w.maxsize > 0 && w.maxsize_cursize >= w.maxsize) ||
(w.daily && now.Day() != w.daily_opendate) {
if err := w.intRotate(); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
return
}
}
// Perform the write
n, err := fmt.Fprint(w.file, FormatLogRecord(w.format, rec))
if err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
return
}
// Update the counts
w.maxlines_curlines++
w.maxsize_cursize += n
}
}
}()
return w
}
// Request that the logs rotate
func (w *FileLogWriter) Rotate() {
w.rot <- true
}
// If this is called in a threaded context, it MUST be synchronized
func (w *FileLogWriter) intRotate() error {
// Close any log file that may be open
if w.file != nil {
fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()}))
w.file.Close()
}
// If we are keeping log files, move it to the next available number
if w.rotate {
_, err := os.Lstat(w.filename)
if err == nil { // file exists
// Find the next available number
num := 1
fname := ""
if w.daily && time.Now().Day() != w.daily_opendate {
yesterday := time.Now().AddDate(0, 0, -1).Format("2006-01-02")
for ; err == nil && num <= w.maxbackup; num++ {
fname = w.filename + fmt.Sprintf(".%s.%03d", yesterday, num)
_, err = os.Lstat(fname)
}
// return error if the last file checked still existed
if err == nil {
return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.filename)
}
} else {
num = w.maxbackup - 1
for ; num >= 1; num-- {
fname = w.filename + fmt.Sprintf(".%d", num)
nfname := w.filename + fmt.Sprintf(".%d", num+1)
_, err = os.Lstat(fname)
if err == nil {
os.Rename(fname, nfname)
}
}
}
w.file.Close()
// Rename the file to its newfound home
err = os.Rename(w.filename, fname)
if err != nil {
return fmt.Errorf("Rotate: %s\n", err)
}
}
}
// Open the log file
fd, err := os.OpenFile(w.filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
if err != nil {
return err
}
w.file = fd
now := time.Now()
fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: now}))
// Set the daily open date to the current date
w.daily_opendate = now.Day()
// initialize rotation values
w.maxlines_curlines = 0
w.maxsize_cursize = 0
return nil
}
// Set the logging format (chainable). Must be called before the first log
// message is written.
func (w *FileLogWriter) SetFormat(format string) *FileLogWriter {
w.format = format
return w
}
// Set the logfile header and footer (chainable). Must be called before the first log
// message is written. These are formatted similar to the FormatLogRecord (e.g.
// you can use %D and %T in your header/footer for date and time).
func (w *FileLogWriter) SetHeadFoot(head, foot string) *FileLogWriter {
w.header, w.trailer = head, foot
if w.maxlines_curlines == 0 {
fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: time.Now()}))
}
return w
}
// Set rotate at linecount (chainable). Must be called before the first log
// message is written.
func (w *FileLogWriter) SetRotateLines(maxlines int) *FileLogWriter {
//fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateLines: %v\n", maxlines)
w.maxlines = maxlines
return w
}
// Set rotate at size (chainable). Must be called before the first log message
// is written.
func (w *FileLogWriter) SetRotateSize(maxsize int) *FileLogWriter {
//fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateSize: %v\n", maxsize)
w.maxsize = maxsize
return w
}
// Set rotate daily (chainable). Must be called before the first log message is
// written.
func (w *FileLogWriter) SetRotateDaily(daily bool) *FileLogWriter {
//fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateDaily: %v\n", daily)
w.daily = daily
return w
}
// Set max backup files. Must be called before the first log message
// is written.
func (w *FileLogWriter) SetRotateMaxBackup(maxbackup int) *FileLogWriter {
w.maxbackup = maxbackup
return w
}
// SetRotate changes whether or not the old logs are kept. (chainable) Must be
// called before the first log message is written. If rotate is false, the
// files are overwritten; otherwise, they are rotated to another file before the
// new log is opened.
func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter {
//fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotate: %v\n", rotate)
w.rotate = rotate
return w
}
// NewXMLLogWriter is a utility method for creating a FileLogWriter set up to
// output XML record log messages instead of line-based ones.
func NewXMLLogWriter(fname string, rotate bool) *FileLogWriter {
return NewFileLogWriter(fname, rotate).SetFormat(
` <record level="%L">
<timestamp>%D %T</timestamp>
<source>%S</source>
<message>%M</message>
</record>`).SetHeadFoot("<log created=\"%D %T\">", "</log>")
}
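
For the file writer itself, rotation can be driven by size, line count, day changes, or an explicit Rotate() call, and the chainable setters must run before the first message is written. A small usage sketch, assuming the vendored import path `github.com/alecthomas/log4go` (path and limits are illustrative):

```go
package main

import (
	log4go "github.com/alecthomas/log4go" // assumed vendored import path
)

func main() {
	// Keep numbered backups (app.log.1, app.log.2, ...) and stamp each file
	// with a header/footer rendered through the usual %D/%T format codes.
	w := log4go.NewFileLogWriter("app.log", true).
		SetFormat("[%D %T] [%L] %M").
		SetHeadFoot("=== opened %D %T ===", "=== closed %D %T ===").
		SetRotateSize(1 << 20).
		SetRotateMaxBackup(5)

	log := make(log4go.Logger)
	log.AddFilter("file", log4go.FINE, w)
	defer log.Close()

	log.Fine("goes to app.log")
	w.Rotate() // force a roll-over regardless of the size/line/day triggers
}
```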

View File

@ -1,484 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
// Package log4go provides level-based and highly configurable logging.
//
// Enhanced Logging
//
// This is inspired by the logging functionality in Java. Essentially, you create a Logger
// object and create output filters for it. You can send whatever you want to the Logger,
// and it will filter that based on your settings and send it to the outputs. This way, you
// can put as much debug code in your program as you want, and when you're done you can filter
// out the mundane messages so only the important ones show up.
//
// Utility functions are provided to make life easier. Here is some example code to get started:
//
// log := log4go.NewLogger()
// log.AddFilter("stdout", log4go.DEBUG, log4go.NewConsoleLogWriter())
// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true))
// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02"))
//
// The first two lines can be combined with the utility NewDefaultLogger:
//
// log := log4go.NewDefaultLogger(log4go.DEBUG)
// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true))
// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02"))
//
// Usage notes:
// - The ConsoleLogWriter does not display the source of the message to standard
// output, but the FileLogWriter does.
// - The utility functions (Info, Debug, Warn, etc) derive their source from the
// calling function, and this incurs extra overhead.
//
// Changes from 2.0:
// - The external interface has remained mostly stable, but a lot of the
// internals have been changed, so if you depended on any of this or created
// your own LogWriter, then you will probably have to update your code. In
// particular, Logger is now a map and ConsoleLogWriter is now a channel
// behind-the-scenes, and the LogWrite method no longer has return values.
//
// Future work: (please let me know if you think I should work on any of these particularly)
// - Log file rotation
// - Logging configuration files ala log4j
// - Have the ability to remove filters?
// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows
// for another method of logging
// - Add an XML filter type
package log4go
import (
"errors"
"fmt"
"os"
"runtime"
"strings"
"time"
)
// Version information
const (
L4G_VERSION = "log4go-v3.0.1"
L4G_MAJOR = 3
L4G_MINOR = 0
L4G_BUILD = 1
)
/****** Constants ******/
// These are the integer logging levels used by the logger
type Level int
const (
FINEST Level = iota
FINE
DEBUG
TRACE
INFO
WARNING
ERROR
CRITICAL
)
// Logging level strings
var (
levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"}
)
func (l Level) String() string {
if l < 0 || int(l) >= len(levelStrings) {
return "UNKNOWN"
}
return levelStrings[int(l)]
}
/****** Variables ******/
var (
// LogBufferLength specifies how many log messages a particular log4go
// logger can buffer at a time before writing them.
LogBufferLength = 32
)
/****** LogRecord ******/
// A LogRecord contains all of the pertinent information for each message
type LogRecord struct {
Level Level // The log level
Created time.Time // The time at which the log message was created (nanoseconds)
Source string // The message source
Message string // The log message
}
/****** LogWriter ******/
// This is an interface for anything that should be able to write logs
type LogWriter interface {
// This will be called to log a LogRecord message.
LogWrite(rec *LogRecord)
// This should clean up anything lingering about the LogWriter, as it is called before
// the LogWriter is removed. LogWrite should not be called after Close.
Close()
}
/****** Logger ******/
// A Filter represents the log level below which no log records are written to
// the associated LogWriter.
type Filter struct {
Level Level
LogWriter
}
// A Logger represents a collection of Filters through which log messages are
// written.
type Logger map[string]*Filter
// Create a new logger.
//
// DEPRECATED: Use make(Logger) instead.
func NewLogger() Logger {
os.Stderr.WriteString("warning: use of deprecated NewLogger\n")
return make(Logger)
}
// Create a new logger with a "stdout" filter configured to send log messages at
// or above lvl to standard output.
//
// DEPRECATED: use NewDefaultLogger instead.
func NewConsoleLogger(lvl Level) Logger {
os.Stderr.WriteString("warning: use of deprecated NewConsoleLogger\n")
return Logger{
"stdout": &Filter{lvl, NewConsoleLogWriter()},
}
}
// Create a new logger with a "stdout" filter configured to send log messages at
// or above lvl to standard output.
func NewDefaultLogger(lvl Level) Logger {
return Logger{
"stdout": &Filter{lvl, NewConsoleLogWriter()},
}
}
// Closes all log writers in preparation for exiting the program or a
// reconfiguration of logging. Calling this is not really imperative, unless
// you want to guarantee that all log messages are written. Close removes
// all filters (and thus all LogWriters) from the logger.
func (log Logger) Close() {
// Close all open loggers
for name, filt := range log {
filt.Close()
delete(log, name)
}
}
// Add a new LogWriter to the Logger which will only log messages at lvl or
// higher. This function should not be called from multiple goroutines.
// Returns the logger for chaining.
func (log Logger) AddFilter(name string, lvl Level, writer LogWriter) Logger {
log[name] = &Filter{lvl, writer}
return log
}
/******* Logging *******/
// Send a formatted log message internally
func (log Logger) intLogf(lvl Level, format string, args ...interface{}) {
skip := true
// Determine if any logging will be done
for _, filt := range log {
if lvl >= filt.Level {
skip = false
break
}
}
if skip {
return
}
// Determine caller func
pc, _, lineno, ok := runtime.Caller(2)
src := ""
if ok {
src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno)
}
msg := format
if len(args) > 0 {
msg = fmt.Sprintf(format, args...)
}
// Make the log record
rec := &LogRecord{
Level: lvl,
Created: time.Now(),
Source: src,
Message: msg,
}
// Dispatch the logs
for _, filt := range log {
if lvl < filt.Level {
continue
}
filt.LogWrite(rec)
}
}
// Send a closure log message internally
func (log Logger) intLogc(lvl Level, closure func() string) {
skip := true
// Determine if any logging will be done
for _, filt := range log {
if lvl >= filt.Level {
skip = false
break
}
}
if skip {
return
}
// Determine caller func
pc, _, lineno, ok := runtime.Caller(2)
src := ""
if ok {
src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno)
}
// Make the log record
rec := &LogRecord{
Level: lvl,
Created: time.Now(),
Source: src,
Message: closure(),
}
// Dispatch the logs
for _, filt := range log {
if lvl < filt.Level {
continue
}
filt.LogWrite(rec)
}
}
// Send a log message with manual level, source, and message.
func (log Logger) Log(lvl Level, source, message string) {
skip := true
// Determine if any logging will be done
for _, filt := range log {
if lvl >= filt.Level {
skip = false
break
}
}
if skip {
return
}
// Make the log record
rec := &LogRecord{
Level: lvl,
Created: time.Now(),
Source: source,
Message: message,
}
// Dispatch the logs
for _, filt := range log {
if lvl < filt.Level {
continue
}
filt.LogWrite(rec)
}
}
// Logf logs a formatted log message at the given log level, using the caller as
// its source.
func (log Logger) Logf(lvl Level, format string, args ...interface{}) {
log.intLogf(lvl, format, args...)
}
// Logc logs a string returned by the closure at the given log level, using the caller as
// its source. If no log message would be written, the closure is never called.
func (log Logger) Logc(lvl Level, closure func() string) {
log.intLogc(lvl, closure)
}
// Finest logs a message at the finest log level.
// See Debug for an explanation of the arguments.
func (log Logger) Finest(arg0 interface{}, args ...interface{}) {
const (
lvl = FINEST
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
log.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
log.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Fine logs a message at the fine log level.
// See Debug for an explanation of the arguments.
func (log Logger) Fine(arg0 interface{}, args ...interface{}) {
const (
lvl = FINE
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
log.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
log.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Debug is a utility method for debug log messages.
// The behavior of Debug depends on the first argument:
// - arg0 is a string
// When given a string as the first argument, this behaves like Logf but with
// the DEBUG log level: the first argument is interpreted as a format for the
// latter arguments.
// - arg0 is a func()string
// When given a closure of type func()string, this logs the string returned by
// the closure iff it will be logged. The closure runs at most one time.
// - arg0 is interface{}
// When given anything else, the log message will be each of the arguments
// formatted with %v and separated by spaces (ala Sprint).
func (log Logger) Debug(arg0 interface{}, args ...interface{}) {
const (
lvl = DEBUG
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
log.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
log.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Trace logs a message at the trace log level.
// See Debug for an explanation of the arguments.
func (log Logger) Trace(arg0 interface{}, args ...interface{}) {
const (
lvl = TRACE
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
log.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
log.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Info logs a message at the info log level.
// See Debug for an explanation of the arguments.
func (log Logger) Info(arg0 interface{}, args ...interface{}) {
const (
lvl = INFO
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
log.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
log.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Warn logs a message at the warning log level and returns the formatted error.
// At the warning level and higher, there is no performance benefit if the
// message is not actually logged, because all formats are processed and all
// closures are executed to format the error message.
// See Debug for further explanation of the arguments.
func (log Logger) Warn(arg0 interface{}, args ...interface{}) error {
const (
lvl = WARNING
)
var msg string
switch first := arg0.(type) {
case string:
// Use the string as a format string
msg = fmt.Sprintf(first, args...)
case func() string:
// Log the closure (no other arguments used)
msg = first()
default:
// Build a format string so that it will be similar to Sprint
msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
}
log.intLogf(lvl, msg)
return errors.New(msg)
}
// Error logs a message at the error log level and returns the formatted error,
// See Warn for an explanation of the performance and Debug for an explanation
// of the parameters.
func (log Logger) Error(arg0 interface{}, args ...interface{}) error {
const (
lvl = ERROR
)
var msg string
switch first := arg0.(type) {
case string:
// Use the string as a format string
msg = fmt.Sprintf(first, args...)
case func() string:
// Log the closure (no other arguments used)
msg = first()
default:
// Build a format string so that it will be similar to Sprint
msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
}
log.intLogf(lvl, msg)
return errors.New(msg)
}
// Critical logs a message at the critical log level and returns the formatted error,
// See Warn for an explanation of the performance and Debug for an explanation
// of the parameters.
func (log Logger) Critical(arg0 interface{}, args ...interface{}) error {
const (
lvl = CRITICAL
)
var msg string
switch first := arg0.(type) {
case string:
// Use the string as a format string
msg = fmt.Sprintf(first, args...)
case func() string:
// Log the closure (no other arguments used)
msg = first()
default:
// Build a format string so that it will be similar to Sprint
msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
}
log.intLogf(lvl, msg)
return errors.New(msg)
}
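
A Logger is just a map of named filters; the level helpers accept either a format string or a closure that is only evaluated when some filter would log it, and Warn/Error/Critical additionally hand the formatted message back as an error. A usage sketch, assuming the vendored import path `github.com/alecthomas/log4go`:

```go
package main

import (
	"fmt"

	log4go "github.com/alecthomas/log4go" // assumed vendored import path
)

func main() {
	// "stdout" console filter at INFO plus a file filter at FINE.
	log := log4go.NewDefaultLogger(log4go.INFO)
	log.AddFilter("file", log4go.FINE, log4go.NewFileLogWriter("example.log", false))
	defer log.Close()

	// Format-string form.
	log.Info("service started on %s", ":8080")

	// Closure form: only evaluated because the FINE filter accepts DEBUG,
	// so expensive dumps cost nothing when nobody listens.
	log.Debug(func() string { return "expensive state dump" })

	// Warn/Error/Critical return the formatted message as an error.
	if err := log.Warn("disk usage at %d%%", 91); err != nil {
		fmt.Println("also returned as error:", err)
	}
}
```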

View File

@ -1,138 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
package log4go
import (
"bytes"
"fmt"
"io"
"strings"
"sync"
)
const (
FORMAT_DEFAULT = "[%D %T] [%L] (%S) %M"
FORMAT_SHORT = "[%t %d] [%L] %M"
FORMAT_ABBREV = "[%L] %M"
)
type formatCacheType struct {
LastUpdateSeconds int64
shortTime, shortDate string
longTime, longDate string
}
var formatCache = &formatCacheType{}
var muFormatCache = sync.Mutex{}
func setFormatCache(f *formatCacheType) {
muFormatCache.Lock()
defer muFormatCache.Unlock()
formatCache = f
}
func getFormatCache() *formatCacheType {
muFormatCache.Lock()
defer muFormatCache.Unlock()
return formatCache
}
// Known format codes:
// %T - Time (15:04:05 MST)
// %t - Time (15:04)
// %D - Date (2006/01/02)
// %d - Date (01/02/06)
// %L - Level (FNST, FINE, DEBG, TRAC, INFO, WARN, EROR, CRIT)
// %S - Source
// %M - Message
// Ignores unknown formats
// Recommended: "[%D %T] [%L] (%S) %M"
func FormatLogRecord(format string, rec *LogRecord) string {
if rec == nil {
return "<nil>"
}
if len(format) == 0 {
return ""
}
out := bytes.NewBuffer(make([]byte, 0, 64))
secs := rec.Created.UnixNano() / 1e9
cache := getFormatCache()
if cache.LastUpdateSeconds != secs {
month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year()
hour, minute, second := rec.Created.Hour(), rec.Created.Minute(), rec.Created.Second()
zone, _ := rec.Created.Zone()
updated := &formatCacheType{
LastUpdateSeconds: secs,
shortTime: fmt.Sprintf("%02d:%02d", hour, minute),
shortDate: fmt.Sprintf("%02d/%02d/%02d", day, month, year%100),
longTime: fmt.Sprintf("%02d:%02d:%02d %s", hour, minute, second, zone),
longDate: fmt.Sprintf("%04d/%02d/%02d", year, month, day),
}
cache = updated
setFormatCache(updated)
}
// Split the string into pieces by % signs
pieces := bytes.Split([]byte(format), []byte{'%'})
// Iterate over the pieces, replacing known formats
for i, piece := range pieces {
if i > 0 && len(piece) > 0 {
switch piece[0] {
case 'T':
out.WriteString(cache.longTime)
case 't':
out.WriteString(cache.shortTime)
case 'D':
out.WriteString(cache.longDate)
case 'd':
out.WriteString(cache.shortDate)
case 'L':
out.WriteString(levelStrings[rec.Level])
case 'S':
out.WriteString(rec.Source)
case 's':
slice := strings.Split(rec.Source, "/")
out.WriteString(slice[len(slice)-1])
case 'M':
out.WriteString(rec.Message)
}
if len(piece) > 1 {
out.Write(piece[1:])
}
} else if len(piece) > 0 {
out.Write(piece)
}
}
out.WriteByte('\n')
return out.String()
}
// This is the standard writer that prints to standard output.
type FormatLogWriter chan *LogRecord
// This creates a new FormatLogWriter
func NewFormatLogWriter(out io.Writer, format string) FormatLogWriter {
records := make(FormatLogWriter, LogBufferLength)
go records.run(out, format)
return records
}
func (w FormatLogWriter) run(out io.Writer, format string) {
for rec := range w {
fmt.Fprint(out, FormatLogRecord(format, rec))
}
}
// This is the FormatLogWriter's output method. This will block if the output
// buffer is full.
func (w FormatLogWriter) LogWrite(rec *LogRecord) {
w <- rec
}
// Close stops the logger from sending messages to standard output. Attempts to
// send log messages to this logger after a Close have undefined behavior.
func (w FormatLogWriter) Close() {
close(w)
}
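
FormatLogRecord caches the rendered date/time fragments once per second and drives every writer's pattern; it can also be called directly, or records can be streamed through the generic FormatLogWriter bound to any io.Writer. A short sketch, assuming the vendored import path `github.com/alecthomas/log4go`:

```go
package main

import (
	"fmt"
	"os"
	"time"

	log4go "github.com/alecthomas/log4go" // assumed vendored import path
)

func main() {
	rec := &log4go.LogRecord{
		Level:   log4go.WARNING,
		Created: time.Now(),
		Source:  "pattlog_example",
		Message: "formatted by hand",
	}

	// Render one record with the abbreviated pattern: "[WARN] formatted by hand".
	fmt.Print(log4go.FormatLogRecord(log4go.FORMAT_ABBREV, rec))

	// Or stream records through a FormatLogWriter bound to any io.Writer.
	w := log4go.NewFormatLogWriter(os.Stdout, log4go.FORMAT_SHORT)
	w.LogWrite(rec)
	w.Close()
	time.Sleep(50 * time.Millisecond) // give the writer goroutine time to drain before exit
}
```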

View File

@ -1,57 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
package log4go
import (
"encoding/json"
"fmt"
"net"
"os"
)
// This log writer sends output to a socket
type SocketLogWriter chan *LogRecord
// This is the SocketLogWriter's output method
func (w SocketLogWriter) LogWrite(rec *LogRecord) {
w <- rec
}
func (w SocketLogWriter) Close() {
close(w)
}
func NewSocketLogWriter(proto, hostport string) SocketLogWriter {
sock, err := net.Dial(proto, hostport)
if err != nil {
fmt.Fprintf(os.Stderr, "NewSocketLogWriter(%q): %s\n", hostport, err)
return nil
}
w := SocketLogWriter(make(chan *LogRecord, LogBufferLength))
go func() {
defer func() {
if sock != nil && proto == "tcp" {
sock.Close()
}
}()
for rec := range w {
// Marshall into JSON
js, err := json.Marshal(rec)
if err != nil {
fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err)
return
}
_, err = sock.Write(js)
if err != nil {
fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err)
return
}
}
}()
return w
}
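
The socket writer marshals each LogRecord to JSON and writes it to the dialed connection; a nil return means the dial failed and the error was already reported on stderr. A sketch, assuming the vendored import path `github.com/alecthomas/log4go` (the UDP address is a placeholder for whatever collector is listening):

```go
package main

import (
	"time"

	log4go "github.com/alecthomas/log4go" // assumed vendored import path
)

func main() {
	// Placeholder address for an external log collector.
	w := log4go.NewSocketLogWriter("udp", "127.0.0.1:12124")
	if w == nil {
		return // dial failed; the constructor already reported the error on stderr
	}

	log := make(log4go.Logger)
	log.AddFilter("net", log4go.INFO, w)

	log.Info("shipped as a JSON-encoded LogRecord over UDP")

	time.Sleep(50 * time.Millisecond) // let the sender goroutine flush
	log.Close()
}
```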

View File

@ -1,49 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
package log4go
import (
"fmt"
"io"
"os"
"time"
)
var stdout io.Writer = os.Stdout
// This is the standard writer that prints to standard output.
type ConsoleLogWriter struct {
format string
w chan *LogRecord
}
// This creates a new ConsoleLogWriter
func NewConsoleLogWriter() *ConsoleLogWriter {
consoleWriter := &ConsoleLogWriter{
format: "[%T %D] [%L] (%S) %M",
w: make(chan *LogRecord, LogBufferLength),
}
go consoleWriter.run(stdout)
return consoleWriter
}
func (c *ConsoleLogWriter) SetFormat(format string) {
c.format = format
}
func (c *ConsoleLogWriter) run(out io.Writer) {
for rec := range c.w {
fmt.Fprint(out, FormatLogRecord(c.format, rec))
}
}
// This is the ConsoleLogWriter's output method. This will block if the output
// buffer is full.
func (c *ConsoleLogWriter) LogWrite(rec *LogRecord) {
c.w <- rec
}
// Close stops the logger from sending messages to standard output. Attempts to
// send log messages to this logger after a Close have undefined behavior.
func (c *ConsoleLogWriter) Close() {
close(c.w)
time.Sleep(50 * time.Millisecond) // Try to give console I/O time to complete
}
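
The console writer buffers records on a channel and prints them from its own goroutine; SetFormat swaps the pattern, and Close sleeps briefly so pending output can drain. A sketch, assuming the vendored import path `github.com/alecthomas/log4go`:

```go
package main

import (
	log4go "github.com/alecthomas/log4go" // assumed vendored import path
)

func main() {
	console := log4go.NewConsoleLogWriter()
	console.SetFormat("[%T] [%L] %M") // drop the source column for terminal output

	log := make(log4go.Logger)
	log.AddFilter("stdout", log4go.DEBUG, console)
	defer log.Close() // Close sleeps ~50ms so pending console output can drain

	log.Debug("console writer with a custom format")
}
```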

View File

@ -1,278 +0,0 @@
// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
package log4go
import (
"errors"
"fmt"
"os"
"strings"
)
var (
Global Logger
)
func init() {
Global = NewDefaultLogger(DEBUG)
}
// Wrapper for (*Logger).LoadConfiguration
func LoadConfiguration(filename string) {
Global.LoadConfiguration(filename)
}
// Wrapper for (*Logger).AddFilter
func AddFilter(name string, lvl Level, writer LogWriter) {
Global.AddFilter(name, lvl, writer)
}
// Wrapper for (*Logger).Close (closes and removes all logwriters)
func Close() {
Global.Close()
}
func Crash(args ...interface{}) {
if len(args) > 0 {
Global.intLogf(CRITICAL, strings.Repeat(" %v", len(args))[1:], args...)
}
panic(args)
}
// Logs the given message and crashes the program
func Crashf(format string, args ...interface{}) {
Global.intLogf(CRITICAL, format, args...)
Global.Close() // so that hopefully the messages get logged
panic(fmt.Sprintf(format, args...))
}
// Compatibility with `log`
func Exit(args ...interface{}) {
if len(args) > 0 {
Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...)
}
Global.Close() // so that hopefully the messages get logged
os.Exit(0)
}
// Compatibility with `log`
func Exitf(format string, args ...interface{}) {
Global.intLogf(ERROR, format, args...)
Global.Close() // so that hopefully the messages get logged
os.Exit(0)
}
// Compatibility with `log`
func Stderr(args ...interface{}) {
if len(args) > 0 {
Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...)
}
}
// Compatibility with `log`
func Stderrf(format string, args ...interface{}) {
Global.intLogf(ERROR, format, args...)
}
// Compatibility with `log`
func Stdout(args ...interface{}) {
if len(args) > 0 {
Global.intLogf(INFO, strings.Repeat(" %v", len(args))[1:], args...)
}
}
// Compatibility with `log`
func Stdoutf(format string, args ...interface{}) {
Global.intLogf(INFO, format, args...)
}
// Send a log message manually
// Wrapper for (*Logger).Log
func Log(lvl Level, source, message string) {
Global.Log(lvl, source, message)
}
// Send a formatted log message easily
// Wrapper for (*Logger).Logf
func Logf(lvl Level, format string, args ...interface{}) {
Global.intLogf(lvl, format, args...)
}
// Send a closure log message
// Wrapper for (*Logger).Logc
func Logc(lvl Level, closure func() string) {
Global.intLogc(lvl, closure)
}
// Utility for finest log messages (see Debug() for parameter explanation)
// Wrapper for (*Logger).Finest
func Finest(arg0 interface{}, args ...interface{}) {
const (
lvl = FINEST
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
Global.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Utility for fine log messages (see Debug() for parameter explanation)
// Wrapper for (*Logger).Fine
func Fine(arg0 interface{}, args ...interface{}) {
const (
lvl = FINE
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
Global.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Utility for debug log messages
// When given a string as the first argument, this behaves like Logf but with the DEBUG log level (e.g. the first argument is interpreted as a format for the latter arguments)
// When given a closure of type func()string, this logs the string returned by the closure iff it will be logged. The closure runs at most one time.
// When given anything else, the log message will be each of the arguments formatted with %v and separated by spaces (ala Sprint).
// Wrapper for (*Logger).Debug
func Debug(arg0 interface{}, args ...interface{}) {
const (
lvl = DEBUG
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
Global.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Utility for trace log messages (see Debug() for parameter explanation)
// Wrapper for (*Logger).Trace
func Trace(arg0 interface{}, args ...interface{}) {
const (
lvl = TRACE
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
Global.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Utility for info log messages (see Debug() for parameter explanation)
// Wrapper for (*Logger).Info
func Info(arg0 interface{}, args ...interface{}) {
const (
lvl = INFO
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
case func() string:
// Log the closure (no other arguments used)
Global.intLogc(lvl, first)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
}
}
// Utility for warn log messages (returns an error for easy function returns) (see Debug() for parameter explanation)
// These functions will execute a closure exactly once, to build the error message for the return
// Wrapper for (*Logger).Warn
func Warn(arg0 interface{}, args ...interface{}) error {
const (
lvl = WARNING
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
return errors.New(fmt.Sprintf(first, args...))
case func() string:
// Log the closure (no other arguments used)
str := first()
Global.intLogf(lvl, "%s", str)
return errors.New(str)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...))
}
return nil
}
// Utility for error log messages (returns an error for easy function returns) (see Debug() for parameter explanation)
// These functions will execute a closure exactly once, to build the error message for the return
// Wrapper for (*Logger).Error
func Error(arg0 interface{}, args ...interface{}) error {
const (
lvl = ERROR
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
return errors.New(fmt.Sprintf(first, args...))
case func() string:
// Log the closure (no other arguments used)
str := first()
Global.intLogf(lvl, "%s", str)
return errors.New(str)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...))
}
return nil
}
// Utility for critical log messages (returns an error for easy function returns) (see Debug() for parameter explanation)
// These functions will execute a closure exactly once, to build the error message for the return
// Wrapper for (*Logger).Critical
func Critical(arg0 interface{}, args ...interface{}) error {
const (
lvl = CRITICAL
)
switch first := arg0.(type) {
case string:
// Use the string as a format string
Global.intLogf(lvl, first, args...)
return errors.New(fmt.Sprintf(first, args...))
case func() string:
// Log the closure (no other arguments used)
str := first()
Global.intLogf(lvl, "%s", str)
return errors.New(str)
default:
// Build a format string so that it will be similar to Sprint
Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...))
}
return nil
}
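
These wrappers operate on the package-level Global logger (a DEBUG console logger by default), so small programs can log without constructing a Logger of their own. A sketch, assuming the vendored import path `github.com/alecthomas/log4go` (file name is illustrative):

```go
package main

import (
	"fmt"

	log4go "github.com/alecthomas/log4go" // assumed vendored import path
)

func main() {
	// Global starts as a DEBUG console logger; extra filters can be bolted on.
	defer log4go.Close()
	log4go.AddFilter("errors", log4go.WARNING, log4go.NewFileLogWriter("errors.log", true))

	log4go.Info("stdout only: below the file filter's WARNING threshold")

	// The level helpers mirror the Logger methods, including the error return.
	if err := log4go.Error("failed to reach %s", "upstream"); err != nil {
		fmt.Println("propagating:", err)
	}
}
```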

21
vendor/github.com/blang/semver/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,21 @@
language: go
matrix:
include:
- go: 1.4.3
- go: 1.5.4
- go: 1.6.3
- go: 1.7
- go: tip
allow_failures:
- go: tip
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
-repotoken $COVERALLS_TOKEN
- echo "Build examples" ; cd examples && go build
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
env:
global:
secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=

22
vendor/github.com/blang/semver/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License
Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

194
vendor/github.com/blang/semver/README.md generated vendored Normal file
View File

@ -0,0 +1,194 @@
semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
Usage
-----
```bash
$ go get github.com/blang/semver
```
Note: Always vendor your dependencies or fix on a specific version tag.
```go
import "github.com/blang/semver"
v1, err := semver.Make("1.0.0-beta")
v2, err := semver.Make("2.0.0-beta")
v1.Compare(v2)
```
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
Why should I use this lib?
-----
- Fully spec compatible
- No reflection
- No regex
- Fully tested (Coverage >99%)
- Readable parsing/validation errors
- Fast (See [Benchmarks](#benchmarks))
- Only Stdlib
- Uses values instead of pointers
- Many features, see below
Features
-----
- Parsing and validation at all levels
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
- Wildcards `>=1.x`, `<=2.5.x`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
Ranges
------
A `Range` is a set of conditions which specify which versions satisfy the range.
A condition is composed of an operator and a version. The supported operators are:
- `<1.0.0` Less than `1.0.0`
- `<=1.0.0` Less than or equal to `1.0.0`
- `>1.0.0` Greater than `1.0.0`
- `>=1.0.0` Greater than or equal to `1.0.0`
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
Note that spaces between the operator and the version will be gracefully tolerated.
A `Range` can link multiple `Ranges` separated by space:
Ranges can be linked by logical AND:
- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
Ranges can also be linked by logical OR:
- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
AND has a higher precedence than OR. It's not possible to use brackets.
Ranges can be combined by both AND and OR
- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
Range usage:
```
v, err := semver.Parse("1.2.3")
range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
if range(v) {
//valid
}
```
Example
-----
Have a look at full examples in [examples/main.go](examples/main.go)
```go
import "github.com/blang/semver"
v, err := semver.Make("0.0.1-alpha.preview+123.github")
fmt.Printf("Major: %d\n", v.Major)
fmt.Printf("Minor: %d\n", v.Minor)
fmt.Printf("Patch: %d\n", v.Patch)
fmt.Printf("Pre: %s\n", v.Pre)
fmt.Printf("Build: %s\n", v.Build)
// Prerelease versions array
if len(v.Pre) > 0 {
fmt.Println("Prerelease versions:")
for i, pre := range v.Pre {
fmt.Printf("%d: %q\n", i, pre)
}
}
// Build meta data array
if len(v.Build) > 0 {
fmt.Println("Build meta data:")
for i, build := range v.Build {
fmt.Printf("%d: %q\n", i, build)
}
}
v001, err := semver.Make("0.0.1")
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
v001.GT(v) == true
v.LT(v001) == true
v.GTE(v) == true
v.LTE(v) == true
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
v001.Compare(v) == 1
v.Compare(v001) == -1
v.Compare(v) == 0
// Manipulate Version in place:
v.Pre[0], err = semver.NewPRVersion("beta")
if err != nil {
fmt.Printf("Error parsing pre release version: %q", err)
}
fmt.Println("\nValidate versions:")
v.Build[0] = "?"
err = v.Validate()
if err != nil {
fmt.Printf("Validation failed: %s\n", err)
}
```
Benchmarks
-----
BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
See benchmark cases at [semver_test.go](semver_test.go)
Motivation
-----
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
Contribution
-----
Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
License
-----
See [LICENSE](LICENSE) file.
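
Beyond the comparisons shown in the README, the package also ships a Versions slice type, and the README states that it implements sort.Interface, so semantic ordering (prereleases before their release, numeric fields compared numerically) comes straight from sort.Sort. A minimal sketch under that assumption:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/blang/semver"
)

func main() {
	vs := semver.Versions{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-beta"),
	}
	sort.Sort(vs) // semantic order, not lexicographic

	fmt.Println(vs) // [1.2.0-beta 1.2.0 1.10.0]
}
```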

23
vendor/github.com/blang/semver/json.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package semver
import (
"encoding/json"
)
// MarshalJSON implements the encoding/json.Marshaler interface.
func (v Version) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (v *Version) UnmarshalJSON(data []byte) (err error) {
var versionString string
if err = json.Unmarshal(data, &versionString); err != nil {
return
}
*v, err = Parse(versionString)
return
}
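
Because Version implements json.Marshaler and json.Unmarshaler, it can sit directly in API or configuration structs and round-trips as a plain string. A sketch (the struct and field names are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

// release is an illustrative struct embedding a semver.Version field.
type release struct {
	Name    string         `json:"name"`
	Version semver.Version `json:"version"`
}

func main() {
	out, err := json.Marshal(release{Name: "example", Version: semver.MustParse("0.9.1")})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"example","version":"0.9.1"}

	var in release
	if err := json.Unmarshal([]byte(`{"name":"example","version":"1.0.0-rc.1"}`), &in); err != nil {
		panic(err)
	}
	fmt.Println(in.Version) // 1.0.0-rc.1
}
```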

17
vendor/github.com/blang/semver/package.json generated vendored Normal file
View File

@ -0,0 +1,17 @@
{
"author": "blang",
"bugs": {
"URL": "https://github.com/blang/semver/issues",
"url": "https://github.com/blang/semver/issues"
},
"gx": {
"dvcsimport": "github.com/blang/semver"
},
"gxVersion": "0.10.0",
"language": "go",
"license": "MIT",
"name": "semver",
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
"version": "3.5.1"
}

416
vendor/github.com/blang/semver/range.go generated vendored Normal file
View File

@ -0,0 +1,416 @@
package semver
import (
"fmt"
"strconv"
"strings"
"unicode"
)
type wildcardType int
const (
noneWildcard wildcardType = iota
majorWildcard wildcardType = 1
minorWildcard wildcardType = 2
patchWildcard wildcardType = 3
)
func wildcardTypefromInt(i int) wildcardType {
switch i {
case 1:
return majorWildcard
case 2:
return minorWildcard
case 3:
return patchWildcard
default:
return noneWildcard
}
}
type comparator func(Version, Version) bool
var (
compEQ comparator = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) == 0
}
compNE = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) != 0
}
compGT = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) == 1
}
compGE = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) >= 0
}
compLT = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) == -1
}
compLE = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) <= 0
}
)
type versionRange struct {
v Version
c comparator
}
// rangeFunc creates a Range from the given versionRange.
func (vr *versionRange) rangeFunc() Range {
return Range(func(v Version) bool {
return vr.c(v, vr.v)
})
}
// Range represents a range of versions.
// A Range can be used to check if a Version satisfies it:
//
// validRange, err := semver.ParseRange(">1.0.0 <2.0.0")
// validRange(semver.MustParse("1.1.1")) // returns true
type Range func(Version) bool
// OR combines the existing Range with another Range using logical OR.
func (rf Range) OR(f Range) Range {
return Range(func(v Version) bool {
return rf(v) || f(v)
})
}
// AND combines the existing Range with another Range using logical AND.
func (rf Range) AND(f Range) Range {
return Range(func(v Version) bool {
return rf(v) && f(v)
})
}
// ParseRange parses a range and returns a Range.
// If the range could not be parsed an error is returned.
//
// Valid ranges are:
// - "<1.0.0"
// - "<=1.0.0"
// - ">1.0.0"
// - ">=1.0.0"
// - "1.0.0", "=1.0.0", "==1.0.0"
// - "!1.0.0", "!=1.0.0"
//
// A Range can consist of multiple ranges separated by space:
// Ranges can be linked by logical AND:
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
//
// Ranges can also be linked by logical OR:
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
//
// AND has a higher precedence than OR. It's not possible to use brackets.
//
// Ranges can be combined by both AND and OR
//
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
func ParseRange(s string) (Range, error) {
parts := splitAndTrim(s)
orParts, err := splitORParts(parts)
if err != nil {
return nil, err
}
expandedParts, err := expandWildcardVersion(orParts)
if err != nil {
return nil, err
}
var orFn Range
for _, p := range expandedParts {
var andFn Range
for _, ap := range p {
opStr, vStr, err := splitComparatorVersion(ap)
if err != nil {
return nil, err
}
vr, err := buildVersionRange(opStr, vStr)
if err != nil {
return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
}
rf := vr.rangeFunc()
// Set function
if andFn == nil {
andFn = rf
} else { // Combine with existing function
andFn = andFn.AND(rf)
}
}
if orFn == nil {
orFn = andFn
} else {
orFn = orFn.OR(andFn)
}
}
return orFn, nil
}
// splitORParts splits the already cleaned parts by '||'.
// Checks for invalid positions of the operator and returns an
// error if found.
func splitORParts(parts []string) ([][]string, error) {
var ORparts [][]string
last := 0
for i, p := range parts {
if p == "||" {
if i == 0 {
return nil, fmt.Errorf("First element in range is '||'")
}
ORparts = append(ORparts, parts[last:i])
last = i + 1
}
}
if last == len(parts) {
return nil, fmt.Errorf("Last element in range is '||'")
}
ORparts = append(ORparts, parts[last:])
return ORparts, nil
}
// buildVersionRange takes a slice of 2: operator and version
// and builds a versionRange, otherwise an error.
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
c := parseComparator(opStr)
if c == nil {
return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
}
v, err := Parse(vStr)
if err != nil {
return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
}
return &versionRange{
v: v,
c: c,
}, nil
}
// inArray checks if a byte is contained in an array of bytes
func inArray(s byte, list []byte) bool {
for _, el := range list {
if el == s {
return true
}
}
return false
}
// splitAndTrim splits a range string by spaces and cleans whitespaces
func splitAndTrim(s string) (result []string) {
last := 0
var lastChar byte
excludeFromSplit := []byte{'>', '<', '='}
for i := 0; i < len(s); i++ {
if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
if last < i-1 {
result = append(result, s[last:i])
}
last = i + 1
} else if s[i] != ' ' {
lastChar = s[i]
}
}
if last < len(s)-1 {
result = append(result, s[last:])
}
for i, v := range result {
result[i] = strings.Replace(v, " ", "", -1)
}
// parts := strings.Split(s, " ")
// for _, x := range parts {
// if s := strings.TrimSpace(x); len(s) != 0 {
// result = append(result, s)
// }
// }
return
}
// splitComparatorVersion splits the comparator from the version.
// Input must be free of leading or trailing spaces.
func splitComparatorVersion(s string) (string, string, error) {
i := strings.IndexFunc(s, unicode.IsDigit)
if i == -1 {
return "", "", fmt.Errorf("Could not get version from string: %q", s)
}
return strings.TrimSpace(s[0:i]), s[i:], nil
}
// getWildcardType will return the type of wildcard that the
// passed version contains
func getWildcardType(vStr string) wildcardType {
parts := strings.Split(vStr, ".")
nparts := len(parts)
wildcard := parts[nparts-1]
possibleWildcardType := wildcardTypefromInt(nparts)
if wildcard == "x" {
return possibleWildcardType
}
return noneWildcard
}
// createVersionFromWildcard will convert a wildcard version
// into a regular version, replacing 'x's with '0's, handling
// special cases like '1.x.x' and '1.x'
func createVersionFromWildcard(vStr string) string {
// handle 1.x.x
vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
parts := strings.Split(vStr2, ".")
// handle 1.x
if len(parts) == 2 {
return vStr2 + ".0"
}
return vStr2
}
// incrementMajorVersion will increment the major version
// of the passed version
func incrementMajorVersion(vStr string) (string, error) {
parts := strings.Split(vStr, ".")
i, err := strconv.Atoi(parts[0])
if err != nil {
return "", err
}
parts[0] = strconv.Itoa(i + 1)
return strings.Join(parts, "."), nil
}
// incrementMinorVersion will increment the minor version
// of the passed version
func incrementMinorVersion(vStr string) (string, error) {
parts := strings.Split(vStr, ".")
i, err := strconv.Atoi(parts[1])
if err != nil {
return "", err
}
parts[1] = strconv.Itoa(i + 1)
return strings.Join(parts, "."), nil
}
// expandWildcardVersion will expand wildcards inside versions
// following these rules:
//
// * when dealing with patch wildcards:
// >= 1.2.x will become >= 1.2.0
// <= 1.2.x will become < 1.3.0
// > 1.2.x will become >= 1.3.0
// < 1.2.x will become < 1.2.0
// != 1.2.x will become < 1.2.0 >= 1.3.0
//
// * when dealing with minor wildcards:
// >= 1.x will become >= 1.0.0
// <= 1.x will become < 2.0.0
// > 1.x will become >= 2.0.0
// < 1.0 will become < 1.0.0
// != 1.x will become < 1.0.0 >= 2.0.0
//
// * when dealing with wildcards without
// version operator:
// 1.2.x will become >= 1.2.0 < 1.3.0
// 1.x will become >= 1.0.0 < 2.0.0
func expandWildcardVersion(parts [][]string) ([][]string, error) {
var expandedParts [][]string
for _, p := range parts {
var newParts []string
for _, ap := range p {
if strings.Index(ap, "x") != -1 {
opStr, vStr, err := splitComparatorVersion(ap)
if err != nil {
return nil, err
}
versionWildcardType := getWildcardType(vStr)
flatVersion := createVersionFromWildcard(vStr)
var resultOperator string
var shouldIncrementVersion bool
switch opStr {
case ">":
resultOperator = ">="
shouldIncrementVersion = true
case ">=":
resultOperator = ">="
case "<":
resultOperator = "<"
case "<=":
resultOperator = "<"
shouldIncrementVersion = true
case "", "=", "==":
newParts = append(newParts, ">="+flatVersion)
resultOperator = "<"
shouldIncrementVersion = true
case "!=", "!":
newParts = append(newParts, "<"+flatVersion)
resultOperator = ">="
shouldIncrementVersion = true
}
var resultVersion string
if shouldIncrementVersion {
switch versionWildcardType {
case patchWildcard:
resultVersion, _ = incrementMinorVersion(flatVersion)
case minorWildcard:
resultVersion, _ = incrementMajorVersion(flatVersion)
}
} else {
resultVersion = flatVersion
}
ap = resultOperator + resultVersion
}
newParts = append(newParts, ap)
}
expandedParts = append(expandedParts, newParts)
}
return expandedParts, nil
}
func parseComparator(s string) comparator {
switch s {
case "==":
fallthrough
case "":
fallthrough
case "=":
return compEQ
case ">":
return compGT
case ">=":
return compGE
case "<":
return compLT
case "<=":
return compLE
case "!":
fallthrough
case "!=":
return compNE
}
return nil
}
// MustParseRange is like ParseRange but panics if the range cannot be parsed.
func MustParseRange(s string) Range {
r, err := ParseRange(s)
if err != nil {
panic(`semver: ParseRange(` + s + `): ` + err.Error())
}
return r
}
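
Putting the wildcard expansion and comparator parsing together, a parsed Range is simply a func(Version) bool. A short sketch using only the exported helpers shown above:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Wildcards are expanded before parsing: ">=1.2.x" becomes ">=1.2.0" here.
	stable := semver.MustParseRange(">=1.2.x <2.0.0 || >=3.0.0")

	for _, s := range []string{"1.2.7", "2.1.0", "3.0.1"} {
		fmt.Printf("%s in range: %v\n", s, stable(semver.MustParse(s)))
	}
	// 1.2.7 in range: true
	// 2.1.0 in range: false
	// 3.0.1 in range: true
}
```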

418
vendor/github.com/blang/semver/semver.go generated vendored Normal file
View File

@ -0,0 +1,418 @@
package semver
import (
"errors"
"fmt"
"strconv"
"strings"
)
const (
numbers string = "0123456789"
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
alphanum = alphas + numbers
)
// SpecVersion is the latest fully supported spec version of semver
var SpecVersion = Version{
Major: 2,
Minor: 0,
Patch: 0,
}
// Version represents a semver compatible version
type Version struct {
Major uint64
Minor uint64
Patch uint64
Pre []PRVersion
Build []string // No Precedence
}
// Version to string
func (v Version) String() string {
b := make([]byte, 0, 5)
b = strconv.AppendUint(b, v.Major, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Minor, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Patch, 10)
if len(v.Pre) > 0 {
b = append(b, '-')
b = append(b, v.Pre[0].String()...)
for _, pre := range v.Pre[1:] {
b = append(b, '.')
b = append(b, pre.String()...)
}
}
if len(v.Build) > 0 {
b = append(b, '+')
b = append(b, v.Build[0]...)
for _, build := range v.Build[1:] {
b = append(b, '.')
b = append(b, build...)
}
}
return string(b)
}
// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
return (v.Compare(o) == 0)
}
// EQ checks if v is equal to o.
func (v Version) EQ(o Version) bool {
return (v.Compare(o) == 0)
}
// NE checks if v is not equal to o.
func (v Version) NE(o Version) bool {
return (v.Compare(o) != 0)
}
// GT checks if v is greater than o.
func (v Version) GT(o Version) bool {
return (v.Compare(o) == 1)
}
// GTE checks if v is greater than or equal to o.
func (v Version) GTE(o Version) bool {
return (v.Compare(o) >= 0)
}
// GE checks if v is greater than or equal to o.
func (v Version) GE(o Version) bool {
return (v.Compare(o) >= 0)
}
// LT checks if v is less than o.
func (v Version) LT(o Version) bool {
return (v.Compare(o) == -1)
}
// LTE checks if v is less than or equal to o.
func (v Version) LTE(o Version) bool {
return (v.Compare(o) <= 0)
}
// LE checks if v is less than or equal to o.
func (v Version) LE(o Version) bool {
return (v.Compare(o) <= 0)
}
// Compare compares Versions v to o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v Version) Compare(o Version) int {
if v.Major != o.Major {
if v.Major > o.Major {
return 1
}
return -1
}
if v.Minor != o.Minor {
if v.Minor > o.Minor {
return 1
}
return -1
}
if v.Patch != o.Patch {
if v.Patch > o.Patch {
return 1
}
return -1
}
// Quick comparison if a version has no prerelease versions
if len(v.Pre) == 0 && len(o.Pre) == 0 {
return 0
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
return 1
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
return -1
}
i := 0
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
continue
} else if comp == 1 {
return 1
} else {
return -1
}
}
// If all prerelease components compared so far are equal, the version with additional components is greater
if i == len(v.Pre) && i == len(o.Pre) {
return 0
} else if i == len(v.Pre) && i < len(o.Pre) {
return -1
} else {
return 1
}
}
// Validate validates v and returns error in case
func (v Version) Validate() error {
// Major, Minor, Patch already validated using uint64
for _, pre := range v.Pre {
if !pre.IsNum { //Numeric prerelease versions already uint64
if len(pre.VersionStr) == 0 {
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
}
if !containsOnly(pre.VersionStr, alphanum) {
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
}
}
}
for _, build := range v.Build {
if len(build) == 0 {
return fmt.Errorf("Build meta data can not be empty %q", build)
}
if !containsOnly(build, alphanum) {
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
}
}
return nil
}
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
func New(s string) (vp *Version, err error) {
v, err := Parse(s)
vp = &v
return
}
// Make is an alias for Parse, parses version string and returns a validated Version or error
func Make(s string) (Version, error) {
return Parse(s)
}
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
// specs to be parsed by this library. It does so by normalizing versions before passing them to
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
// with only major and minor components specified
func ParseTolerant(s string) (Version, error) {
s = strings.TrimSpace(s)
s = strings.TrimPrefix(s, "v")
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
if len(parts) < 3 {
if strings.ContainsAny(parts[len(parts)-1], "+-") {
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
}
for len(parts) < 3 {
parts = append(parts, "0")
}
s = strings.Join(parts, ".")
}
return Parse(s)
}
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
if len(s) == 0 {
return Version{}, errors.New("Version string empty")
}
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
if len(parts) != 3 {
return Version{}, errors.New("No Major.Minor.Patch elements found")
}
// Major
if !containsOnly(parts[0], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
}
if hasLeadingZeroes(parts[0]) {
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
}
major, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return Version{}, err
}
// Minor
if !containsOnly(parts[1], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
}
if hasLeadingZeroes(parts[1]) {
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
}
minor, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return Version{}, err
}
v := Version{}
v.Major = major
v.Minor = minor
var build, prerelease []string
patchStr := parts[2]
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
build = strings.Split(patchStr[buildIndex+1:], ".")
patchStr = patchStr[:buildIndex]
}
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
prerelease = strings.Split(patchStr[preIndex+1:], ".")
patchStr = patchStr[:preIndex]
}
if !containsOnly(patchStr, numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
}
if hasLeadingZeroes(patchStr) {
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
}
patch, err := strconv.ParseUint(patchStr, 10, 64)
if err != nil {
return Version{}, err
}
v.Patch = patch
// Prerelease
for _, prstr := range prerelease {
parsedPR, err := NewPRVersion(prstr)
if err != nil {
return Version{}, err
}
v.Pre = append(v.Pre, parsedPR)
}
// Build meta data
for _, str := range build {
if len(str) == 0 {
return Version{}, errors.New("Build meta data is empty")
}
if !containsOnly(str, alphanum) {
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
}
v.Build = append(v.Build, str)
}
return v, nil
}
// MustParse is like Parse but panics if the version cannot be parsed.
func MustParse(s string) Version {
v, err := Parse(s)
if err != nil {
panic(`semver: Parse(` + s + `): ` + err.Error())
}
return v
}
// PRVersion represents a PreRelease Version
type PRVersion struct {
VersionStr string
VersionNum uint64
IsNum bool
}
// NewPRVersion creates a new valid prerelease version
func NewPRVersion(s string) (PRVersion, error) {
if len(s) == 0 {
return PRVersion{}, errors.New("Prerelease is empty")
}
v := PRVersion{}
if containsOnly(s, numbers) {
if hasLeadingZeroes(s) {
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
}
num, err := strconv.ParseUint(s, 10, 64)
// Might never be hit, but just in case
if err != nil {
return PRVersion{}, err
}
v.VersionNum = num
v.IsNum = true
} else if containsOnly(s, alphanum) {
v.VersionStr = s
v.IsNum = false
} else {
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
}
return v, nil
}
// IsNumeric checks if prerelease-version is numeric
func (v PRVersion) IsNumeric() bool {
return v.IsNum
}
// Compare compares two PreRelease Versions v and o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v PRVersion) Compare(o PRVersion) int {
if v.IsNum && !o.IsNum {
return -1
} else if !v.IsNum && o.IsNum {
return 1
} else if v.IsNum && o.IsNum {
if v.VersionNum == o.VersionNum {
return 0
} else if v.VersionNum > o.VersionNum {
return 1
} else {
return -1
}
} else { // both are Alphas
if v.VersionStr == o.VersionStr {
return 0
} else if v.VersionStr > o.VersionStr {
return 1
} else {
return -1
}
}
}
// String returns the prerelease version as a string
func (v PRVersion) String() string {
if v.IsNum {
return strconv.FormatUint(v.VersionNum, 10)
}
return v.VersionStr
}
func containsOnly(s string, set string) bool {
return strings.IndexFunc(s, func(r rune) bool {
return !strings.ContainsRune(set, r)
}) == -1
}
func hasLeadingZeroes(s string) bool {
return len(s) > 1 && s[0] == '0'
}
// NewBuildVersion creates a new valid build version
func NewBuildVersion(s string) (string, error) {
if len(s) == 0 {
return "", errors.New("Buildversion is empty")
}
if !containsOnly(s, alphanum) {
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
}
return s, nil
}

vendor/github.com/blang/semver/sort.go generated vendored Normal file

@ -0,0 +1,28 @@
package semver
import (
"sort"
)
// Versions represents multiple versions.
type Versions []Version
// Len returns length of version collection
func (s Versions) Len() int {
return len(s)
}
// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
return s[i].LT(s[j])
}
// Sort sorts a slice of versions
func Sort(versions []Version) {
sort.Sort(Versions(versions))
}
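sort.go simply adapts Versions to the standard sort.Interface; a minimal usage sketch (not part of the vendored file):
```
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	versions := []semver.Version{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-rc.1"),
	}
	semver.Sort(versions) // ascending order via Versions.Less
	fmt.Println(versions) // [1.2.0-rc.1 1.2.0 1.10.0]
}
```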

vendor/github.com/blang/semver/sql.go generated vendored Normal file

@ -0,0 +1,30 @@
package semver
import (
"database/sql/driver"
"fmt"
)
// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
var str string
switch src := src.(type) {
case string:
str = src
case []byte:
str = string(src)
default:
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
}
if t, err := Parse(str); err == nil {
*v = t
}
return
}
// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
return v.String(), nil
}
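Scan and Value let a Version round-trip through a database column as its string form. A small sketch without an actual database (illustrative only; note that this vendored Scan leaves v unchanged, and still returns nil, when the stored string does not parse):
```
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.4.2-beta.1")
	val, _ := v.Value() // driver.Value holding "1.4.2-beta.1"

	var out semver.Version
	_ = out.Scan(val.(string)) // []byte is accepted as well, per the switch above
	fmt.Println(out)           // 1.4.2-beta.1
}
```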


@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.


@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. The are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
type flag uintptr
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
var (
// flagRO indicates whether the value field of a reflect.Value
// is read-only.
flagRO flag
// flagAddr indicates whether the address of the reflect.Value's
// value may be taken.
flagAddr flag
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
ro, addr flag
}{{
// From Go 1.4 to 1.5
ro: 1 << 5,
addr: 1 << 7,
}, {
// Up to Go tip.
ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
var flagValOffset = func() uintptr {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
func unsafeReflectValue(v reflect.Value) reflect.Value {
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
return v
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
*flagFieldPtr |= flagAddr
return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
panic("reflect.Value read-only flag has changed semantics")
}


@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
// +build js appengine safe disableunsafe !go1.4
package spew


@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
case nilFound:
d.w.Write(nilAngleBytes)
case cycleFound == true:
case cycleFound:
d.w.Write(circularBytes)
default:


@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
case nilFound == true:
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound == true:
case cycleFound:
f.fs.Write(circularShortBytes)
default:

vendor/github.com/dyatlov/go-opengraph/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Vitaly Dyatlov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,365 @@
package opengraph
import (
"encoding/json"
"io"
"strconv"
"time"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// Image defines Open Graph Image type
type Image struct {
URL string `json:"url"`
SecureURL string `json:"secure_url"`
Type string `json:"type"`
Width uint64 `json:"width"`
Height uint64 `json:"height"`
draft bool `json:"-"`
}
// Video defines Open Graph Video type
type Video struct {
URL string `json:"url"`
SecureURL string `json:"secure_url"`
Type string `json:"type"`
Width uint64 `json:"width"`
Height uint64 `json:"height"`
draft bool `json:"-"`
}
// Audio defines Open Graph Audio Type
type Audio struct {
URL string `json:"url"`
SecureURL string `json:"secure_url"`
Type string `json:"type"`
draft bool `json:"-"`
}
// Article contains the Open Graph Article structure
type Article struct {
PublishedTime *time.Time `json:"published_time"`
ModifiedTime *time.Time `json:"modified_time"`
ExpirationTime *time.Time `json:"expiration_time"`
Section string `json:"section"`
Tags []string `json:"tags"`
Authors []*Profile `json:"authors"`
}
// Profile contains Open Graph Profile structure
type Profile struct {
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
Username string `json:"username"`
Gender string `json:"gender"`
}
// Book contains Open Graph Book structure
type Book struct {
ISBN string `json:"isbn"`
ReleaseDate *time.Time `json:"release_date"`
Tags []string `json:"tags"`
Authors []*Profile `json:"authors"`
}
// OpenGraph contains facebook og data
type OpenGraph struct {
isArticle bool
isBook bool
isProfile bool
Type string `json:"type"`
URL string `json:"url"`
Title string `json:"title"`
Description string `json:"description"`
Determiner string `json:"determiner"`
SiteName string `json:"site_name"`
Locale string `json:"locale"`
LocalesAlternate []string `json:"locales_alternate"`
Images []*Image `json:"images"`
Audios []*Audio `json:"audios"`
Videos []*Video `json:"videos"`
Article *Article `json:"article,omitempty"`
Book *Book `json:"book,omitempty"`
Profile *Profile `json:"profile,omitempty"`
}
// NewOpenGraph returns new instance of Open Graph structure
func NewOpenGraph() *OpenGraph {
return &OpenGraph{}
}
// ToJSON is a simple wrapper around json.Marshal
func (og *OpenGraph) ToJSON() ([]byte, error) {
return json.Marshal(og)
}
// String returns the JSON representation of the structure, or an error string
func (og *OpenGraph) String() string {
data, err := og.ToJSON()
if err != nil {
return err.Error()
}
return string(data[:])
}
// ProcessHTML parses the given HTML from an io.Reader and fills the OpenGraph structure
func (og *OpenGraph) ProcessHTML(buffer io.Reader) error {
z := html.NewTokenizer(buffer)
for {
tt := z.Next()
switch tt {
case html.ErrorToken:
if z.Err() == io.EOF {
return nil
}
return z.Err()
case html.StartTagToken, html.SelfClosingTagToken, html.EndTagToken:
name, hasAttr := z.TagName()
if atom.Lookup(name) == atom.Body {
return nil // OpenGraph is only in head, so we don't need body
}
if atom.Lookup(name) != atom.Meta || !hasAttr {
continue
}
m := make(map[string]string)
var key, val []byte
for hasAttr {
key, val, hasAttr = z.TagAttr()
m[atom.String(key)] = string(val)
}
og.ProcessMeta(m)
}
}
}
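A brief usage sketch for ProcessHTML (illustrative only; assumes the upstream import path github.com/dyatlov/go-opengraph/opengraph and a hypothetical HTML snippet):
```
package main

import (
	"fmt"
	"strings"

	"github.com/dyatlov/go-opengraph/opengraph"
)

func main() {
	page := `<html><head>
	  <meta property="og:type" content="article" />
	  <meta property="og:title" content="Example Title" />
	  <meta property="og:image" content="https://example.com/a.png" />
	</head><body></body></html>`

	og := opengraph.NewOpenGraph()
	if err := og.ProcessHTML(strings.NewReader(page)); err != nil {
		panic(err)
	}
	fmt.Println(og.Type, og.Title, og.Images[0].URL)
	// article Example Title https://example.com/a.png
}
```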
func (og *OpenGraph) ensureHasVideo() {
if len(og.Videos) > 0 {
return
}
og.Videos = append(og.Videos, &Video{draft: true})
}
func (og *OpenGraph) ensureHasImage() {
if len(og.Images) > 0 {
return
}
og.Images = append(og.Images, &Image{draft: true})
}
func (og *OpenGraph) ensureHasAudio() {
if len(og.Audios) > 0 {
return
}
og.Audios = append(og.Audios, &Audio{draft: true})
}
// ProcessMeta processes meta attributes and adds them to Open Graph structure if they are suitable for that
func (og *OpenGraph) ProcessMeta(metaAttrs map[string]string) {
switch metaAttrs["property"] {
case "og:description":
og.Description = metaAttrs["content"]
case "og:type":
og.Type = metaAttrs["content"]
switch og.Type {
case "article":
og.isArticle = true
case "book":
og.isBook = true
case "profile":
og.isProfile = true
}
case "og:title":
og.Title = metaAttrs["content"]
case "og:url":
og.URL = metaAttrs["content"]
case "og:determiner":
og.Determiner = metaAttrs["content"]
case "og:site_name":
og.SiteName = metaAttrs["content"]
case "og:locale":
og.Locale = metaAttrs["content"]
case "og:locale:alternate":
og.LocalesAlternate = append(og.LocalesAlternate, metaAttrs["content"])
case "og:audio":
if len(og.Audios)>0 && og.Audios[len(og.Audios)-1].draft {
og.Audios[len(og.Audios)-1].URL = metaAttrs["content"]
og.Audios[len(og.Audios)-1].draft = false
} else {
og.Audios = append(og.Audios, &Audio{URL: metaAttrs["content"]})
}
case "og:audio:secure_url":
og.ensureHasAudio()
og.Audios[len(og.Audios)-1].SecureURL = metaAttrs["content"]
case "og:audio:type":
og.ensureHasAudio()
og.Audios[len(og.Audios)-1].Type = metaAttrs["content"]
case "og:image":
if len(og.Images)>0 && og.Images[len(og.Images)-1].draft {
og.Images[len(og.Images)-1].URL = metaAttrs["content"]
og.Images[len(og.Images)-1].draft = false
} else {
og.Images = append(og.Images, &Image{URL: metaAttrs["content"]})
}
case "og:image:url":
og.ensureHasImage()
og.Images[len(og.Images)-1].URL = metaAttrs["content"]
case "og:image:secure_url":
og.ensureHasImage()
og.Images[len(og.Images)-1].SecureURL = metaAttrs["content"]
case "og:image:type":
og.ensureHasImage()
og.Images[len(og.Images)-1].Type = metaAttrs["content"]
case "og:image:width":
w, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
og.ensureHasImage()
og.Images[len(og.Images)-1].Width = w
}
case "og:image:height":
h, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
og.ensureHasImage()
og.Images[len(og.Images)-1].Height = h
}
case "og:video":
if len(og.Videos)>0 && og.Videos[len(og.Videos)-1].draft {
og.Videos[len(og.Videos)-1].URL = metaAttrs["content"]
og.Videos[len(og.Videos)-1].draft = false
} else {
og.Videos = append(og.Videos, &Video{URL: metaAttrs["content"]})
}
case "og:video:url":
og.ensureHasVideo()
og.Videos[len(og.Videos)-1].URL = metaAttrs["content"]
case "og:video:secure_url":
og.ensureHasVideo()
og.Videos[len(og.Videos)-1].SecureURL = metaAttrs["content"]
case "og:video:type":
og.ensureHasVideo()
og.Videos[len(og.Videos)-1].Type = metaAttrs["content"]
case "og:video:width":
w, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
og.ensureHasVideo()
og.Videos[len(og.Videos)-1].Width = w
}
case "og:video:height":
h, err := strconv.ParseUint(metaAttrs["content"], 10, 64)
if err == nil {
og.ensureHasVideo()
og.Videos[len(og.Videos)-1].Height = h
}
default:
if og.isArticle {
og.processArticleMeta(metaAttrs)
} else if og.isBook {
og.processBookMeta(metaAttrs)
} else if og.isProfile {
og.processProfileMeta(metaAttrs)
}
}
}
func (og *OpenGraph) processArticleMeta(metaAttrs map[string]string) {
if og.Article == nil {
og.Article = &Article{}
}
switch metaAttrs["property"] {
case "article:published_time":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Article.PublishedTime = &t
}
case "article:modified_time":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Article.ModifiedTime = &t
}
case "article:expiration_time":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Article.ExpirationTime = &t
}
case "article:section":
og.Article.Section = metaAttrs["content"]
case "article:tag":
og.Article.Tags = append(og.Article.Tags, metaAttrs["content"])
case "article:author:first_name":
if len(og.Article.Authors) == 0 {
og.Article.Authors = append(og.Article.Authors, &Profile{})
}
og.Article.Authors[len(og.Article.Authors)-1].FirstName = metaAttrs["content"]
case "article:author:last_name":
if len(og.Article.Authors) == 0 {
og.Article.Authors = append(og.Article.Authors, &Profile{})
}
og.Article.Authors[len(og.Article.Authors)-1].LastName = metaAttrs["content"]
case "article:author:username":
if len(og.Article.Authors) == 0 {
og.Article.Authors = append(og.Article.Authors, &Profile{})
}
og.Article.Authors[len(og.Article.Authors)-1].Username = metaAttrs["content"]
case "article:author:gender":
if len(og.Article.Authors) == 0 {
og.Article.Authors = append(og.Article.Authors, &Profile{})
}
og.Article.Authors[len(og.Article.Authors)-1].Gender = metaAttrs["content"]
}
}
func (og *OpenGraph) processBookMeta(metaAttrs map[string]string) {
if og.Book == nil {
og.Book = &Book{}
}
switch metaAttrs["property"] {
case "book:release_date":
t, err := time.Parse(time.RFC3339, metaAttrs["content"])
if err == nil {
og.Book.ReleaseDate = &t
}
case "book:isbn":
og.Book.ISBN = metaAttrs["content"]
case "book:tag":
og.Book.Tags = append(og.Book.Tags, metaAttrs["content"])
case "book:author:first_name":
if len(og.Book.Authors) == 0 {
og.Book.Authors = append(og.Book.Authors, &Profile{})
}
og.Book.Authors[len(og.Book.Authors)-1].FirstName = metaAttrs["content"]
case "book:author:last_name":
if len(og.Book.Authors) == 0 {
og.Book.Authors = append(og.Book.Authors, &Profile{})
}
og.Book.Authors[len(og.Book.Authors)-1].LastName = metaAttrs["content"]
case "book:author:username":
if len(og.Book.Authors) == 0 {
og.Book.Authors = append(og.Book.Authors, &Profile{})
}
og.Book.Authors[len(og.Book.Authors)-1].Username = metaAttrs["content"]
case "book:author:gender":
if len(og.Book.Authors) == 0 {
og.Book.Authors = append(og.Book.Authors, &Profile{})
}
og.Book.Authors[len(og.Book.Authors)-1].Gender = metaAttrs["content"]
}
}
func (og *OpenGraph) processProfileMeta(metaAttrs map[string]string) {
if og.Profile == nil {
og.Profile = &Profile{}
}
switch metaAttrs["property"] {
case "profile:first_name":
og.Profile.FirstName = metaAttrs["content"]
case "profile:last_name":
og.Profile.LastName = metaAttrs["content"]
case "profile:username":
og.Profile.Username = metaAttrs["content"]
case "profile:gender":
og.Profile.Gender = metaAttrs["content"]
}
}

vendor/github.com/go-ldap/ldap/.gitignore generated vendored Normal file

vendor/github.com/go-ldap/ldap/.travis.yml generated vendored Normal file

@ -0,0 +1,31 @@
sudo: false
language: go
go:
- "1.5.x"
- "1.6.x"
- "1.7.x"
- "1.8.x"
- "1.9.x"
- "1.10.x"
- "1.11.x"
- "1.12.x"
- tip
git:
depth: 1
matrix:
fast_finish: true
allow_failures:
- go: tip
go_import_path: gopkg.in/ldap.v3
install:
- go get gopkg.in/asn1-ber.v1
- go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
- go get github.com/golang/lint/golint || go get golang.org/x/lint/golint || true
- go build -v ./...
script:
- make test
- make fmt
- make vet
- make lint

vendor/github.com/go-ldap/ldap/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,12 @@
# Contribution Guidelines
We welcome contribution and improvements.
## Guiding Principles
To begin with here is a draft from an email exchange:
* take compatibility seriously (our semvers, compatibility with older go versions, etc)
* don't tag untested code for release
* beware of baking in implicit behavior based on other libraries/tools choices
* be as high-fidelity as possible in plumbing through LDAP data (don't mask errors or reduce power of someone using the library)

vendor/github.com/go-ldap/ldap/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
Portions copyright (c) 2015-2016 go-ldap Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/go-ldap/ldap/Makefile generated vendored Normal file

@ -0,0 +1,82 @@
.PHONY: default install build test quicktest fmt vet lint
# List of all release tags "supported" by our current Go version
# E.g. ":go1.1:go1.2:go1.3:go1.4:go1.5:go1.6:go1.7:go1.8:go1.9:go1.10:go1.11:go1.12:"
GO_RELEASE_TAGS := $(shell go list -f ':{{join (context.ReleaseTags) ":"}}:' runtime)
# Only use the `-race` flag on newer versions of Go (version 1.3 and newer)
ifeq (,$(findstring :go1.3:,$(GO_RELEASE_TAGS)))
RACE_FLAG :=
else
RACE_FLAG := -race -cpu 1,2,4
endif
# Run `go vet` on Go 1.12 and newer. For Go 1.5-1.11, use `go tool vet`
ifneq (,$(findstring :go1.12:,$(GO_RELEASE_TAGS)))
GO_VET := go vet \
-atomic \
-bool \
-copylocks \
-nilfunc \
-printf \
-rangeloops \
-unreachable \
-unsafeptr \
-unusedresult \
.
else ifneq (,$(findstring :go1.5:,$(GO_RELEASE_TAGS)))
GO_VET := go tool vet \
-atomic \
-bool \
-copylocks \
-nilfunc \
-printf \
-shadow \
-rangeloops \
-unreachable \
-unsafeptr \
-unusedresult \
.
else
GO_VET := @echo "go vet skipped -- not supported on this version of Go"
endif
default: fmt vet lint build quicktest
install:
go get -t -v ./...
build:
go build -v ./...
test:
go test -v $(RACE_FLAG) -cover ./...
quicktest:
go test ./...
# Capture output and force failure when there is non-empty output
fmt:
@echo gofmt -l .
@OUTPUT=`gofmt -l . 2>&1`; \
if [ "$$OUTPUT" ]; then \
echo "gofmt must be run on the following files:"; \
echo "$$OUTPUT"; \
exit 1; \
fi
vet:
$(GO_VET)
# https://github.com/golang/lint
# go get github.com/golang/lint/golint
# Capture output and force failure when there is non-empty output
# Only run on go1.5+
lint:
@echo golint ./...
@OUTPUT=`command -v golint >/dev/null 2>&1 && golint ./... 2>&1`; \
if [ "$$OUTPUT" ]; then \
echo "golint errors:"; \
echo "$$OUTPUT"; \
exit 1; \
fi

vendor/github.com/go-ldap/ldap/README.md generated vendored Normal file

@ -0,0 +1,54 @@
[![GoDoc](https://godoc.org/gopkg.in/ldap.v3?status.svg)](https://godoc.org/gopkg.in/ldap.v3)
[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
# Basic LDAP v3 functionality for the Go programming language.
## Install
For the latest version use:
go get gopkg.in/ldap.v3
Import the latest version with:
import "gopkg.in/ldap.v3"
## Required Libraries:
- gopkg.in/asn1-ber.v1
## Features:
- Connecting to LDAP server (non-TLS, TLS, STARTTLS)
- Binding to LDAP server
- Searching for entries
- Filter Compile / Decompile
- Paging Search Results
- Modify Requests / Responses
- Add Requests / Responses
- Delete Requests / Responses
- Modify DN Requests / Responses
## Examples:
- search
- modify
## Contributing:
Bug reports and pull requests are welcome!
Before submitting a pull request, please make sure tests and verification scripts pass:
```
make all
```
To set up a pre-push hook to run the tests and verify scripts before pushing:
```
ln -s ../../.githooks/pre-push .git/hooks/pre-push
```
---
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
The design is licensed under the Creative Commons 3.0 Attributions license.
Read this article for more details: http://blog.golang.org/gopher

vendor/github.com/go-ldap/ldap/add.go generated vendored Normal file

@ -0,0 +1,119 @@
//
// https://tools.ietf.org/html/rfc4511
//
// AddRequest ::= [APPLICATION 8] SEQUENCE {
// entry LDAPDN,
// attributes AttributeList }
//
// AttributeList ::= SEQUENCE OF attribute Attribute
package ldap
import (
"errors"
"log"
"gopkg.in/asn1-ber.v1"
)
// Attribute represents an LDAP attribute
type Attribute struct {
// Type is the name of the LDAP attribute
Type string
// Vals are the LDAP attribute values
Vals []string
}
func (a *Attribute) encode() *ber.Packet {
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
for _, value := range a.Vals {
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
}
seq.AppendChild(set)
return seq
}
// AddRequest represents an LDAP AddRequest operation
type AddRequest struct {
// DN identifies the entry being added
DN string
// Attributes list the attributes of the new entry
Attributes []Attribute
// Controls hold optional controls to send with the request
Controls []Control
}
func (a AddRequest) encode() *ber.Packet {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN"))
attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
for _, attribute := range a.Attributes {
attributes.AppendChild(attribute.encode())
}
request.AppendChild(attributes)
return request
}
// Attribute adds an attribute with the given type and values
func (a *AddRequest) Attribute(attrType string, attrVals []string) {
a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals})
}
// NewAddRequest returns an AddRequest for the given DN, with no attributes
func NewAddRequest(dn string, controls []Control) *AddRequest {
return &AddRequest{
DN: dn,
Controls: controls,
}
}
// Add performs the given AddRequest
func (l *Conn) Add(addRequest *AddRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
packet.AppendChild(addRequest.encode())
if len(addRequest.Controls) > 0 {
packet.AppendChild(encodeControls(addRequest.Controls))
}
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return err
}
ber.PrintPacket(packet)
}
if packet.Children[1].Tag == ApplicationAddResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
l.Debug.Printf("%d: returning", msgCtx.id)
return nil
}
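An illustrative sketch of building and sending an AddRequest (host, bind DN and attribute values are placeholders; assumes the vendored import path github.com/go-ldap/ldap, published upstream as gopkg.in/ldap.v3):
```
package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=com", "adminpassword"); err != nil {
		log.Fatal(err)
	}

	req := ldap.NewAddRequest("cn=jdoe,ou=people,dc=example,dc=com", nil)
	req.Attribute("objectClass", []string{"inetOrgPerson"})
	req.Attribute("sn", []string{"Doe"})
	req.Attribute("cn", []string{"jdoe"})

	if err := conn.Add(req); err != nil {
		log.Fatal(err)
	}
}
```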

vendor/github.com/go-ldap/ldap/bind.go generated vendored Normal file

@ -0,0 +1,135 @@
package ldap
import (
"errors"
"fmt"
"gopkg.in/asn1-ber.v1"
)
// SimpleBindRequest represents a username/password bind operation
type SimpleBindRequest struct {
// Username is the name of the Directory object that the client wishes to bind as
Username string
// Password is the credentials to bind with
Password string
// Controls are optional controls to send with the bind request
Controls []Control
// AllowEmptyPassword sets whether the client allows binding with an empty password
// (normally used for unauthenticated bind).
AllowEmptyPassword bool
}
// SimpleBindResult contains the response from the server
type SimpleBindResult struct {
Controls []Control
}
// NewSimpleBindRequest returns a bind request
func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
return &SimpleBindRequest{
Username: username,
Password: password,
Controls: controls,
AllowEmptyPassword: false,
}
}
func (bindRequest *SimpleBindRequest) encode() *ber.Packet {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name"))
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password"))
return request
}
// SimpleBind performs the simple bind operation defined in the given request
func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword {
return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
}
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
encodedBindRequest := simpleBindRequest.encode()
packet.AppendChild(encodedBindRequest)
if len(simpleBindRequest.Controls) > 0 {
packet.AppendChild(encodeControls(simpleBindRequest.Controls))
}
if l.Debug {
ber.PrintPacket(packet)
}
msgCtx, err := l.sendMessage(packet)
if err != nil {
return nil, err
}
defer l.finishMessage(msgCtx)
packetResponse, ok := <-msgCtx.responses
if !ok {
return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return nil, err
}
if l.Debug {
if err = addLDAPDescriptions(packet); err != nil {
return nil, err
}
ber.PrintPacket(packet)
}
result := &SimpleBindResult{
Controls: make([]Control, 0),
}
if len(packet.Children) == 3 {
for _, child := range packet.Children[2].Children {
decodedChild, decodeErr := DecodeControl(child)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode child control: %s", decodeErr)
}
result.Controls = append(result.Controls, decodedChild)
}
}
err = GetLDAPError(packet)
return result, err
}
// Bind performs a bind with the given username and password.
//
// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method
// for that.
func (l *Conn) Bind(username, password string) error {
req := &SimpleBindRequest{
Username: username,
Password: password,
AllowEmptyPassword: false,
}
_, err := l.SimpleBind(req)
return err
}
// UnauthenticatedBind performs an unauthenticated bind.
//
// A username may be provided for trace (e.g. logging) purpose only, but it is normally not
// authenticated or otherwise validated by the LDAP server.
//
// See https://tools.ietf.org/html/rfc4513#section-5.1.2 .
// See https://tools.ietf.org/html/rfc4513#section-6.3.1 .
func (l *Conn) UnauthenticatedBind(username string) error {
req := &SimpleBindRequest{
Username: username,
Password: "",
AllowEmptyPassword: true,
}
_, err := l.SimpleBind(req)
return err
}
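A minimal sketch of the bind variants above (placeholder host and credentials, not part of the vendored file):
```
package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Struct-based bind, equivalent to conn.Bind but with room for controls.
	res, err := conn.SimpleBind(&ldap.SimpleBindRequest{
		Username: "cn=reader,dc=example,dc=com",
		Password: "secret",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = res.Controls // response controls, if the server returned any

	// Unauthenticated bind: the username is for tracing only.
	if err := conn.UnauthenticatedBind("cn=reader,dc=example,dc=com"); err != nil {
		log.Fatal(err)
	}
}
```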

vendor/github.com/go-ldap/ldap/client.go generated vendored Normal file

@ -0,0 +1,28 @@
package ldap
import (
"crypto/tls"
"time"
)
// Client knows how to interact with an LDAP server
type Client interface {
Start()
StartTLS(config *tls.Config) error
Close()
SetTimeout(time.Duration)
Bind(username, password string) error
SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error)
Add(addRequest *AddRequest) error
Del(delRequest *DelRequest) error
Modify(modifyRequest *ModifyRequest) error
ModifyDN(modifyDNRequest *ModifyDNRequest) error
Compare(dn, attribute, value string) (bool, error)
PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error)
Search(searchRequest *SearchRequest) (*SearchResult, error)
SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
}

vendor/github.com/go-ldap/ldap/compare.go generated vendored Normal file

@ -0,0 +1,83 @@
// File contains Compare functionality
//
// https://tools.ietf.org/html/rfc4511
//
// CompareRequest ::= [APPLICATION 14] SEQUENCE {
// entry LDAPDN,
// ava AttributeValueAssertion }
//
// AttributeValueAssertion ::= SEQUENCE {
// attributeDesc AttributeDescription,
// assertionValue AssertionValue }
//
// AttributeDescription ::= LDAPString
// -- Constrained to <attributedescription>
// -- [RFC4512]
//
// AttributeValue ::= OCTET STRING
//
package ldap
import (
"errors"
"fmt"
"gopkg.in/asn1-ber.v1"
)
// Compare checks whether the given attribute of the entry identified by dn matches value. It returns true if it
// matches, false otherwise, together with any error that occurred.
func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN"))
ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc"))
ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "AssertionValue"))
request.AppendChild(ava)
packet.AppendChild(request)
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return false, err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return false, err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return false, err
}
ber.PrintPacket(packet)
}
if packet.Children[1].Tag == ApplicationCompareResponse {
err := GetLDAPError(packet)
switch {
case IsErrorWithCode(err, LDAPResultCompareTrue):
return true, nil
case IsErrorWithCode(err, LDAPResultCompareFalse):
return false, nil
default:
return false, err
}
}
return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)
}
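Compare maps the LDAP compareTrue/compareFalse result codes onto a Go bool; an illustrative call with placeholder DN and value:
```
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	matched, err := conn.Compare("cn=jdoe,ou=people,dc=example,dc=com", "mail", "jdoe@example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mail matches:", matched)
}
```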

vendor/github.com/go-ldap/ldap/conn.go generated vendored Normal file

@ -0,0 +1,516 @@
package ldap
import (
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/url"
"sync"
"sync/atomic"
"time"
"gopkg.in/asn1-ber.v1"
)
const (
// MessageQuit causes the processMessages loop to exit
MessageQuit = 0
// MessageRequest sends a request to the server
MessageRequest = 1
// MessageResponse receives a response from the server
MessageResponse = 2
// MessageFinish indicates the client considers a particular message ID to be finished
MessageFinish = 3
// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
MessageTimeout = 4
)
const (
// DefaultLdapPort default ldap port for pure TCP connection
DefaultLdapPort = "389"
// DefaultLdapsPort default ldap port for SSL connection
DefaultLdapsPort = "636"
)
// PacketResponse contains the packet or error encountered reading a response
type PacketResponse struct {
// Packet is the packet read from the server
Packet *ber.Packet
// Error is an error encountered while reading
Error error
}
// ReadPacket returns the packet or an error
func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
}
return pr.Packet, pr.Error
}
type messageContext struct {
id int64
// close(done) should only be called from finishMessage()
done chan struct{}
// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
responses chan *PacketResponse
}
// sendResponse should only be called within the processMessages() loop which
// is also responsible for closing the responses channel.
func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
select {
case msgCtx.responses <- packet:
// Successfully sent packet to message handler.
case <-msgCtx.done:
// The request handler is done and will not receive more
// packets.
}
}
type messagePacket struct {
Op int
MessageID int64
Packet *ber.Packet
Context *messageContext
}
type sendMessageFlags uint
const (
startTLS sendMessageFlags = 1 << iota
)
// Conn represents an LDAP Connection
type Conn struct {
// requestTimeout is loaded atomically
// so we need to ensure 64-bit alignment on 32-bit platforms.
requestTimeout int64
conn net.Conn
isTLS bool
closing uint32
closeErr atomic.Value
isStartingTLS bool
Debug debugging
chanConfirm chan struct{}
messageContexts map[int64]*messageContext
chanMessage chan *messagePacket
chanMessageID chan int64
wgClose sync.WaitGroup
outstandingRequests uint
messageMutex sync.Mutex
}
var _ Client = &Conn{}
// DefaultTimeout is a package-level variable that sets the timeout value
// used for the Dial and DialTLS methods.
//
// WARNING: since this is a package-level variable, setting this value from
// multiple places will probably result in undesired behaviour.
var DefaultTimeout = 60 * time.Second
// Dial connects to the given address on the given network using net.Dial
// and then returns a new Conn for the connection.
func Dial(network, addr string) (*Conn, error) {
c, err := net.DialTimeout(network, addr, DefaultTimeout)
if err != nil {
return nil, NewError(ErrorNetwork, err)
}
conn := NewConn(c, false)
conn.Start()
return conn, nil
}
// DialTLS connects to the given address on the given network using tls.Dial
// and then returns a new Conn for the connection.
func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config)
if err != nil {
return nil, NewError(ErrorNetwork, err)
}
conn := NewConn(c, true)
conn.Start()
return conn, nil
}
// DialURL connects to the given LDAP URL over TCP, using net.Dial for the ldap:// scheme or
// tls.Dial for ldaps://. On success a new Conn for the connection is returned.
func DialURL(addr string) (*Conn, error) {
lurl, err := url.Parse(addr)
if err != nil {
return nil, NewError(ErrorNetwork, err)
}
host, port, err := net.SplitHostPort(lurl.Host)
if err != nil {
// we assume that the error is due to a missing port
host = lurl.Host
port = ""
}
switch lurl.Scheme {
case "ldap":
if port == "" {
port = DefaultLdapPort
}
return Dial("tcp", net.JoinHostPort(host, port))
case "ldaps":
if port == "" {
port = DefaultLdapsPort
}
tlsConf := &tls.Config{
ServerName: host,
}
return DialTLS("tcp", net.JoinHostPort(host, port), tlsConf)
}
return nil, NewError(ErrorNetwork, fmt.Errorf("Unknown scheme '%s'", lurl.Scheme))
}
// NewConn returns a new Conn using conn for network I/O.
func NewConn(conn net.Conn, isTLS bool) *Conn {
return &Conn{
conn: conn,
chanConfirm: make(chan struct{}),
chanMessageID: make(chan int64),
chanMessage: make(chan *messagePacket, 10),
messageContexts: map[int64]*messageContext{},
requestTimeout: 0,
isTLS: isTLS,
}
}
// Start initializes goroutines to read responses and process messages
func (l *Conn) Start() {
go l.reader()
go l.processMessages()
l.wgClose.Add(1)
}
// IsClosing returns whether or not we're currently closing.
func (l *Conn) IsClosing() bool {
return atomic.LoadUint32(&l.closing) == 1
}
// setClosing sets the closing value to true
func (l *Conn) setClosing() bool {
return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
}
// Close closes the connection.
func (l *Conn) Close() {
l.messageMutex.Lock()
defer l.messageMutex.Unlock()
if l.setClosing() {
l.Debug.Printf("Sending quit message and waiting for confirmation")
l.chanMessage <- &messagePacket{Op: MessageQuit}
<-l.chanConfirm
close(l.chanMessage)
l.Debug.Printf("Closing network connection")
if err := l.conn.Close(); err != nil {
log.Println(err)
}
l.wgClose.Done()
}
l.wgClose.Wait()
}
// SetTimeout sets how long after a request is sent a MessageTimeout is triggered
func (l *Conn) SetTimeout(timeout time.Duration) {
if timeout > 0 {
atomic.StoreInt64(&l.requestTimeout, int64(timeout))
}
}
// Returns the next available messageID
func (l *Conn) nextMessageID() int64 {
if messageID, ok := <-l.chanMessageID; ok {
return messageID
}
return 0
}
// StartTLS sends the command to start a TLS session and then creates a new TLS Client
func (l *Conn) StartTLS(config *tls.Config) error {
if l.isTLS {
return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
}
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
packet.AppendChild(request)
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
l.Close()
return err
}
ber.PrintPacket(packet)
}
if err := GetLDAPError(packet); err == nil {
conn := tls.Client(l.conn, config)
if connErr := conn.Handshake(); connErr != nil {
l.Close()
return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr))
}
l.isTLS = true
l.conn = conn
} else {
return err
}
go l.reader()
return nil
}
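A sketch of upgrading a plain connection with StartTLS and then inspecting the negotiated state via TLSConnectionState below (placeholder host; TLS settings kept minimal):
```
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com") // plain TCP on 389
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Upgrade the existing connection to TLS in place.
	if err := conn.StartTLS(&tls.Config{ServerName: "ldap.example.com"}); err != nil {
		log.Fatal(err)
	}

	if state, ok := conn.TLSConnectionState(); ok {
		fmt.Printf("TLS established, version 0x%x\n", state.Version)
	}
}
```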
// TLSConnectionState returns the client's TLS connection state.
// The return values are their zero values if StartTLS did
// not succeed.
func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
tc, ok := l.conn.(*tls.Conn)
if !ok {
return
}
return tc.ConnectionState(), true
}
func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
return l.sendMessageWithFlags(packet, 0)
}
func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
if l.IsClosing() {
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
}
l.messageMutex.Lock()
l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
if l.isStartingTLS {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
}
if flags&startTLS != 0 {
if l.outstandingRequests != 0 {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
}
l.isStartingTLS = true
}
l.outstandingRequests++
l.messageMutex.Unlock()
responses := make(chan *PacketResponse)
messageID := packet.Children[0].Value.(int64)
message := &messagePacket{
Op: MessageRequest,
MessageID: messageID,
Packet: packet,
Context: &messageContext{
id: messageID,
done: make(chan struct{}),
responses: responses,
},
}
l.sendProcessMessage(message)
return message.Context, nil
}
func (l *Conn) finishMessage(msgCtx *messageContext) {
close(msgCtx.done)
if l.IsClosing() {
return
}
l.messageMutex.Lock()
l.outstandingRequests--
if l.isStartingTLS {
l.isStartingTLS = false
}
l.messageMutex.Unlock()
message := &messagePacket{
Op: MessageFinish,
MessageID: msgCtx.id,
}
l.sendProcessMessage(message)
}
func (l *Conn) sendProcessMessage(message *messagePacket) bool {
l.messageMutex.Lock()
defer l.messageMutex.Unlock()
if l.IsClosing() {
return false
}
l.chanMessage <- message
return true
}
func (l *Conn) processMessages() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in processMessages: %v", err)
}
for messageID, msgCtx := range l.messageContexts {
// If we are closing due to an error, inform anyone who
// is waiting about the error.
if l.IsClosing() && l.closeErr.Load() != nil {
msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
}
l.Debug.Printf("Closing channel for MessageID %d", messageID)
close(msgCtx.responses)
delete(l.messageContexts, messageID)
}
close(l.chanMessageID)
close(l.chanConfirm)
}()
var messageID int64 = 1
for {
select {
case l.chanMessageID <- messageID:
messageID++
case message := <-l.chanMessage:
switch message.Op {
case MessageQuit:
l.Debug.Printf("Shutting down - quit message received")
return
case MessageRequest:
// Add to message list and write to network
l.Debug.Printf("Sending message %d", message.MessageID)
buf := message.Packet.Bytes()
_, err := l.conn.Write(buf)
if err != nil {
l.Debug.Printf("Error Sending Message: %s", err.Error())
message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
close(message.Context.responses)
break
}
// Only add to messageContexts if we were able to
// successfully write the message.
l.messageContexts[message.MessageID] = message.Context
// Add timeout if defined
requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
if requestTimeout > 0 {
go func() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
}
}()
time.Sleep(requestTimeout)
timeoutMessage := &messagePacket{
Op: MessageTimeout,
MessageID: message.MessageID,
}
l.sendProcessMessage(timeoutMessage)
}()
}
case MessageResponse:
l.Debug.Printf("Receiving message %d", message.MessageID)
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
} else {
log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing())
ber.PrintPacket(message.Packet)
}
case MessageTimeout:
// Handle the timeout by closing the channel
// All reads will return immediately
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
delete(l.messageContexts, message.MessageID)
close(msgCtx.responses)
}
case MessageFinish:
l.Debug.Printf("Finished message %d", message.MessageID)
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
delete(l.messageContexts, message.MessageID)
close(msgCtx.responses)
}
}
}
}
}
func (l *Conn) reader() {
cleanstop := false
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in reader: %v", err)
}
if !cleanstop {
l.Close()
}
}()
for {
if cleanstop {
l.Debug.Printf("reader clean stopping (without closing the connection)")
return
}
packet, err := ber.ReadPacket(l.conn)
if err != nil {
// A read error is expected here if we are closing the connection...
if !l.IsClosing() {
l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
l.Debug.Printf("reader error: %s", err.Error())
}
return
}
addLDAPDescriptions(packet)
if len(packet.Children) == 0 {
l.Debug.Printf("Received bad ldap packet")
continue
}
l.messageMutex.Lock()
if l.isStartingTLS {
cleanstop = true
}
l.messageMutex.Unlock()
message := &messagePacket{
Op: MessageResponse,
MessageID: packet.Children[0].Value.(int64),
Packet: packet,
}
if !l.sendProcessMessage(message) {
return
}
}
}

vendor/github.com/go-ldap/ldap/control.go generated vendored Normal file

@ -0,0 +1,499 @@
package ldap
import (
"fmt"
"strconv"
"gopkg.in/asn1-ber.v1"
)
const (
// ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
ControlTypePaging = "1.2.840.113556.1.4.319"
// ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
// ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
// ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
// ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
// ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528"
// ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417"
)
// ControlTypeMap maps controls to text descriptions
var ControlTypeMap = map[string]string{
ControlTypePaging: "Paging",
ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
ControlTypeManageDsaIT: "Manage DSA IT",
ControlTypeMicrosoftNotification: "Change Notification - Microsoft",
ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft",
}
// Control defines an interface controls provide to encode and describe themselves
type Control interface {
// GetControlType returns the OID
GetControlType() string
// Encode returns the ber packet representation
Encode() *ber.Packet
// String returns a human-readable description
String() string
}
// ControlString implements the Control interface for simple controls
type ControlString struct {
ControlType string
Criticality bool
ControlValue string
}
// GetControlType returns the OID
func (c *ControlString) GetControlType() string {
return c.ControlType
}
// Encode returns the ber packet representation
func (c *ControlString) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
if c.Criticality {
packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
}
if c.ControlValue != "" {
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
}
return packet
}
// String returns a human-readable description
func (c *ControlString) String() string {
return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
}
// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
type ControlPaging struct {
// PagingSize indicates the page size
PagingSize uint32
// Cookie is an opaque value returned by the server to track a paging cursor
Cookie []byte
}
// GetControlType returns the OID
func (c *ControlPaging) GetControlType() string {
return ControlTypePaging
}
// Encode returns the ber packet representation
func (c *ControlPaging) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
cookie.Value = c.Cookie
cookie.Data.Write(c.Cookie)
seq.AppendChild(cookie)
p2.AppendChild(seq)
packet.AppendChild(p2)
return packet
}
// String returns a human-readable description
func (c *ControlPaging) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
ControlTypeMap[ControlTypePaging],
ControlTypePaging,
false,
c.PagingSize,
c.Cookie)
}
// SetCookie stores the given cookie in the paging control
func (c *ControlPaging) SetCookie(cookie []byte) {
c.Cookie = cookie
}
// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
type ControlBeheraPasswordPolicy struct {
// Expire contains the number of seconds before a password will expire
Expire int64
// Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
Grace int64
// Error indicates the error code
Error int8
// ErrorString is a human readable error
ErrorString string
}
// GetControlType returns the OID
func (c *ControlBeheraPasswordPolicy) GetControlType() string {
return ControlTypeBeheraPasswordPolicy
}
// Encode returns the ber packet representation
func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
return packet
}
// String returns a human-readable description
func (c *ControlBeheraPasswordPolicy) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
ControlTypeMap[ControlTypeBeheraPasswordPolicy],
ControlTypeBeheraPasswordPolicy,
false,
c.Expire,
c.Grace,
c.Error,
c.ErrorString)
}
// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordMustChange struct {
// MustChange indicates if the password is required to be changed
MustChange bool
}
// GetControlType returns the OID
func (c *ControlVChuPasswordMustChange) GetControlType() string {
return ControlTypeVChuPasswordMustChange
}
// Encode returns the ber packet representation
func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
return nil
}
// String returns a human-readable description
func (c *ControlVChuPasswordMustChange) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t MustChange: %v",
ControlTypeMap[ControlTypeVChuPasswordMustChange],
ControlTypeVChuPasswordMustChange,
false,
c.MustChange)
}
// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordWarning struct {
// Expire indicates the time in seconds until the password expires
Expire int64
}
// GetControlType returns the OID
func (c *ControlVChuPasswordWarning) GetControlType() string {
return ControlTypeVChuPasswordWarning
}
// Encode returns the ber packet representation
func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
return nil
}
// String returns a human-readable description
func (c *ControlVChuPasswordWarning) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t Expire: %b",
ControlTypeMap[ControlTypeVChuPasswordWarning],
ControlTypeVChuPasswordWarning,
false,
c.Expire)
}
// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
type ControlManageDsaIT struct {
// Criticality indicates if this control is required
Criticality bool
}
// GetControlType returns the OID
func (c *ControlManageDsaIT) GetControlType() string {
return ControlTypeManageDsaIT
}
// Encode returns the ber packet representation
func (c *ControlManageDsaIT) Encode() *ber.Packet {
//FIXME
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
if c.Criticality {
packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
}
return packet
}
// String returns a human-readable description
func (c *ControlManageDsaIT) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t",
ControlTypeMap[ControlTypeManageDsaIT],
ControlTypeManageDsaIT,
c.Criticality)
}
// NewControlManageDsaIT returns a ControlManageDsaIT control
func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
return &ControlManageDsaIT{Criticality: Criticality}
}
// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
type ControlMicrosoftNotification struct{}
// GetControlType returns the OID
func (c *ControlMicrosoftNotification) GetControlType() string {
return ControlTypeMicrosoftNotification
}
// Encode returns the ber packet representation
func (c *ControlMicrosoftNotification) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")"))
return packet
}
// String returns a human-readable description
func (c *ControlMicrosoftNotification) String() string {
return fmt.Sprintf(
"Control Type: %s (%q)",
ControlTypeMap[ControlTypeMicrosoftNotification],
ControlTypeMicrosoftNotification)
}
// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control
func NewControlMicrosoftNotification() *ControlMicrosoftNotification {
return &ControlMicrosoftNotification{}
}
// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
type ControlMicrosoftShowDeleted struct{}
// GetControlType returns the OID
func (c *ControlMicrosoftShowDeleted) GetControlType() string {
return ControlTypeMicrosoftShowDeleted
}
// Encode returns the ber packet representation
func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")"))
return packet
}
// String returns a human-readable description
func (c *ControlMicrosoftShowDeleted) String() string {
return fmt.Sprintf(
"Control Type: %s (%q)",
ControlTypeMap[ControlTypeMicrosoftShowDeleted],
ControlTypeMicrosoftShowDeleted)
}
// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control
func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted {
return &ControlMicrosoftShowDeleted{}
}
// FindControl returns the first control of the given type in the list, or nil
func FindControl(controls []Control, controlType string) Control {
for _, c := range controls {
if c.GetControlType() == controlType {
return c
}
}
return nil
}
// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
func DecodeControl(packet *ber.Packet) (Control, error) {
var (
ControlType = ""
Criticality = false
value *ber.Packet
)
switch len(packet.Children) {
case 0:
// at least one child is required for control type
return nil, fmt.Errorf("at least one child is required for control type")
case 1:
// just type, no criticality or value
ControlType = packet.Children[0].Value.(string)
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
case 2:
ControlType = packet.Children[0].Value.(string)
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
// Children[1] could be criticality or value (both are optional)
// duck-type on whether this is a boolean
if _, ok := packet.Children[1].Value.(bool); ok {
packet.Children[1].Description = "Criticality"
Criticality = packet.Children[1].Value.(bool)
} else {
packet.Children[1].Description = "Control Value"
value = packet.Children[1]
}
case 3:
ControlType = packet.Children[0].Value.(string)
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
packet.Children[1].Description = "Criticality"
Criticality = packet.Children[1].Value.(bool)
packet.Children[2].Description = "Control Value"
value = packet.Children[2]
default:
// more than 3 children is invalid
return nil, fmt.Errorf("more than 3 children is invalid for controls")
}
switch ControlType {
case ControlTypeManageDsaIT:
return NewControlManageDsaIT(Criticality), nil
case ControlTypePaging:
value.Description += " (Paging)"
c := new(ControlPaging)
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
value.AppendChild(valueChildren)
}
value = value.Children[0]
value.Description = "Search Control Value"
value.Children[0].Description = "Paging Size"
value.Children[1].Description = "Cookie"
c.PagingSize = uint32(value.Children[0].Value.(int64))
c.Cookie = value.Children[1].Data.Bytes()
value.Children[1].Value = c.Cookie
return c, nil
case ControlTypeBeheraPasswordPolicy:
value.Description += " (Password Policy - Behera)"
c := NewControlBeheraPasswordPolicy()
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
value.AppendChild(valueChildren)
}
sequence := value.Children[0]
for _, child := range sequence.Children {
if child.Tag == 0 {
//Warning
warningPacket := child.Children[0]
packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int64)
if ok {
if warningPacket.Tag == 0 {
//timeBeforeExpiration
c.Expire = val
warningPacket.Value = c.Expire
} else if warningPacket.Tag == 1 {
//graceAuthNsRemaining
c.Grace = val
warningPacket.Value = c.Grace
}
}
} else if child.Tag == 1 {
// Error
packet, err := ber.DecodePacketErr(child.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int8)
if !ok {
// not the expected int8; fall back to -1 (unknown error code)
val = -1
}
c.Error = val
child.Value = c.Error
c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
}
}
return c, nil
case ControlTypeVChuPasswordMustChange:
c := &ControlVChuPasswordMustChange{MustChange: true}
return c, nil
case ControlTypeVChuPasswordWarning:
c := &ControlVChuPasswordWarning{Expire: -1}
expireStr := ber.DecodeString(value.Data.Bytes())
expire, err := strconv.ParseInt(expireStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse value as int: %s", err)
}
c.Expire = expire
value.Value = c.Expire
return c, nil
case ControlTypeMicrosoftNotification:
return NewControlMicrosoftNotification(), nil
case ControlTypeMicrosoftShowDeleted:
return NewControlMicrosoftShowDeleted(), nil
default:
c := new(ControlString)
c.ControlType = ControlType
c.Criticality = Criticality
if value != nil {
c.ControlValue = value.Value.(string)
}
return c, nil
}
}
// NewControlString returns a generic control
func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
return &ControlString{
ControlType: controlType,
Criticality: criticality,
ControlValue: controlValue,
}
}
// NewControlPaging returns a paging control
func NewControlPaging(pagingSize uint32) *ControlPaging {
return &ControlPaging{PagingSize: pagingSize}
}
// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
return &ControlBeheraPasswordPolicy{
Expire: -1,
Grace: -1,
Error: -1,
}
}
func encodeControls(controls []Control) *ber.Packet {
packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
for _, control := range controls {
packet.AppendChild(control.Encode())
}
return packet
}
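
The control helpers above encode to and decode from BER symmetrically. A minimal sketch (not part of the vendored file; import path taken from the vendor directory, cookie value made up) of round-tripping a paging control through Encode and DecodeControl:

package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	// Build a paging control, encode it to BER, then decode it back.
	paging := ldap.NewControlPaging(100)
	paging.SetCookie([]byte("opaque-server-cookie"))

	decoded, err := ldap.DecodeControl(paging.Encode())
	if err != nil {
		panic(err)
	}
	// Prints the paging size (100) and the cookie via ControlPaging.String.
	fmt.Println(decoded.String())
}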

24
vendor/github.com/go-ldap/ldap/debug.go generated vendored Normal file

@ -0,0 +1,24 @@
package ldap
import (
"log"
"gopkg.in/asn1-ber.v1"
)
// debugging type
// - has a Printf method to write the debug output
type debugging bool
// write debug output
func (debug debugging) Printf(format string, args ...interface{}) {
if debug {
log.Printf(format, args...)
}
}
func (debug debugging) PrintPacket(packet *ber.Packet) {
if debug {
ber.PrintPacket(packet)
}
}

84
vendor/github.com/go-ldap/ldap/del.go generated vendored Normal file

@ -0,0 +1,84 @@
//
// https://tools.ietf.org/html/rfc4511
//
// DelRequest ::= [APPLICATION 10] LDAPDN
package ldap
import (
"errors"
"log"
"gopkg.in/asn1-ber.v1"
)
// DelRequest implements an LDAP deletion request
type DelRequest struct {
// DN is the name of the directory entry to delete
DN string
// Controls hold optional controls to send with the request
Controls []Control
}
func (d DelRequest) encode() *ber.Packet {
request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request")
request.Data.Write([]byte(d.DN))
return request
}
// NewDelRequest creates a delete request for the given DN and controls
func NewDelRequest(DN string,
Controls []Control) *DelRequest {
return &DelRequest{
DN: DN,
Controls: Controls,
}
}
// Del executes the given delete request
func (l *Conn) Del(delRequest *DelRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
packet.AppendChild(delRequest.encode())
if len(delRequest.Controls) > 0 {
packet.AppendChild(encodeControls(delRequest.Controls))
}
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return err
}
ber.PrintPacket(packet)
}
if packet.Children[1].Tag == ApplicationDelResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
l.Debug.Printf("%d: returning", msgCtx.id)
return nil
}
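
A minimal sketch of sending the delete request implemented above to a live directory; the host and DN are placeholders, and Dial comes from the vendored conn.go rather than this hunk:

package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Delete a single entry; nil means no request controls.
	if err := conn.Del(ldap.NewDelRequest("cn=obsolete,dc=example,dc=com", nil)); err != nil {
		log.Fatal(err)
	}
}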

247
vendor/github.com/go-ldap/ldap/dn.go generated vendored Normal file

@ -0,0 +1,247 @@
// File contains DN parsing functionality
//
// https://tools.ietf.org/html/rfc4514
//
// distinguishedName = [ relativeDistinguishedName
// *( COMMA relativeDistinguishedName ) ]
// relativeDistinguishedName = attributeTypeAndValue
// *( PLUS attributeTypeAndValue )
// attributeTypeAndValue = attributeType EQUALS attributeValue
// attributeType = descr / numericoid
// attributeValue = string / hexstring
//
// ; The following characters are to be escaped when they appear
// ; in the value to be encoded: ESC, one of <escaped>, leading
// ; SHARP or SPACE, trailing SPACE, and NULL.
// string = [ ( leadchar / pair ) [ *( stringchar / pair )
// ( trailchar / pair ) ] ]
//
// leadchar = LUTF1 / UTFMB
// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A /
// %x3D / %x3F-5B / %x5D-7F
//
// trailchar = TUTF1 / UTFMB
// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A /
// %x3D / %x3F-5B / %x5D-7F
//
// stringchar = SUTF1 / UTFMB
// SUTF1 = %x01-21 / %x23-2A / %x2D-3A /
// %x3D / %x3F-5B / %x5D-7F
//
// pair = ESC ( ESC / special / hexpair )
// special = escaped / SPACE / SHARP / EQUALS
// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
// hexstring = SHARP 1*hexpair
// hexpair = HEX HEX
//
// where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>,
// <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>,
// <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512].
//
package ldap
import (
"bytes"
enchex "encoding/hex"
"errors"
"fmt"
"strings"
"gopkg.in/asn1-ber.v1"
)
// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
type AttributeTypeAndValue struct {
// Type is the attribute type
Type string
// Value is the attribute value
Value string
}
// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
type RelativeDN struct {
Attributes []*AttributeTypeAndValue
}
// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
type DN struct {
RDNs []*RelativeDN
}
// ParseDN returns a distinguishedName or an error
func ParseDN(str string) (*DN, error) {
dn := new(DN)
dn.RDNs = make([]*RelativeDN, 0)
rdn := new(RelativeDN)
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
buffer := bytes.Buffer{}
attribute := new(AttributeTypeAndValue)
escaping := false
unescapedTrailingSpaces := 0
stringFromBuffer := func() string {
s := buffer.String()
s = s[0 : len(s)-unescapedTrailingSpaces]
buffer.Reset()
unescapedTrailingSpaces = 0
return s
}
for i := 0; i < len(str); i++ {
char := str[i]
switch {
case escaping:
unescapedTrailingSpaces = 0
escaping = false
switch char {
case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
buffer.WriteByte(char)
continue
}
// Not a special character, assume hex encoded octet
if len(str) == i+1 {
return nil, errors.New("got corrupted escaped character")
}
dst := []byte{0}
n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
if err != nil {
return nil, fmt.Errorf("failed to decode escaped character: %s", err)
} else if n != 1 {
return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n)
}
buffer.WriteByte(dst[0])
i++
case char == '\\':
unescapedTrailingSpaces = 0
escaping = true
case char == '=':
attribute.Type = stringFromBuffer()
// Special case: If the first character in the value is # the
// following data is BER encoded so we can just fast forward
// and decode.
if len(str) > i+1 && str[i+1] == '#' {
i += 2
index := strings.IndexAny(str[i:], ",+")
data := str
if index > 0 {
data = str[i : i+index]
} else {
data = str[i:]
}
rawBER, err := enchex.DecodeString(data)
if err != nil {
return nil, fmt.Errorf("failed to decode BER encoding: %s", err)
}
packet, err := ber.DecodePacketErr(rawBER)
if err != nil {
return nil, fmt.Errorf("failed to decode BER packet: %s", err)
}
buffer.WriteString(packet.Data.String())
i += len(data) - 1
}
case char == ',' || char == '+':
// We're done with this RDN or value, push it
if len(attribute.Type) == 0 {
return nil, errors.New("incomplete type, value pair")
}
attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
attribute = new(AttributeTypeAndValue)
if char == ',' {
dn.RDNs = append(dn.RDNs, rdn)
rdn = new(RelativeDN)
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
}
case char == ' ' && buffer.Len() == 0:
// ignore unescaped leading spaces
continue
default:
if char == ' ' {
// Track unescaped spaces in case they are trailing and we need to remove them
unescapedTrailingSpaces++
} else {
// Reset if we see a non-space char
unescapedTrailingSpaces = 0
}
buffer.WriteByte(char)
}
}
if buffer.Len() > 0 {
if len(attribute.Type) == 0 {
return nil, errors.New("DN ended with incomplete type, value pair")
}
attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
dn.RDNs = append(dn.RDNs, rdn)
}
return dn, nil
}
// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
// Returns true if they have the same number of relative distinguished names
// and corresponding relative distinguished names (by position) are the same.
func (d *DN) Equal(other *DN) bool {
if len(d.RDNs) != len(other.RDNs) {
return false
}
for i := range d.RDNs {
if !d.RDNs[i].Equal(other.RDNs[i]) {
return false
}
}
return true
}
// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
func (d *DN) AncestorOf(other *DN) bool {
if len(d.RDNs) >= len(other.RDNs) {
return false
}
// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
for i := range d.RDNs {
if !d.RDNs[i].Equal(otherRDNs[i]) {
return false
}
}
return true
}
// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
// The order of attributes is not significant.
// Case of attribute types is not significant.
func (r *RelativeDN) Equal(other *RelativeDN) bool {
if len(r.Attributes) != len(other.Attributes) {
return false
}
return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
}
func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
for _, attr := range attrs {
found := false
for _, myattr := range r.Attributes {
if myattr.Equal(attr) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
// Case of the attribute type is not significant
func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
}
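
A minimal sketch of the DN helpers above; the DNs reuse the examples from the AncestorOf comment:

package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
	if err != nil {
		panic(err)
	}
	child, err := ldap.ParseDN("OU=Sprockets,ou=widgets,o=acme.com")
	if err != nil {
		panic(err)
	}

	fmt.Println(parent.Equal(child))      // false: different number of RDNs
	fmt.Println(parent.AncestorOf(child)) // true: child adds one RDN in front of parent's RDNs
}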

4
vendor/github.com/go-ldap/ldap/doc.go generated vendored Normal file

@ -0,0 +1,4 @@
/*
Package ldap provides basic LDAP v3 functionality.
*/
package ldap

234
vendor/github.com/go-ldap/ldap/error.go generated vendored Normal file

@ -0,0 +1,234 @@
package ldap
import (
"fmt"
"gopkg.in/asn1-ber.v1"
)
// LDAP Result Codes
const (
LDAPResultSuccess = 0
LDAPResultOperationsError = 1
LDAPResultProtocolError = 2
LDAPResultTimeLimitExceeded = 3
LDAPResultSizeLimitExceeded = 4
LDAPResultCompareFalse = 5
LDAPResultCompareTrue = 6
LDAPResultAuthMethodNotSupported = 7
LDAPResultStrongAuthRequired = 8
LDAPResultReferral = 10
LDAPResultAdminLimitExceeded = 11
LDAPResultUnavailableCriticalExtension = 12
LDAPResultConfidentialityRequired = 13
LDAPResultSaslBindInProgress = 14
LDAPResultNoSuchAttribute = 16
LDAPResultUndefinedAttributeType = 17
LDAPResultInappropriateMatching = 18
LDAPResultConstraintViolation = 19
LDAPResultAttributeOrValueExists = 20
LDAPResultInvalidAttributeSyntax = 21
LDAPResultNoSuchObject = 32
LDAPResultAliasProblem = 33
LDAPResultInvalidDNSyntax = 34
LDAPResultIsLeaf = 35
LDAPResultAliasDereferencingProblem = 36
LDAPResultInappropriateAuthentication = 48
LDAPResultInvalidCredentials = 49
LDAPResultInsufficientAccessRights = 50
LDAPResultBusy = 51
LDAPResultUnavailable = 52
LDAPResultUnwillingToPerform = 53
LDAPResultLoopDetect = 54
LDAPResultSortControlMissing = 60
LDAPResultOffsetRangeError = 61
LDAPResultNamingViolation = 64
LDAPResultObjectClassViolation = 65
LDAPResultNotAllowedOnNonLeaf = 66
LDAPResultNotAllowedOnRDN = 67
LDAPResultEntryAlreadyExists = 68
LDAPResultObjectClassModsProhibited = 69
LDAPResultResultsTooLarge = 70
LDAPResultAffectsMultipleDSAs = 71
LDAPResultVirtualListViewErrorOrControlError = 76
LDAPResultOther = 80
LDAPResultServerDown = 81
LDAPResultLocalError = 82
LDAPResultEncodingError = 83
LDAPResultDecodingError = 84
LDAPResultTimeout = 85
LDAPResultAuthUnknown = 86
LDAPResultFilterError = 87
LDAPResultUserCanceled = 88
LDAPResultParamError = 89
LDAPResultNoMemory = 90
LDAPResultConnectError = 91
LDAPResultNotSupported = 92
LDAPResultControlNotFound = 93
LDAPResultNoResultsReturned = 94
LDAPResultMoreResultsToReturn = 95
LDAPResultClientLoop = 96
LDAPResultReferralLimitExceeded = 97
LDAPResultInvalidResponse = 100
LDAPResultAmbiguousResponse = 101
LDAPResultTLSNotSupported = 112
LDAPResultIntermediateResponse = 113
LDAPResultUnknownType = 114
LDAPResultCanceled = 118
LDAPResultNoSuchOperation = 119
LDAPResultTooLate = 120
LDAPResultCannotCancel = 121
LDAPResultAssertionFailed = 122
LDAPResultAuthorizationDenied = 123
LDAPResultSyncRefreshRequired = 4096
ErrorNetwork = 200
ErrorFilterCompile = 201
ErrorFilterDecompile = 202
ErrorDebugging = 203
ErrorUnexpectedMessage = 204
ErrorUnexpectedResponse = 205
ErrorEmptyPassword = 206
)
// LDAPResultCodeMap contains string descriptions for LDAP error codes
var LDAPResultCodeMap = map[uint16]string{
LDAPResultSuccess: "Success",
LDAPResultOperationsError: "Operations Error",
LDAPResultProtocolError: "Protocol Error",
LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
LDAPResultCompareFalse: "Compare False",
LDAPResultCompareTrue: "Compare True",
LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
LDAPResultStrongAuthRequired: "Strong Auth Required",
LDAPResultReferral: "Referral",
LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
LDAPResultConfidentialityRequired: "Confidentiality Required",
LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
LDAPResultNoSuchAttribute: "No Such Attribute",
LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
LDAPResultInappropriateMatching: "Inappropriate Matching",
LDAPResultConstraintViolation: "Constraint Violation",
LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
LDAPResultNoSuchObject: "No Such Object",
LDAPResultAliasProblem: "Alias Problem",
LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
LDAPResultIsLeaf: "Is Leaf",
LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
LDAPResultInvalidCredentials: "Invalid Credentials",
LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
LDAPResultBusy: "Busy",
LDAPResultUnavailable: "Unavailable",
LDAPResultUnwillingToPerform: "Unwilling To Perform",
LDAPResultLoopDetect: "Loop Detect",
LDAPResultSortControlMissing: "Sort Control Missing",
LDAPResultOffsetRangeError: "Result Offset Range Error",
LDAPResultNamingViolation: "Naming Violation",
LDAPResultObjectClassViolation: "Object Class Violation",
LDAPResultResultsTooLarge: "Results Too Large",
LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
LDAPResultEntryAlreadyExists: "Entry Already Exists",
LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view",
LDAPResultOther: "Other",
LDAPResultServerDown: "Cannot establish a connection",
LDAPResultLocalError: "An error occurred",
LDAPResultEncodingError: "LDAP encountered an error while encoding",
LDAPResultDecodingError: "LDAP encountered an error while decoding",
LDAPResultTimeout: "LDAP timeout while waiting for a response from the server",
LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown",
LDAPResultFilterError: "An error occurred while encoding the given search filter",
LDAPResultUserCanceled: "The user canceled the operation",
LDAPResultParamError: "An invalid parameter was specified",
LDAPResultNoMemory: "Out of memory error",
LDAPResultConnectError: "A connection to the server could not be established",
LDAPResultNotSupported: "An attempt has been made to use a feature not supported by LDAP",
LDAPResultControlNotFound: "The controls required to perform the requested operation were not found",
LDAPResultNoResultsReturned: "No results were returned from the server",
LDAPResultMoreResultsToReturn: "There are more results in the chain of results",
LDAPResultClientLoop: "A loop has been detected. For example when following referrals",
LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded",
LDAPResultCanceled: "Operation was canceled",
LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation",
LDAPResultTooLate: "Too late to cancel the outstanding operation",
LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed",
LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed",
LDAPResultSyncRefreshRequired: "Refresh Required",
LDAPResultInvalidResponse: "Invalid Response",
LDAPResultAmbiguousResponse: "Ambiguous Response",
LDAPResultTLSNotSupported: "Tls Not Supported",
LDAPResultIntermediateResponse: "Intermediate Response",
LDAPResultUnknownType: "Unknown Type",
LDAPResultAuthorizationDenied: "Authorization Denied",
ErrorNetwork: "Network Error",
ErrorFilterCompile: "Filter Compile Error",
ErrorFilterDecompile: "Filter Decompile Error",
ErrorDebugging: "Debugging Error",
ErrorUnexpectedMessage: "Unexpected Message",
ErrorUnexpectedResponse: "Unexpected Response",
ErrorEmptyPassword: "Empty password not allowed by the client",
}
// Error holds LDAP error information
type Error struct {
// Err is the underlying error
Err error
// ResultCode is the LDAP error code
ResultCode uint16
// MatchedDN is the matchedDN returned if any
MatchedDN string
}
func (e *Error) Error() string {
return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
}
// GetLDAPError creates an Error out of a BER packet representing an LDAPResult.
// The return value is an error that can be cast to an *Error.
// This function returns nil if the resultCode in the LDAPResult sequence is success (0).
func GetLDAPError(packet *ber.Packet) error {
if packet == nil {
return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")}
} else if len(packet.Children) >= 2 {
response := packet.Children[1]
if response == nil {
return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")}
}
if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
resultCode := uint16(response.Children[0].Value.(int64))
if resultCode == 0 { // No error
return nil
}
return &Error{ResultCode: resultCode, MatchedDN: response.Children[1].Value.(string),
Err: fmt.Errorf("%s", response.Children[2].Value.(string))}
}
}
return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format")}
}
// NewError creates an LDAP error with the given code and underlying error
func NewError(resultCode uint16, err error) error {
return &Error{ResultCode: resultCode, Err: err}
}
// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
func IsErrorWithCode(err error, desiredResultCode uint16) bool {
if err == nil {
return false
}
serverError, ok := err.(*Error)
if !ok {
return false
}
return serverError.ResultCode == desiredResultCode
}
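
A minimal sketch of branching on a specific result code with NewError and IsErrorWithCode as defined above:

package main

import (
	"errors"
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	err := ldap.NewError(ldap.LDAPResultInvalidCredentials, errors.New("bind failed"))

	if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
		// Error() renders as: LDAP Result Code 49 "Invalid Credentials": bind failed
		fmt.Println(err)
	}
}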

465
vendor/github.com/go-ldap/ldap/filter.go generated vendored Normal file

@ -0,0 +1,465 @@
package ldap
import (
"bytes"
hexpac "encoding/hex"
"errors"
"fmt"
"strings"
"unicode/utf8"
"gopkg.in/asn1-ber.v1"
)
// Filter choices
const (
FilterAnd = 0
FilterOr = 1
FilterNot = 2
FilterEqualityMatch = 3
FilterSubstrings = 4
FilterGreaterOrEqual = 5
FilterLessOrEqual = 6
FilterPresent = 7
FilterApproxMatch = 8
FilterExtensibleMatch = 9
)
// FilterMap contains human readable descriptions of Filter choices
var FilterMap = map[uint64]string{
FilterAnd: "And",
FilterOr: "Or",
FilterNot: "Not",
FilterEqualityMatch: "Equality Match",
FilterSubstrings: "Substrings",
FilterGreaterOrEqual: "Greater Or Equal",
FilterLessOrEqual: "Less Or Equal",
FilterPresent: "Present",
FilterApproxMatch: "Approx Match",
FilterExtensibleMatch: "Extensible Match",
}
// SubstringFilter options
const (
FilterSubstringsInitial = 0
FilterSubstringsAny = 1
FilterSubstringsFinal = 2
)
// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
var FilterSubstringsMap = map[uint64]string{
FilterSubstringsInitial: "Substrings Initial",
FilterSubstringsAny: "Substrings Any",
FilterSubstringsFinal: "Substrings Final",
}
// MatchingRuleAssertion choices
const (
MatchingRuleAssertionMatchingRule = 1
MatchingRuleAssertionType = 2
MatchingRuleAssertionMatchValue = 3
MatchingRuleAssertionDNAttributes = 4
)
// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
var MatchingRuleAssertionMap = map[uint64]string{
MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
MatchingRuleAssertionType: "Matching Rule Assertion Type",
MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
}
// CompileFilter converts a string representation of a filter into a BER-encoded packet
func CompileFilter(filter string) (*ber.Packet, error) {
if len(filter) == 0 || filter[0] != '(' {
return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
}
packet, pos, err := compileFilter(filter, 1)
if err != nil {
return nil, err
}
switch {
case pos > len(filter):
return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
case pos < len(filter):
return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
}
return packet, nil
}
// DecompileFilter converts a packet representation of a filter into a string representation
func DecompileFilter(packet *ber.Packet) (ret string, err error) {
defer func() {
if r := recover(); r != nil {
err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
}
}()
ret = "("
err = nil
childStr := ""
switch packet.Tag {
case FilterAnd:
ret += "&"
for _, child := range packet.Children {
childStr, err = DecompileFilter(child)
if err != nil {
return
}
ret += childStr
}
case FilterOr:
ret += "|"
for _, child := range packet.Children {
childStr, err = DecompileFilter(child)
if err != nil {
return
}
ret += childStr
}
case FilterNot:
ret += "!"
childStr, err = DecompileFilter(packet.Children[0])
if err != nil {
return
}
ret += childStr
case FilterSubstrings:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "="
for i, child := range packet.Children[1].Children {
if i == 0 && child.Tag != FilterSubstringsInitial {
ret += "*"
}
ret += EscapeFilter(ber.DecodeString(child.Data.Bytes()))
if child.Tag != FilterSubstringsFinal {
ret += "*"
}
}
case FilterEqualityMatch:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterGreaterOrEqual:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += ">="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterLessOrEqual:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "<="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterPresent:
ret += ber.DecodeString(packet.Data.Bytes())
ret += "=*"
case FilterApproxMatch:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "~="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterExtensibleMatch:
attr := ""
dnAttributes := false
matchingRule := ""
value := ""
for _, child := range packet.Children {
switch child.Tag {
case MatchingRuleAssertionMatchingRule:
matchingRule = ber.DecodeString(child.Data.Bytes())
case MatchingRuleAssertionType:
attr = ber.DecodeString(child.Data.Bytes())
case MatchingRuleAssertionMatchValue:
value = ber.DecodeString(child.Data.Bytes())
case MatchingRuleAssertionDNAttributes:
dnAttributes = child.Value.(bool)
}
}
if len(attr) > 0 {
ret += attr
}
if dnAttributes {
ret += ":dn"
}
if len(matchingRule) > 0 {
ret += ":"
ret += matchingRule
}
ret += ":="
ret += EscapeFilter(value)
}
ret += ")"
return
}
func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
for pos < len(filter) && filter[pos] == '(' {
child, newPos, err := compileFilter(filter, pos+1)
if err != nil {
return pos, err
}
pos = newPos
parent.AppendChild(child)
}
if pos == len(filter) {
return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
}
return pos + 1, nil
}
func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
var (
packet *ber.Packet
err error
)
defer func() {
if r := recover(); r != nil {
err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
}
}()
newPos := pos
currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
switch currentRune {
case utf8.RuneError:
return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
case '(':
packet, newPos, err = compileFilter(filter, pos+currentWidth)
newPos++
return packet, newPos, err
case '&':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
return packet, newPos, err
case '|':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
return packet, newPos, err
case '!':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
var child *ber.Packet
child, newPos, err = compileFilter(filter, pos+currentWidth)
packet.AppendChild(child)
return packet, newPos, err
default:
const (
stateReadingAttr = 0
stateReadingExtensibleMatchingRule = 1
stateReadingCondition = 2
)
state := stateReadingAttr
attribute := ""
extensibleDNAttributes := false
extensibleMatchingRule := ""
condition := ""
for newPos < len(filter) {
remainingFilter := filter[newPos:]
currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
if currentRune == ')' {
break
}
if currentRune == utf8.RuneError {
return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
}
switch state {
case stateReadingAttr:
switch {
// Extensible rule, with only DN-matching
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
extensibleDNAttributes = true
state = stateReadingCondition
newPos += 5
// Extensible rule, with DN-matching and a matching OID
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
extensibleDNAttributes = true
state = stateReadingExtensibleMatchingRule
newPos += 4
// Extensible rule, with attr only
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
state = stateReadingCondition
newPos += 2
// Extensible rule, with no DN attribute matching
case currentRune == ':':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
state = stateReadingExtensibleMatchingRule
newPos++
// Equality condition
case currentRune == '=':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
state = stateReadingCondition
newPos++
// Greater-than or equal
case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
state = stateReadingCondition
newPos += 2
// Less-than or equal
case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
state = stateReadingCondition
newPos += 2
// Approx
case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
state = stateReadingCondition
newPos += 2
// Still reading the attribute name
default:
attribute += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
}
case stateReadingExtensibleMatchingRule:
switch {
// Matching rule OID is done
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
state = stateReadingCondition
newPos += 2
// Still reading the matching rule oid
default:
extensibleMatchingRule += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
}
case stateReadingCondition:
// append to the condition
condition += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
}
}
if newPos == len(filter) {
err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
return packet, newPos, err
}
if packet == nil {
err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
return packet, newPos, err
}
switch {
case packet.Tag == FilterExtensibleMatch:
// MatchingRuleAssertion ::= SEQUENCE {
// matchingRule [1] MatchingRuleID OPTIONAL,
// type [2] AttributeDescription OPTIONAL,
// matchValue [3] AssertionValue,
// dnAttributes [4] BOOLEAN DEFAULT FALSE
// }
// Include the matching rule oid, if specified
if len(extensibleMatchingRule) > 0 {
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
}
// Include the attribute, if specified
if len(attribute) > 0 {
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType]))
}
// Add the value (only required child)
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
if encodeErr != nil {
return packet, newPos, encodeErr
}
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
// Defaults to false, so only include in the sequence if true
if extensibleDNAttributes {
packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
}
case packet.Tag == FilterEqualityMatch && condition == "*":
packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent])
case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"):
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
packet.Tag = FilterSubstrings
packet.Description = FilterMap[uint64(packet.Tag)]
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
parts := strings.Split(condition, "*")
for i, part := range parts {
if part == "" {
continue
}
var tag ber.Tag
switch i {
case 0:
tag = FilterSubstringsInitial
case len(parts) - 1:
tag = FilterSubstringsFinal
default:
tag = FilterSubstringsAny
}
encodedString, encodeErr := escapedStringToEncodedBytes(part)
if encodeErr != nil {
return packet, newPos, encodeErr
}
seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
}
packet.AppendChild(seq)
default:
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
if encodeErr != nil {
return packet, newPos, encodeErr
}
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
}
newPos += currentWidth
return packet, newPos, err
}
}
// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
func escapedStringToEncodedBytes(escapedString string) (string, error) {
var buffer bytes.Buffer
i := 0
for i < len(escapedString) {
currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:])
if currentRune == utf8.RuneError {
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i))
}
// Check for escaped hex characters and convert them to their literal value for transport.
if currentRune == '\\' {
// http://tools.ietf.org/search/rfc4515
// \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
// being a member of UTF1SUBSET.
if i+2 > len(escapedString) {
return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
}
escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
if decodeErr != nil {
return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
}
buffer.WriteByte(escByte[0])
i += 2 // +1 from end of loop, so 3 total for \xx.
} else {
buffer.WriteRune(currentRune)
}
i += currentWidth
}
return buffer.String(), nil
}
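
A minimal sketch of the filter round trip implemented above, compiling a string filter to a BER packet and decompiling it back:

package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	packet, err := ldap.CompileFilter("(&(objectClass=person)(uid=jd*))")
	if err != nil {
		panic(err)
	}

	back, err := ldap.DecompileFilter(packet)
	if err != nil {
		panic(err)
	}
	fmt.Println(back) // (&(objectClass=person)(uid=jd*))
}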

338
vendor/github.com/go-ldap/ldap/ldap.go generated vendored Normal file

@ -0,0 +1,338 @@
package ldap
import (
"errors"
"fmt"
"io/ioutil"
"os"
"gopkg.in/asn1-ber.v1"
)
// LDAP Application Codes
const (
ApplicationBindRequest = 0
ApplicationBindResponse = 1
ApplicationUnbindRequest = 2
ApplicationSearchRequest = 3
ApplicationSearchResultEntry = 4
ApplicationSearchResultDone = 5
ApplicationModifyRequest = 6
ApplicationModifyResponse = 7
ApplicationAddRequest = 8
ApplicationAddResponse = 9
ApplicationDelRequest = 10
ApplicationDelResponse = 11
ApplicationModifyDNRequest = 12
ApplicationModifyDNResponse = 13
ApplicationCompareRequest = 14
ApplicationCompareResponse = 15
ApplicationAbandonRequest = 16
ApplicationSearchResultReference = 19
ApplicationExtendedRequest = 23
ApplicationExtendedResponse = 24
)
// ApplicationMap contains human readable descriptions of LDAP Application Codes
var ApplicationMap = map[uint8]string{
ApplicationBindRequest: "Bind Request",
ApplicationBindResponse: "Bind Response",
ApplicationUnbindRequest: "Unbind Request",
ApplicationSearchRequest: "Search Request",
ApplicationSearchResultEntry: "Search Result Entry",
ApplicationSearchResultDone: "Search Result Done",
ApplicationModifyRequest: "Modify Request",
ApplicationModifyResponse: "Modify Response",
ApplicationAddRequest: "Add Request",
ApplicationAddResponse: "Add Response",
ApplicationDelRequest: "Del Request",
ApplicationDelResponse: "Del Response",
ApplicationModifyDNRequest: "Modify DN Request",
ApplicationModifyDNResponse: "Modify DN Response",
ApplicationCompareRequest: "Compare Request",
ApplicationCompareResponse: "Compare Response",
ApplicationAbandonRequest: "Abandon Request",
ApplicationSearchResultReference: "Search Result Reference",
ApplicationExtendedRequest: "Extended Request",
ApplicationExtendedResponse: "Extended Response",
}
// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
const (
BeheraPasswordExpired = 0
BeheraAccountLocked = 1
BeheraChangeAfterReset = 2
BeheraPasswordModNotAllowed = 3
BeheraMustSupplyOldPassword = 4
BeheraInsufficientPasswordQuality = 5
BeheraPasswordTooShort = 6
BeheraPasswordTooYoung = 7
BeheraPasswordInHistory = 8
)
// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
var BeheraPasswordPolicyErrorMap = map[int8]string{
BeheraPasswordExpired: "Password expired",
BeheraAccountLocked: "Account locked",
BeheraChangeAfterReset: "Password must be changed",
BeheraPasswordModNotAllowed: "Policy prevents password modification",
BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
BeheraInsufficientPasswordQuality: "Password fails quality checks",
BeheraPasswordTooShort: "Password is too short for policy",
BeheraPasswordTooYoung: "Password has been changed too recently",
BeheraPasswordInHistory: "New password is in list of old passwords",
}
// Adds descriptions to an LDAP Response packet for debugging
func addLDAPDescriptions(packet *ber.Packet) (err error) {
defer func() {
if r := recover(); r != nil {
err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions"))
}
}()
packet.Description = "LDAP Response"
packet.Children[0].Description = "Message ID"
application := uint8(packet.Children[1].Tag)
packet.Children[1].Description = ApplicationMap[application]
switch application {
case ApplicationBindRequest:
err = addRequestDescriptions(packet)
case ApplicationBindResponse:
err = addDefaultLDAPResponseDescriptions(packet)
case ApplicationUnbindRequest:
err = addRequestDescriptions(packet)
case ApplicationSearchRequest:
err = addRequestDescriptions(packet)
case ApplicationSearchResultEntry:
packet.Children[1].Children[0].Description = "Object Name"
packet.Children[1].Children[1].Description = "Attributes"
for _, child := range packet.Children[1].Children[1].Children {
child.Description = "Attribute"
child.Children[0].Description = "Attribute Name"
child.Children[1].Description = "Attribute Values"
for _, grandchild := range child.Children[1].Children {
grandchild.Description = "Attribute Value"
}
}
if len(packet.Children) == 3 {
err = addControlDescriptions(packet.Children[2])
}
case ApplicationSearchResultDone:
err = addDefaultLDAPResponseDescriptions(packet)
case ApplicationModifyRequest:
err = addRequestDescriptions(packet)
case ApplicationModifyResponse:
case ApplicationAddRequest:
err = addRequestDescriptions(packet)
case ApplicationAddResponse:
case ApplicationDelRequest:
err = addRequestDescriptions(packet)
case ApplicationDelResponse:
case ApplicationModifyDNRequest:
err = addRequestDescriptions(packet)
case ApplicationModifyDNResponse:
case ApplicationCompareRequest:
err = addRequestDescriptions(packet)
case ApplicationCompareResponse:
case ApplicationAbandonRequest:
err = addRequestDescriptions(packet)
case ApplicationSearchResultReference:
case ApplicationExtendedRequest:
err = addRequestDescriptions(packet)
case ApplicationExtendedResponse:
}
return err
}
func addControlDescriptions(packet *ber.Packet) error {
packet.Description = "Controls"
for _, child := range packet.Children {
var value *ber.Packet
controlType := ""
child.Description = "Control"
switch len(child.Children) {
case 0:
// at least one child is required for control type
return fmt.Errorf("at least one child is required for control type")
case 1:
// just type, no criticality or value
controlType = child.Children[0].Value.(string)
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
case 2:
controlType = child.Children[0].Value.(string)
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
// Children[1] could be criticality or value (both are optional)
// duck-type on whether this is a boolean
if _, ok := child.Children[1].Value.(bool); ok {
child.Children[1].Description = "Criticality"
} else {
child.Children[1].Description = "Control Value"
value = child.Children[1]
}
case 3:
// criticality and value present
controlType = child.Children[0].Value.(string)
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
child.Children[1].Description = "Criticality"
child.Children[2].Description = "Control Value"
value = child.Children[2]
default:
// more than 3 children is invalid
return fmt.Errorf("more than 3 children for control packet found")
}
if value == nil {
continue
}
switch controlType {
case ControlTypePaging:
value.Description += " (Paging)"
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
value.AppendChild(valueChildren)
}
value.Children[0].Description = "Real Search Control Value"
value.Children[0].Children[0].Description = "Paging Size"
value.Children[0].Children[1].Description = "Cookie"
case ControlTypeBeheraPasswordPolicy:
value.Description += " (Password Policy - Behera Draft)"
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
value.AppendChild(valueChildren)
}
sequence := value.Children[0]
for _, child := range sequence.Children {
if child.Tag == 0 {
//Warning
warningPacket := child.Children[0]
packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int64)
if ok {
if warningPacket.Tag == 0 {
//timeBeforeExpiration
value.Description += " (TimeBeforeExpiration)"
warningPacket.Value = val
} else if warningPacket.Tag == 1 {
//graceAuthNsRemaining
value.Description += " (GraceAuthNsRemaining)"
warningPacket.Value = val
}
}
} else if child.Tag == 1 {
// Error
packet, err := ber.DecodePacketErr(child.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int8)
if !ok {
val = -1
}
child.Description = "Error"
child.Value = val
}
}
}
}
return nil
}
func addRequestDescriptions(packet *ber.Packet) error {
packet.Description = "LDAP Request"
packet.Children[0].Description = "Message ID"
packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
if len(packet.Children) == 3 {
return addControlDescriptions(packet.Children[2])
}
return nil
}
func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error {
// GetLDAPError returns nil on success; a checked assertion avoids a panic here.
ldapError, _ := GetLDAPError(packet).(*Error)
if ldapError == nil {
ldapError = &Error{ResultCode: LDAPResultSuccess}
}
packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[ldapError.ResultCode] + ")"
packet.Children[1].Children[1].Description = "Matched DN (" + ldapError.MatchedDN + ")"
packet.Children[1].Children[2].Description = "Error Message"
if len(packet.Children[1].Children) > 3 {
packet.Children[1].Children[3].Description = "Referral"
}
if len(packet.Children) == 3 {
return addControlDescriptions(packet.Children[2])
}
return nil
}
// DebugBinaryFile reads and prints packets from the given filename
func DebugBinaryFile(fileName string) error {
file, err := ioutil.ReadFile(fileName)
if err != nil {
return NewError(ErrorDebugging, err)
}
ber.PrintBytes(os.Stdout, file, "")
packet, err := ber.DecodePacketErr(file)
if err != nil {
return fmt.Errorf("failed to decode packet: %s", err)
}
if err := addLDAPDescriptions(packet); err != nil {
return err
}
ber.PrintPacket(packet)
return nil
}
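A minimal sketch (not part of the vendored go-ldap code) of DebugBinaryFile; the helper name and file path are hypothetical.
// dumpCapture prints the raw bytes and the annotated, decoded packet of a
// captured LDAP message to stdout.
func dumpCapture() error {
    return ldap.DebugBinaryFile("testdata/search-request.bin")
}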
var hex = "0123456789abcdef"
func mustEscape(c byte) bool {
return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
}
// EscapeFilter escapes the special characters in the set `()*\` and
// those outside the range 0 < c < 0x80 in the provided LDAP filter
// string, as defined in RFC4515.
func EscapeFilter(filter string) string {
escape := 0
for i := 0; i < len(filter); i++ {
if mustEscape(filter[i]) {
escape++
}
}
if escape == 0 {
return filter
}
buf := make([]byte, len(filter)+escape*2)
for i, j := 0, 0; i < len(filter); i++ {
c := filter[i]
if mustEscape(c) {
buf[j+0] = '\\'
buf[j+1] = hex[c>>4]
buf[j+2] = hex[c&0xf]
j += 3
} else {
buf[j] = c
j++
}
}
return string(buf)
}
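A minimal sketch (not part of the vendored go-ldap code) of how EscapeFilter is typically used to embed untrusted input in a search filter; the helper name and input value are hypothetical, and it assumes fmt plus the github.com/go-ldap/ldap import matching the vendor path.
// buildUIDFilter escapes user-supplied input before placing it in an
// equality filter. For the input `some(user)*` it returns
// `(uid=some\28user\29\2a)`.
func buildUIDFilter(userInput string) string {
    return fmt.Sprintf("(uid=%s)", ldap.EscapeFilter(userInput))
}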

104
vendor/github.com/go-ldap/ldap/moddn.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
// Package ldap - moddn.go contains ModifyDN functionality
//
// https://tools.ietf.org/html/rfc4511
// ModifyDNRequest ::= [APPLICATION 12] SEQUENCE {
// entry LDAPDN,
// newrdn RelativeLDAPDN,
// deleteoldrdn BOOLEAN,
// newSuperior [0] LDAPDN OPTIONAL }
//
//
package ldap
import (
"errors"
"log"
"gopkg.in/asn1-ber.v1"
)
// ModifyDNRequest holds the request to modify a DN
type ModifyDNRequest struct {
DN string
NewRDN string
DeleteOldRDN bool
NewSuperior string
}
// NewModifyDNRequest creates a new request which can be passed to ModifyDN().
//
// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an
// empty string for just changing the object's RDN.
//
// For moving the object without renaming, the "rdn" must be the first
// RDN of the given DN.
//
// A call like
// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
// will set up the request to just rename uid=someone,dc=example,dc=org to
// uid=newname,dc=example,dc=org.
func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest {
return &ModifyDNRequest{
DN: dn,
NewRDN: rdn,
DeleteOldRDN: delOld,
NewSuperior: newSup,
}
}
func (m ModifyDNRequest) encode() *ber.Packet {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request")
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN"))
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.NewRDN, "New RDN"))
request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, m.DeleteOldRDN, "Delete old RDN"))
if m.NewSuperior != "" {
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, m.NewSuperior, "New Superior"))
}
return request
}
// ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument
// to NewModifyDNRequest() is not "").
func (l *Conn) ModifyDN(m *ModifyDNRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
packet.AppendChild(m.encode())
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return err
}
ber.PrintPacket(packet)
}
if packet.Children[1].Tag == ApplicationModifyDNResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
l.Debug.Printf("%d: returning", msgCtx.id)
return nil
}
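A minimal sketch (not part of the vendored go-ldap code) of the rename described in the NewModifyDNRequest comment above; the helper name, the DNs, and the established *ldap.Conn are hypothetical.
// renameEntry renames uid=someone,dc=example,dc=org to
// uid=newname,dc=example,dc=org: true deletes the old RDN and the empty
// newSup keeps the entry under the same parent.
func renameEntry(conn *ldap.Conn) error {
    req := ldap.NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
    return conn.ModifyDN(req)
}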

173
vendor/github.com/go-ldap/ldap/modify.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
// File contains Modify functionality
//
// https://tools.ietf.org/html/rfc4511
//
// ModifyRequest ::= [APPLICATION 6] SEQUENCE {
// object LDAPDN,
// changes SEQUENCE OF change SEQUENCE {
// operation ENUMERATED {
// add (0),
// delete (1),
// replace (2),
// ... },
// modification PartialAttribute } }
//
// PartialAttribute ::= SEQUENCE {
// type AttributeDescription,
// vals SET OF value AttributeValue }
//
// AttributeDescription ::= LDAPString
// -- Constrained to <attributedescription>
// -- [RFC4512]
//
// AttributeValue ::= OCTET STRING
//
package ldap
import (
"errors"
"log"
"gopkg.in/asn1-ber.v1"
)
// Change operation choices
const (
AddAttribute = 0
DeleteAttribute = 1
ReplaceAttribute = 2
)
// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type PartialAttribute struct {
// Type is the type of the partial attribute
Type string
// Vals are the values of the partial attribute
Vals []string
}
func (p *PartialAttribute) encode() *ber.Packet {
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
for _, value := range p.Vals {
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
}
seq.AppendChild(set)
return seq
}
// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type Change struct {
// Operation is the type of change to be made
Operation uint
// Modification is the attribute to be modified
Modification PartialAttribute
}
func (c *Change) encode() *ber.Packet {
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation"))
change.AppendChild(c.Modification.encode())
return change
}
// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type ModifyRequest struct {
// DN is the distinguishedName of the directory entry to modify
DN string
// Changes contain the attributes to modify
Changes []Change
// Controls hold optional controls to send with the request
Controls []Control
}
// Add appends the given attribute to the list of changes to be made
func (m *ModifyRequest) Add(attrType string, attrVals []string) {
m.appendChange(AddAttribute, attrType, attrVals)
}
// Delete appends the given attribute to the list of changes to be made
func (m *ModifyRequest) Delete(attrType string, attrVals []string) {
m.appendChange(DeleteAttribute, attrType, attrVals)
}
// Replace appends the given attribute to the list of changes to be made
func (m *ModifyRequest) Replace(attrType string, attrVals []string) {
m.appendChange(ReplaceAttribute, attrType, attrVals)
}
func (m *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) {
m.Changes = append(m.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}})
}
func (m ModifyRequest) encode() *ber.Packet {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN"))
changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
for _, change := range m.Changes {
changes.AppendChild(change.encode())
}
request.AppendChild(changes)
return request
}
// NewModifyRequest creates a modify request for the given DN
func NewModifyRequest(
dn string,
controls []Control,
) *ModifyRequest {
return &ModifyRequest{
DN: dn,
Controls: controls,
}
}
// Modify performs the ModifyRequest
func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
packet.AppendChild(modifyRequest.encode())
if len(modifyRequest.Controls) > 0 {
packet.AppendChild(encodeControls(modifyRequest.Controls))
}
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return err
}
ber.PrintPacket(packet)
}
if packet.Children[1].Tag == ApplicationModifyResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
l.Debug.Printf("%d: returning", msgCtx.id)
return nil
}
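A minimal sketch (not part of the vendored go-ldap code) of a Modify call built with the Add/Replace helpers above; the helper name, DN, and attribute values are hypothetical, and conn is assumed to be an established *ldap.Conn.
// updateMail replaces the mail attribute and appends a description on an
// existing entry, then sends the ModifyRequest without controls.
func updateMail(conn *ldap.Conn) error {
    req := ldap.NewModifyRequest("uid=someone,dc=example,dc=org", nil)
    req.Replace("mail", []string{"someone@example.org"})
    req.Add("description", []string{"updated via ModifyRequest"})
    return conn.Modify(req)
}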

157
vendor/github.com/go-ldap/ldap/passwdmodify.go generated vendored Normal file
View File

@ -0,0 +1,157 @@
// This file contains the password modify extended operation as specified in rfc 3062
//
// https://tools.ietf.org/html/rfc3062
//
package ldap
import (
"errors"
"fmt"
"gopkg.in/asn1-ber.v1"
)
const (
passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
)
// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
type PasswordModifyRequest struct {
// UserIdentity is an optional string representation of the user associated with the request.
// This string may or may not be an LDAPDN [RFC2253].
// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
UserIdentity string
// OldPassword, if present, contains the user's current password
OldPassword string
// NewPassword, if present, contains the desired password for this user
NewPassword string
}
// PasswordModifyResult holds the server response to a PasswordModifyRequest
type PasswordModifyResult struct {
// GeneratedPassword holds a password generated by the server, if present
GeneratedPassword string
// Referral is the returned referral
Referral string
}
func (r *PasswordModifyRequest) encode() (*ber.Packet, error) {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
if r.UserIdentity != "" {
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity"))
}
if r.OldPassword != "" {
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password"))
}
if r.NewPassword != "" {
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password"))
}
extendedRequestValue.AppendChild(passwordModifyRequestValue)
request.AppendChild(extendedRequestValue)
return request, nil
}
// NewPasswordModifyRequest creates a new PasswordModifyRequest
//
// According to RFC 3062:
// userIdentity is a string representing the user associated with the request.
// This string may or may not be an LDAPDN (RFC 2253).
// If userIdentity is empty then the operation will act on the user associated
// with the session.
//
// oldPassword is the user's current password. It may be empty or
// required, depending on the session user's access rights (usually an
// administrator can change a user's password without knowing the current
// one) and the password policy (see the pwdSafeModify password policy attribute).
//
// newPassword is the desired password for the user. If empty, the server
// may return an error or generate a new password, which will be available
// in PasswordModifyResult.GeneratedPassword.
//
func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
return &PasswordModifyRequest{
UserIdentity: userIdentity,
OldPassword: oldPassword,
NewPassword: newPassword,
}
}
// PasswordModify performs the modification request
func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
encodedPasswordModifyRequest, err := passwordModifyRequest.encode()
if err != nil {
return nil, err
}
packet.AppendChild(encodedPasswordModifyRequest)
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return nil, err
}
defer l.finishMessage(msgCtx)
result := &PasswordModifyResult{}
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return nil, err
}
if packet == nil {
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return nil, err
}
ber.PrintPacket(packet)
}
if packet.Children[1].Tag == ApplicationExtendedResponse {
err := GetLDAPError(packet)
if err != nil {
if IsErrorWithCode(err, LDAPResultReferral) {
for _, child := range packet.Children[1].Children {
if child.Tag == 3 {
result.Referral = child.Children[0].Value.(string)
}
}
}
return result, err
}
} else {
return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag))
}
extendedResponse := packet.Children[1]
for _, child := range extendedResponse.Children {
if child.Tag == 11 {
passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
if len(passwordModifyResponseValue.Children) == 1 {
if passwordModifyResponseValue.Children[0].Tag == 0 {
result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
}
}
}
}
return result, nil
}
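A minimal sketch (not part of the vendored go-ldap code) of the password modify extended operation; the helper name is hypothetical and conn is assumed to be a bound *ldap.Conn. Per the NewPasswordModifyRequest comment, the empty userIdentity targets the bound user and the empty newPassword asks the server to generate one.
// rotatePassword changes the bound user's password and returns the
// server-generated replacement from PasswordModifyResult.
func rotatePassword(conn *ldap.Conn, oldPassword string) (string, error) {
    res, err := conn.PasswordModify(ldap.NewPasswordModifyRequest("", oldPassword, ""))
    if err != nil {
        return "", err
    }
    return res.GeneratedPassword, nil
}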

450
vendor/github.com/go-ldap/ldap/search.go generated vendored Normal file
View File

@ -0,0 +1,450 @@
// File contains Search functionality
//
// https://tools.ietf.org/html/rfc4511
//
// SearchRequest ::= [APPLICATION 3] SEQUENCE {
// baseObject LDAPDN,
// scope ENUMERATED {
// baseObject (0),
// singleLevel (1),
// wholeSubtree (2),
// ... },
// derefAliases ENUMERATED {
// neverDerefAliases (0),
// derefInSearching (1),
// derefFindingBaseObj (2),
// derefAlways (3) },
// sizeLimit INTEGER (0 .. maxInt),
// timeLimit INTEGER (0 .. maxInt),
// typesOnly BOOLEAN,
// filter Filter,
// attributes AttributeSelection }
//
// AttributeSelection ::= SEQUENCE OF selector LDAPString
// -- The LDAPString is constrained to
// -- <attributeSelector> in Section 4.5.1.8
//
// Filter ::= CHOICE {
// and [0] SET SIZE (1..MAX) OF filter Filter,
// or [1] SET SIZE (1..MAX) OF filter Filter,
// not [2] Filter,
// equalityMatch [3] AttributeValueAssertion,
// substrings [4] SubstringFilter,
// greaterOrEqual [5] AttributeValueAssertion,
// lessOrEqual [6] AttributeValueAssertion,
// present [7] AttributeDescription,
// approxMatch [8] AttributeValueAssertion,
// extensibleMatch [9] MatchingRuleAssertion,
// ... }
//
// SubstringFilter ::= SEQUENCE {
// type AttributeDescription,
// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE {
// initial [0] AssertionValue, -- can occur at most once
// any [1] AssertionValue,
// final [2] AssertionValue } -- can occur at most once
// }
//
// MatchingRuleAssertion ::= SEQUENCE {
// matchingRule [1] MatchingRuleId OPTIONAL,
// type [2] AttributeDescription OPTIONAL,
// matchValue [3] AssertionValue,
// dnAttributes [4] BOOLEAN DEFAULT FALSE }
//
//
package ldap
import (
"errors"
"fmt"
"sort"
"strings"
"gopkg.in/asn1-ber.v1"
)
// scope choices
const (
ScopeBaseObject = 0
ScopeSingleLevel = 1
ScopeWholeSubtree = 2
)
// ScopeMap contains human readable descriptions of scope choices
var ScopeMap = map[int]string{
ScopeBaseObject: "Base Object",
ScopeSingleLevel: "Single Level",
ScopeWholeSubtree: "Whole Subtree",
}
// derefAliases
const (
NeverDerefAliases = 0
DerefInSearching = 1
DerefFindingBaseObj = 2
DerefAlways = 3
)
// DerefMap contains human readable descriptions of derefAliases choices
var DerefMap = map[int]string{
NeverDerefAliases: "NeverDerefAliases",
DerefInSearching: "DerefInSearching",
DerefFindingBaseObj: "DerefFindingBaseObj",
DerefAlways: "DerefAlways",
}
// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
// The attribute map is iterated in alphabetical order of its keys, so the same
// input map always produces an entry with the same attribute order.
func NewEntry(dn string, attributes map[string][]string) *Entry {
var attributeNames []string
for attributeName := range attributes {
attributeNames = append(attributeNames, attributeName)
}
sort.Strings(attributeNames)
var encodedAttributes []*EntryAttribute
for _, attributeName := range attributeNames {
encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
}
return &Entry{
DN: dn,
Attributes: encodedAttributes,
}
}
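A minimal sketch (not part of the vendored go-ldap code) of the deterministic ordering NewEntry provides; the helper name, DN, and attributes are hypothetical.
// newTestEntry always yields an entry that lists cn before sn, because
// NewEntry sorts attribute names regardless of map iteration order.
func newTestEntry() *ldap.Entry {
    return ldap.NewEntry("cn=test,dc=example,dc=org", map[string][]string{
        "sn": {"Test"},
        "cn": {"test"},
    })
}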
// Entry represents a single search result entry
type Entry struct {
// DN is the distinguished name of the entry
DN string
// Attributes are the returned attributes for the entry
Attributes []*EntryAttribute
}
// GetAttributeValues returns the values for the named attribute, or an empty list
func (e *Entry) GetAttributeValues(attribute string) []string {
for _, attr := range e.Attributes {
if attr.Name == attribute {
return attr.Values
}
}
return []string{}
}
// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
for _, attr := range e.Attributes {
if attr.Name == attribute {
return attr.ByteValues
}
}
return [][]byte{}
}
// GetAttributeValue returns the first value for the named attribute, or ""
func (e *Entry) GetAttributeValue(attribute string) string {
values := e.GetAttributeValues(attribute)
if len(values) == 0 {
return ""
}
return values[0]
}
// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
func (e *Entry) GetRawAttributeValue(attribute string) []byte {
values := e.GetRawAttributeValues(attribute)
if len(values) == 0 {
return []byte{}
}
return values[0]
}
// Print outputs a human-readable description
func (e *Entry) Print() {
fmt.Printf("DN: %s\n", e.DN)
for _, attr := range e.Attributes {
attr.Print()
}
}
// PrettyPrint outputs a human-readable description with indenting
func (e *Entry) PrettyPrint(indent int) {
fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
for _, attr := range e.Attributes {
attr.PrettyPrint(indent + 2)
}
}
// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
func NewEntryAttribute(name string, values []string) *EntryAttribute {
var bytes [][]byte
for _, value := range values {
bytes = append(bytes, []byte(value))
}
return &EntryAttribute{
Name: name,
Values: values,
ByteValues: bytes,
}
}
// EntryAttribute holds a single attribute
type EntryAttribute struct {
// Name is the name of the attribute
Name string
// Values contain the string values of the attribute
Values []string
// ByteValues contain the raw values of the attribute
ByteValues [][]byte
}
// Print outputs a human-readable description
func (e *EntryAttribute) Print() {
fmt.Printf("%s: %s\n", e.Name, e.Values)
}
// PrettyPrint outputs a human-readable description with indenting
func (e *EntryAttribute) PrettyPrint(indent int) {
fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
}
// SearchResult holds the server's response to a search request
type SearchResult struct {
// Entries are the returned entries
Entries []*Entry
// Referrals are the returned referrals
Referrals []string
// Controls are the returned controls
Controls []Control
}
// Print outputs a human-readable description
func (s *SearchResult) Print() {
for _, entry := range s.Entries {
entry.Print()
}
}
// PrettyPrint outputs a human-readable description with indenting
func (s *SearchResult) PrettyPrint(indent int) {
for _, entry := range s.Entries {
entry.PrettyPrint(indent)
}
}
// SearchRequest represents a search request to send to the server
type SearchRequest struct {
BaseDN string
Scope int
DerefAliases int
SizeLimit int
TimeLimit int
TypesOnly bool
Filter string
Attributes []string
Controls []Control
}
func (s *SearchRequest) encode() (*ber.Packet, error) {
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN"))
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope"))
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases"))
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit"))
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit"))
request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only"))
// compile and encode filter
filterPacket, err := CompileFilter(s.Filter)
if err != nil {
return nil, err
}
request.AppendChild(filterPacket)
// encode attributes
attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
for _, attribute := range s.Attributes {
attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
}
request.AppendChild(attributesPacket)
return request, nil
}
// NewSearchRequest creates a new search request
func NewSearchRequest(
BaseDN string,
Scope, DerefAliases, SizeLimit, TimeLimit int,
TypesOnly bool,
Filter string,
Attributes []string,
Controls []Control,
) *SearchRequest {
return &SearchRequest{
BaseDN: BaseDN,
Scope: Scope,
DerefAliases: DerefAliases,
SizeLimit: SizeLimit,
TimeLimit: TimeLimit,
TypesOnly: TypesOnly,
Filter: Filter,
Attributes: Attributes,
Controls: Controls,
}
}
// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
// The following four cases are possible given the arguments:
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
var pagingControl *ControlPaging
control := FindControl(searchRequest.Controls, ControlTypePaging)
if control == nil {
pagingControl = NewControlPaging(pagingSize)
searchRequest.Controls = append(searchRequest.Controls, pagingControl)
} else {
castControl, ok := control.(*ControlPaging)
if !ok {
return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control)
}
if castControl.PagingSize != pagingSize {
return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
}
pagingControl = castControl
}
searchResult := new(SearchResult)
for {
result, err := l.Search(searchRequest)
l.Debug.Printf("Looking for Paging Control...")
if err != nil {
return searchResult, err
}
if result == nil {
return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
}
for _, entry := range result.Entries {
searchResult.Entries = append(searchResult.Entries, entry)
}
for _, referral := range result.Referrals {
searchResult.Referrals = append(searchResult.Referrals, referral)
}
for _, control := range result.Controls {
searchResult.Controls = append(searchResult.Controls, control)
}
l.Debug.Printf("Looking for Paging Control...")
pagingResult := FindControl(result.Controls, ControlTypePaging)
if pagingResult == nil {
pagingControl = nil
l.Debug.Printf("Could not find paging control. Breaking...")
break
}
cookie := pagingResult.(*ControlPaging).Cookie
if len(cookie) == 0 {
pagingControl = nil
l.Debug.Printf("Could not find cookie. Breaking...")
break
}
pagingControl.SetCookie(cookie)
}
if pagingControl != nil {
l.Debug.Printf("Abandoning Paging...")
pagingControl.PagingSize = 0
l.Search(searchRequest)
}
return searchResult, nil
}
// Search performs the given search request
func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
// encode search request
encodedSearchRequest, err := searchRequest.encode()
if err != nil {
return nil, err
}
packet.AppendChild(encodedSearchRequest)
// encode search controls
if len(searchRequest.Controls) > 0 {
packet.AppendChild(encodeControls(searchRequest.Controls))
}
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessage(packet)
if err != nil {
return nil, err
}
defer l.finishMessage(msgCtx)
result := &SearchResult{
Entries: make([]*Entry, 0),
Referrals: make([]string, 0),
Controls: make([]Control, 0)}
foundSearchResultDone := false
for !foundSearchResultDone {
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return nil, err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
return nil, err
}
ber.PrintPacket(packet)
}
switch packet.Children[1].Tag {
case 4:
entry := new(Entry)
entry.DN = packet.Children[1].Children[0].Value.(string)
for _, child := range packet.Children[1].Children[1].Children {
attr := new(EntryAttribute)
attr.Name = child.Children[0].Value.(string)
for _, value := range child.Children[1].Children {
attr.Values = append(attr.Values, value.Value.(string))
attr.ByteValues = append(attr.ByteValues, value.ByteValue)
}
entry.Attributes = append(entry.Attributes, attr)
}
result.Entries = append(result.Entries, entry)
case 5:
err := GetLDAPError(packet)
if err != nil {
return nil, err
}
if len(packet.Children) == 3 {
for _, child := range packet.Children[2].Children {
decodedChild, err := DecodeControl(child)
if err != nil {
return nil, fmt.Errorf("failed to decode child control: %s", err)
}
result.Controls = append(result.Controls, decodedChild)
}
}
foundSearchResultDone = true
case 19:
result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
}
}
l.Debug.Printf("%d: returning", msgCtx.id)
return result, nil
}
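A minimal sketch (not part of the vendored go-ldap code) of a paged subtree search built from the constants and constructors above; the helper name, base DN, filter, attribute list, and page size are hypothetical, and conn is assumed to be an established *ldap.Conn.
// listMailAddresses runs a paged whole-subtree search and collects the
// mail attribute of every returned entry.
func listMailAddresses(conn *ldap.Conn) ([]string, error) {
    req := ldap.NewSearchRequest(
        "dc=example,dc=org",
        ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
        "(objectClass=inetOrgPerson)",
        []string{"dn", "cn", "mail"},
        nil,
    )
    res, err := conn.SearchWithPaging(req, 100) // 100 entries per page
    if err != nil {
        return nil, err
    }
    mails := make([]string, 0, len(res.Entries))
    for _, entry := range res.Entries {
        mails = append(mails, entry.GetAttributeValue("mail"))
    }
    return mails, nil
}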

View File

@ -11,6 +11,7 @@ John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Peter Edge <peter.edge@gmail.com>
Roger Johansson <rogeralsing@gmail.com>
Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com>

View File

@ -1,5 +0,0 @@
The contributors to the Go protobuf repository:
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@ -1,7 +1,6 @@
Protocol Buffers for Go with Gadgets
Copyright (c) 2013, The GoGo Authors. All rights reserved.
http://github.com/gogo/protobuf
Protocol Buffers for Go with Gadgets
Go support for Protocol Buffers - Google's data interchange format

View File

@ -38,6 +38,6 @@ test: install generate-test-pbs
generate-test-pbs:
make install
make -C testdata
protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto
make -C test_proto
make -C proto3_proto
make

View File

@ -35,22 +35,39 @@
package proto
import (
"fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
func Clone(src Message) Message {
in := reflect.ValueOf(src)
if in.IsNil() {
return pb
return src
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
dst := out.Interface().(Message)
Merge(dst, src)
return dst
}
// Merger is the interface representing objects that can merge messages of the same type.
type Merger interface {
// Merge merges src into this message.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
//
// Merge may panic if called with a different argument type than the receiver.
Merge(src Message)
}
// generatedMerger is the custom merge method that generated protos will have.
// We must add this method since a generated Merge method will conflict with
// many existing protos that have a Merge data field already defined.
type generatedMerger interface {
XXX_Merge(src Message)
}
// Merge merges src into dst.
@ -58,17 +75,24 @@ func Clone(pb Message) Message {
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
if m, ok := dst.(Merger); ok {
m.Merge(src)
return
}
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return // Merge from nil src is a noop
}
if m, ok := dst.(generatedMerger); ok {
m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
@ -89,7 +113,7 @@ func mergeStruct(out, in reflect.Value) {
bIn := emIn.GetExtensions()
bOut := emOut.GetExtensions()
*bOut = append(*bOut, *bIn...)
} else if emIn, ok := extendable(in.Addr().Interface()); ok {
} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {

39
vendor/github.com/gogo/protobuf/proto/custom_gogo.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import "reflect"
type custom interface {
Marshal() ([]byte, error)
Unmarshal(data []byte) error
Size() int
}
var customType = reflect.TypeOf((*custom)(nil)).Elem()

View File

@ -39,8 +39,6 @@ import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
@ -192,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
@ -267,9 +260,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
return
}
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@ -311,81 +301,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
// Unmarshal implementations should not clear the receiver.
// Any unmarshaled data should be merged into the receiver.
// Callers of Unmarshal that do not want to retain existing data
// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// newUnmarshaler is the interface representing objects that can
// unmarshal themselves. The semantics are identical to Unmarshaler.
//
// This exists to support protoc-gen-go generated messages.
// The proto package will stop type-asserting to this interface in the future.
//
// DO NOT DEPEND ON THIS.
type newUnmarshaler interface {
XXX_Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
@ -395,7 +333,13 @@ type Unmarshaler interface {
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
@ -405,8 +349,16 @@ func Unmarshal(buf []byte, pb Message) error {
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
@ -422,12 +374,17 @@ func (p *Buffer) DecodeMessage(pb Message) error {
}
// DecodeGroup reads a tag-delimited group from the Buffer.
// StartGroup tag is already consumed. This function consumes
// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
b := p.buf[p.index:]
x, y := findEndGroup(b)
if x < 0 {
return io.ErrUnexpectedEOF
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
err := Unmarshal(b[:x], pb)
p.index += y
return err
}
// Unmarshal parses the protocol buffer representation in the
@ -438,541 +395,33 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(newUnmarshaler); ok {
err := u.XXX_Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok {
if isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.GetExtensions()
*ext = append(*ext, o.buf[oi:o.index]...)
}
continue
}
} else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
extmap := e.extensionsWrite()
ext := extmap[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
extmap[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
}
v.SetMapIndex(keyelem, valelem)
return nil
}
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
// Slow workaround for messages that aren't Unmarshalers.
// This includes some hand-coded .pb.go files and
// bootstrap protos.
// TODO: fix all of those and then add Unmarshal to
// the Message interface. Then:
// The cast above and code below can be deleted.
// The old unmarshaler can be deleted.
// Clients can call Unmarshal directly (can already do that, actually).
var info InternalMessageInfo
err := info.Unmarshal(pb, p.buf[p.index:])
p.index = len(p.buf)
return err
}

View File

@ -1,172 +0,0 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"reflect"
)
// Decode a reference to a struct pointer.
func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
panic("not supported, since this is a pointer receiver")
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
bas := structPointer_FieldPointer(base, p.field)
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of references to struct pointers ([]struct).
func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error {
newBas := appendStructPointer(base, p.field, p.sstype)
if is_group {
panic("not supported, maybe in future, if requested.")
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
panic("not supported, since this is not a pointer receiver.")
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, newBas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of references to struct pointers.
func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_ref_struct(p, false, base)
}
func setPtrCustomType(base structPointer, f field, v interface{}) {
if v == nil {
return
}
structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v)))
}
func setCustomType(base structPointer, f field, value interface{}) {
if value == nil {
return
}
v := reflect.ValueOf(value).Elem()
t := reflect.TypeOf(value).Elem()
kind := t.Kind()
switch kind {
case reflect.Slice:
slice := reflect.MakeSlice(t, v.Len(), v.Cap())
reflect.Copy(slice, v)
oldHeader := structPointer_GetSliceHeader(base, f)
oldHeader.Data = slice.Pointer()
oldHeader.Len = v.Len()
oldHeader.Cap = v.Cap()
default:
size := reflect.TypeOf(value).Elem().Size()
structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size))
}
}
func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
i := reflect.New(p.ctype.Elem()).Interface()
custom := (i).(Unmarshaler)
if err := custom.Unmarshal(b); err != nil {
return err
}
setPtrCustomType(base, p.field, custom)
return nil
}
func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
i := reflect.New(p.ctype).Interface()
custom := (i).(Unmarshaler)
if err := custom.Unmarshal(b); err != nil {
return err
}
if custom != nil {
setCustomType(base, p.field, custom)
}
return nil
}
// Decode a slice of bytes ([]byte) into a slice of custom types.
func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
i := reflect.New(p.ctype.Elem()).Interface()
custom := (i).(Unmarshaler)
if err := custom.Unmarshal(b); err != nil {
return err
}
newBas := appendStructPointer(base, p.field, p.ctype)
var zero field
setCustomType(newBas, zero, custom)
return nil
}

vendor/github.com/gogo/protobuf/proto/deprecated.go generated vendored Normal file (63 lines added)

@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import "errors"
// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}

vendor/github.com/gogo/protobuf/proto/discard.go generated vendored Normal file (350 lines added)

@ -0,0 +1,350 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
type generatedDiscarder interface {
XXX_DiscardUnknown()
}
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
if m, ok := m.(generatedDiscarder); ok {
m.XXX_DiscardUnknown()
return
}
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
// but the master branch has no implementation for InternalMessageInfo,
// so it would be more work to replicate that approach.
discardLegacy(m)
}
// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
di := atomicLoadDiscardInfo(&a.discard)
if di == nil {
di = getDiscardInfo(reflect.TypeOf(m).Elem())
atomicStoreDiscardInfo(&a.discard, di)
}
di.discard(toPointer(&m))
}
type discardInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []discardFieldInfo
unrecognized field
}
type discardFieldInfo struct {
field field // Offset of field, guaranteed to be valid
discard func(src pointer)
}
var (
discardInfoMap = map[reflect.Type]*discardInfo{}
discardInfoLock sync.Mutex
)
func getDiscardInfo(t reflect.Type) *discardInfo {
discardInfoLock.Lock()
defer discardInfoLock.Unlock()
di := discardInfoMap[t]
if di == nil {
di = &discardInfo{typ: t}
discardInfoMap[t] = di
}
return di
}
func (di *discardInfo) discard(src pointer) {
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&di.initialized) == 0 {
di.computeDiscardInfo()
}
for _, fi := range di.fields {
sfp := src.offset(fi.field)
fi.discard(sfp)
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
// Ignore lock since DiscardUnknown is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
DiscardUnknown(m)
}
}
}
if di.unrecognized.IsValid() {
*src.offset(di.unrecognized).toBytes() = nil
}
}
func (di *discardInfo) computeDiscardInfo() {
di.lock.Lock()
defer di.lock.Unlock()
if di.initialized != 0 {
return
}
t := di.typ
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
dfi := discardFieldInfo{field: toField(&f)}
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
case isSlice: // E.g., []*pb.T
discardInfo := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sps := src.getPointerSlice()
for _, sp := range sps {
if !sp.isNil() {
discardInfo.discard(sp)
}
}
}
default: // E.g., *pb.T
discardInfo := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sp := src.getPointer()
if !sp.isNil() {
discardInfo.discard(sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
default: // E.g., map[K]V
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
dfi.discard = func(src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
DiscardUnknown(val.Interface().(Message))
}
}
} else {
dfi.discard = func(pointer) {} // Noop
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
default: // E.g., interface{}
// TODO: Make this faster?
dfi.discard = func(src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
DiscardUnknown(sv.Interface().(Message))
}
}
}
}
default:
continue
}
di.fields = append(di.fields, dfi)
}
di.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
di.unrecognized = toField(&f)
}
atomic.StoreInt32(&di.initialized, 1)
}
func discardLegacy(m Message) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Ptr || v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
vf := v.Field(i)
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
case isSlice: // E.g., []*pb.T
for j := 0; j < vf.Len(); j++ {
discardLegacy(vf.Index(j).Interface().(Message))
}
default: // E.g., *pb.T
discardLegacy(vf.Interface().(Message))
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
default: // E.g., map[K]V
tv := vf.Type().Elem()
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
for _, key := range vf.MapKeys() {
val := vf.MapIndex(key)
discardLegacy(val.Interface().(Message))
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
default: // E.g., test_proto.isCommunique_Union interface
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
if !vf.IsNil() {
vf = vf.Elem() // E.g., test_proto.Communique_Msg
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
if vf.Kind() == reflect.Ptr {
discardLegacy(vf.Interface().(Message))
}
}
}
}
}
}
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
if vf.Type() != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
vf.Set(reflect.ValueOf([]byte(nil)))
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(m); err == nil {
// Ignore lock since discardLegacy is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
discardLegacy(m)
}
}
}
}
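
The DiscardUnknown machinery added in discard.go above is what applications reach through proto.DiscardUnknown after unmarshaling. A minimal caller-side sketch, not part of the vendored diff; examplepb.Event and example.com/gen/examplepb are hypothetical names used only for illustration:

package main

import (
	"log"

	"github.com/gogo/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package, illustration only
)

func handle(payload []byte) {
	var msg examplepb.Event // assumed to be a generated proto.Message
	if err := proto.Unmarshal(payload, &msg); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	// Unknown fields survive Unmarshal and would be re-emitted by Marshal;
	// DiscardUnknown drops them recursively from msg and its sub-messages.
	proto.DiscardUnknown(&msg)
}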


@ -47,157 +47,3 @@ func (*duration) String() string { return "duration<string>" }
func init() {
RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
}
func (o *Buffer) decDuration() (time.Duration, error) {
b, err := o.DecodeRawBytes(true)
if err != nil {
return 0, err
}
dproto := &duration{}
if err := Unmarshal(b, dproto); err != nil {
return 0, err
}
return durationFromProto(dproto)
}
func (o *Buffer) dec_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, uint64(d))
return nil
}
func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d))
return nil
}
func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
var zero field
setPtrCustomType(newBas, zero, &d)
return nil
}
func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(uint64(d))
return nil
}
func size_duration(p *Properties, base structPointer) (n int) {
structp := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return 0
}
dur := structPointer_Interface(structp, durationType).(*time.Duration)
d := durationProto(*dur)
size := Size(d)
return size + sizeVarint(uint64(size)) + len(p.tagcode)
}
func (o *Buffer) enc_duration(p *Properties, base structPointer) error {
structp := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return ErrNil
}
dur := structPointer_Interface(structp, durationType).(*time.Duration)
d := durationProto(*dur)
data, err := Marshal(d)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
func size_ref_duration(p *Properties, base structPointer) (n int) {
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
d := durationProto(*dur)
size := Size(d)
return size + sizeVarint(uint64(size)) + len(p.tagcode)
}
func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error {
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
d := durationProto(*dur)
data, err := Marshal(d)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
func size_slice_duration(p *Properties, base structPointer) (n int) {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
if durs[i] == nil {
return 0
}
dproto := durationProto(*durs[i])
size := Size(dproto)
n += len(p.tagcode) + size + sizeVarint(uint64(size))
}
return n
}
func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
if durs[i] == nil {
return errRepeatedHasNil
}
dproto := durationProto(*durs[i])
data, err := Marshal(dproto)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}
func size_slice_ref_duration(p *Properties, base structPointer) (n int) {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
dproto := durationProto(durs[i])
size := Size(dproto)
n += len(p.tagcode) + size + sizeVarint(uint64(size))
}
return n
}
func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
dproto := durationProto(durs[i])
data, err := Marshal(dproto)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}

File diff suppressed because it is too large


@ -3,11 +3,6 @@
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// http://github.com/golang/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -18,9 +13,6 @@
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ -36,315 +28,6 @@
package proto
import (
"reflect"
)
func NewRequiredNotSetError(field string) *RequiredNotSetError {
return &RequiredNotSetError{field}
}
type Sizer interface {
Size() int
}
func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
s := *structPointer_Bytes(base, p.field)
if s == nil {
return ErrNil
}
o.buf = append(o.buf, s...)
return nil
}
func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
if s == nil {
return 0
}
n += len(s)
return
}
// Encode a reference to bool pointer.
func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
v := *structPointer_BoolVal(base, p.field)
x := 0
if v {
x = 1
}
o.buf = append(o.buf, p.tagcode...)
p.valEnc(o, uint64(x))
return nil
}
func size_ref_bool(p *Properties, base structPointer) int {
return len(p.tagcode) + 1 // each bool takes exactly one byte
}
// Encode a reference to int32 pointer.
func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Val(base, p.field)
x := int32(word32Val_Get(v))
o.buf = append(o.buf, p.tagcode...)
p.valEnc(o, uint64(x))
return nil
}
func size_ref_int32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field)
x := int32(word32Val_Get(v))
n += len(p.tagcode)
n += p.valSize(uint64(x))
return
}
func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error {
v := structPointer_Word32Val(base, p.field)
x := word32Val_Get(v)
o.buf = append(o.buf, p.tagcode...)
p.valEnc(o, uint64(x))
return nil
}
func size_ref_uint32(p *Properties, base structPointer) (n int) {
v := structPointer_Word32Val(base, p.field)
x := word32Val_Get(v)
n += len(p.tagcode)
n += p.valSize(uint64(x))
return
}
// Encode a reference to an int64 pointer.
func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Val(base, p.field)
x := word64Val_Get(v)
o.buf = append(o.buf, p.tagcode...)
p.valEnc(o, x)
return nil
}
func size_ref_int64(p *Properties, base structPointer) (n int) {
v := structPointer_Word64Val(base, p.field)
x := word64Val_Get(v)
n += len(p.tagcode)
n += p.valSize(x)
return
}
// Encode a reference to a string pointer.
func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error {
v := *structPointer_StringVal(base, p.field)
o.buf = append(o.buf, p.tagcode...)
o.EncodeStringBytes(v)
return nil
}
func size_ref_string(p *Properties, base structPointer) (n int) {
v := *structPointer_StringVal(base, p.field)
n += len(p.tagcode)
n += sizeStringBytes(v)
return
}
// Encode a reference to a message struct.
func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error {
var state errorState
structp := structPointer_GetRefStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return ErrNil
}
// Can the object marshal itself?
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, err := m.Marshal()
if err != nil && !state.shouldContinue(err, nil) {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
o.buf = append(o.buf, p.tagcode...)
return o.enc_len_struct(p.sprop, structp, &state)
}
//TODO this is only copied, please fix this
func size_ref_struct_message(p *Properties, base structPointer) int {
structp := structPointer_GetRefStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return 0
}
// Can the object marshal itself?
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, _ := m.Marshal()
n0 := len(p.tagcode)
n1 := sizeRawBytes(data)
return n0 + n1
}
n0 := len(p.tagcode)
n1 := size_struct(p.sprop, structp)
n2 := sizeVarint(uint64(n1)) // size of encoded length
return n0 + n1 + n2
}
// Encode a slice of references to message struct pointers ([]struct).
func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
var state errorState
ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
l := ss.Len()
for i := 0; i < l; i++ {
structp := ss.Index(i)
if structPointer_IsNil(structp) {
return errRepeatedHasNil
}
// Can the object marshal itself?
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, err := m.Marshal()
if err != nil && !state.shouldContinue(err, nil) {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
continue
}
o.buf = append(o.buf, p.tagcode...)
err := o.enc_len_struct(p.sprop, structp, &state)
if err != nil && !state.shouldContinue(err, nil) {
if err == ErrNil {
return errRepeatedHasNil
}
return err
}
}
return state.err
}
//TODO this is only copied, please fix this
func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
l := ss.Len()
n += l * len(p.tagcode)
for i := 0; i < l; i++ {
structp := ss.Index(i)
if structPointer_IsNil(structp) {
return // return the size up to this point
}
// Can the object marshal itself?
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, _ := m.Marshal()
n += len(p.tagcode)
n += sizeRawBytes(data)
continue
}
n0 := size_struct(p.sprop, structp)
n1 := sizeVarint(uint64(n0)) // size of encoded length
n += n0 + n1
}
return
}
func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error {
i := structPointer_InterfaceRef(base, p.field, p.ctype)
if i == nil {
return ErrNil
}
custom := i.(Marshaler)
data, err := custom.Marshal()
if err != nil {
return err
}
if data == nil {
return ErrNil
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
func size_custom_bytes(p *Properties, base structPointer) (n int) {
n += len(p.tagcode)
i := structPointer_InterfaceRef(base, p.field, p.ctype)
if i == nil {
return 0
}
custom := i.(Marshaler)
data, _ := custom.Marshal()
n += sizeRawBytes(data)
return
}
func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error {
custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler)
data, err := custom.Marshal()
if err != nil {
return err
}
if data == nil {
return ErrNil
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}
func size_custom_ref_bytes(p *Properties, base structPointer) (n int) {
n += len(p.tagcode)
i := structPointer_InterfaceAt(base, p.field, p.ctype)
if i == nil {
return 0
}
custom := i.(Marshaler)
data, _ := custom.Marshal()
n += sizeRawBytes(data)
return
}
func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error {
inter := structPointer_InterfaceRef(base, p.field, p.ctype)
if inter == nil {
return ErrNil
}
slice := reflect.ValueOf(inter)
l := slice.Len()
for i := 0; i < l; i++ {
v := slice.Index(i)
custom := v.Interface().(Marshaler)
data, err := custom.Marshal()
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}
func size_custom_slice_bytes(p *Properties, base structPointer) (n int) {
inter := structPointer_InterfaceRef(base, p.field, p.ctype)
if inter == nil {
return 0
}
slice := reflect.ValueOf(inter)
l := slice.Len()
n += l * len(p.tagcode)
for i := 0; i < l; i++ {
v := slice.Index(i)
custom := v.Interface().(Marshaler)
data, _ := custom.Marshal()
n += sizeRawBytes(data)
}
return
}


@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
m1, m2 := e1.value, e2.value
if m1 == nil && m2 == nil {
// Both have only encoded form.
if bytes.Equal(e1.enc, e2.enc) {
continue
}
// The bytes are different, but the extensions might still be
// equal. We need to decode them to compare.
}
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
desc = m[extNum]
}
if desc == nil {
// If both have only encoded form and the bytes are the same,
// it is handled above. We get here when the bytes are different.
// We don't know how to decode it, so just compare them as byte
// slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
return false
}
var err error
if m1 == nil {


@ -38,6 +38,7 @@ package proto
import (
"errors"
"fmt"
"io"
"reflect"
"strconv"
"sync"
@ -69,12 +70,6 @@ type extendableProtoV1 interface {
ExtensionMap() map[int32]Extension
}
type extensionsBytes interface {
Message
ExtensionRangeArray() []ExtensionRange
GetExtensions() *[]byte
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
@ -97,14 +92,31 @@ func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, bool) {
if ep, ok := p.(extendableProto); ok {
return ep, ok
func extendable(p interface{}) (extendableProto, error) {
switch p := p.(type) {
case extendableProto:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
if ep, ok := p.(extendableProtoV1); ok {
return extensionAdapter{ep}, ok
return p, nil
case extendableProtoV1:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return nil, false
return extensionAdapter{p}, nil
case extensionsBytes:
return slowExtensionAdapter{p}, nil
}
// Don't allocate a specific error containing %T:
// this is the hot path for Clone and MarshalText.
return nil, errNotExtendable
}
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
func isNilPtr(x interface{}) bool {
v := reflect.ValueOf(x)
return v.Kind() == reflect.Ptr && v.IsNil()
}
// XXX_InternalExtensions is an internal representation of proto extensions.
@ -149,16 +161,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc
return e.p.extensionMap, &e.p.mu
}
type extensionRange interface {
Message
ExtensionRangeArray() []ExtensionRange
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem()
var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
@ -198,8 +200,8 @@ func SetRawExtension(base Message, id int32, b []byte) {
*ext = append(*ext, b...)
return
}
epb, ok := extendable(base)
if !ok {
epb, err := extendable(base)
if err != nil {
return
}
extmap := epb.extensionsWrite()
@ -207,7 +209,7 @@ func SetRawExtension(base Message, id int32, b []byte) {
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extensionRange, field int32) bool {
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
@ -223,8 +225,11 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if ea, ok := pbi.(slowExtensionAdapter); ok {
pbi = ea.extensionsBytes
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
@ -269,80 +274,6 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensions(e *XXX_InternalExtensions) error {
m, mu := e.extensionsRead()
if m == nil {
return nil // fast path
}
mu.Lock()
defer mu.Unlock()
return encodeExtensionsMap(m)
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionsMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func extensionsSize(e *XXX_InternalExtensions) (n int) {
m, mu := e.extensionsRead()
if m == nil {
return 0
}
mu.Lock()
defer mu.Unlock()
return extensionsMapSize(m)
}
func extensionsMapSize(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
if epb, doki := pb.(extensionsBytes); doki {
@ -366,8 +297,8 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
return false
}
// TODO: Check types, field numbers, etc.?
epb, ok := extendable(pb)
if !ok {
epb, err := extendable(pb)
if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
@ -375,46 +306,26 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
return false
}
mu.Lock()
_, ok = extmap[extension.Field]
_, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
ext := pb.GetExtensions()
for offset < len(*ext) {
tag, n1 := DecodeVarint((*ext)[offset:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
n2, err := size((*ext)[offset+n1:], wireType)
if err != nil {
panic(err)
}
newOffset := offset + n1 + n2
if fieldNum == theFieldNum {
*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
return offset
}
offset = newOffset
}
return -1
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
clearExtension(pb, extension.Field)
}
func clearExtension(pb Message, fieldNum int32) {
if epb, doki := pb.(extensionsBytes); doki {
if epb, ok := pb.(extensionsBytes); ok {
offset := 0
for offset != -1 {
offset = deleteExtension(epb, fieldNum, offset)
}
return
}
epb, ok := extendable(pb)
if !ok {
epb, err := extendable(pb)
if err != nil {
return
}
// TODO: Check types, field numbers, etc.?
@ -422,37 +333,31 @@ func clearExtension(pb Message, fieldNum int32) {
delete(extmap, fieldNum)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
// GetExtension retrieves a proto2 extended field from pb.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
o := 0
for o < len(*ext) {
tag, n := DecodeVarint((*ext)[o:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
l, err := size((*ext)[o+n:], wireType)
return decodeExtensionFromBytes(extension, *ext)
}
epb, err := extendable(pb)
if err != nil {
return nil, err
}
if int32(fieldNum) == extension.Field {
v, err := decodeExtension((*ext)[o:o+n+l], extension)
if err != nil {
return nil, err
if extension.ExtendedType != nil {
// can only check type if this is a complete descriptor
if cerr := checkExtensionTypes(epb, extension); cerr != nil {
return nil, cerr
}
return v, nil
}
o += n + l
}
return defaultExtensionValue(extension)
}
epb, ok := extendable(pb)
if !ok {
return nil, errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
}
emap, mu := epb.extensionsRead()
@ -479,6 +384,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
return e.value, nil
}
if extension.ExtensionType == nil {
// incomplete descriptor
return e.enc, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
@ -496,6 +406,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
if extension.ExtensionType == nil {
// incomplete descriptor, so no default
return nil, ErrMissingExtension
}
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
@ -530,31 +445,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
// Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
var err error
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
wire := int(x) & 7
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
if err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
if len(b) == 0 {
break
}
}
@ -564,9 +476,13 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(pb, e)
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
@ -581,9 +497,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, ok := extendable(pb)
if !ok {
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
epb, err := extendable(pb)
if err != nil {
return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
@ -610,30 +526,26 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
if epb, doki := pb.(extensionsBytes); doki {
if epb, ok := pb.(extensionsBytes); ok {
ClearExtension(pb, extension)
ext := epb.GetExtensions()
et := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
p := NewBuffer(nil)
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
newb, err := encodeExtension(extension, value)
if err != nil {
return err
}
*ext = append(*ext, p.buf...)
bb := epb.GetExtensions()
*bb = append(*bb, newb...)
return nil
}
epb, ok := extendable(pb)
if !ok {
return errors.New("proto: not an extendable proto")
epb, err := extendable(pb)
if err != nil {
return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@ -656,8 +568,8 @@ func ClearAllExtensions(pb Message) {
*ext = []byte{}
return
}
epb, ok := extendable(pb)
if !ok {
epb, err := extendable(pb)
if err != nil {
return
}
m := epb.extensionsWrite()
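
The reworked extendable, GetExtension and SetExtension paths above keep the public proto2 extension API the same for callers. A short sketch of that caller-side usage, with examplepb.Base and the extension descriptor examplepb.E_Note as hypothetical generated names assumed only for illustration:

package main

import (
	"log"

	"github.com/gogo/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package with an extendable message
)

func annotate(msg *examplepb.Base) {
	// SetExtension type-checks the value against the descriptor before storing it.
	if err := proto.SetExtension(msg, examplepb.E_Note, proto.String("reviewed")); err != nil {
		log.Fatalf("set extension: %v", err)
	}
	if proto.HasExtension(msg, examplepb.E_Note) {
		v, err := proto.GetExtension(msg, examplepb.E_Note)
		if err != nil {
			log.Fatalf("get extension: %v", err)
		}
		log.Printf("note: %s", *v.(*string))
	}
}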


@ -32,12 +32,36 @@ import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strings"
"sync"
)
type extensionsBytes interface {
Message
ExtensionRangeArray() []ExtensionRange
GetExtensions() *[]byte
}
type slowExtensionAdapter struct {
extensionsBytes
}
func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
}
func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
b := s.GetExtensions()
m, err := BytesToExtensionsMap(*b)
if err != nil {
panic(err)
}
return m, notLocker{}
}
func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
if reflect.ValueOf(pb).IsNil() {
return ifnotset
@ -56,19 +80,28 @@ func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool
}
func (this *Extension) Equal(that *Extension) bool {
if err := this.Encode(); err != nil {
return false
}
if err := that.Encode(); err != nil {
return false
}
return bytes.Equal(this.enc, that.enc)
}
func (this *Extension) Compare(that *Extension) int {
if err := this.Encode(); err != nil {
return 1
}
if err := that.Encode(); err != nil {
return -1
}
return bytes.Compare(this.enc, that.enc)
}
func SizeOfInternalExtension(m extendableProto) (n int) {
return SizeOfExtensionMap(m.extensionsWrite())
}
func SizeOfExtensionMap(m map[int32]Extension) (n int) {
return extensionsMapSize(m)
info := getMarshalInfo(reflect.TypeOf(m))
return info.sizeV1Extensions(m.extensionsWrite())
}
type sortableMapElem struct {
@ -121,29 +154,48 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error)
return EncodeExtensionMap(m.extensionsWrite(), data)
}
func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
}
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
if err := encodeExtensionsMap(m); err != nil {
o := 0
for _, e := range m {
if err := e.Encode(); err != nil {
return 0, err
}
keys := make([]int, 0, len(m))
for k := range m {
keys = append(keys, int(k))
n := copy(data[o:], e.enc)
if n != len(e.enc) {
return 0, io.ErrShortBuffer
}
sort.Ints(keys)
for _, k := range keys {
n += copy(data[n:], m[int32(k)].enc)
o += n
}
return n, nil
return o, nil
}
func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
o := 0
end := len(data)
for _, e := range m {
if err := e.Encode(); err != nil {
return 0, err
}
n := copy(data[end-len(e.enc):], e.enc)
if n != len(e.enc) {
return 0, io.ErrShortBuffer
}
end -= n
o += n
}
return o, nil
}
func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
if m[id].value == nil || m[id].desc == nil {
return m[id].enc, nil
}
if err := encodeExtensionsMap(m); err != nil {
e := m[id]
if err := e.Encode(); err != nil {
return nil, err
}
return m[id].enc, nil
return e.enc, nil
}
func size(buf []byte, wire int) (int, error) {
@ -218,35 +270,58 @@ func AppendExtension(e Message, tag int32, buf []byte) {
}
}
func encodeExtension(e *Extension) error {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
return nil
func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
ei := u.getExtElemInfo(extension)
v := value
p := toAddrPointer(&v, ei.isptr)
siz := ei.sizer(p, SizeVarint(ei.wiretag))
buf := make([]byte, 0, siz)
return ei.marshaler(buf, p, ei.wiretag, false)
}
func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
o := 0
for o < len(buf) {
tag, n := DecodeVarint((buf)[o:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
if o+n > len(buf) {
return nil, fmt.Errorf("unable to decode extension")
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
l, err := size((buf)[o+n:], wireType)
if err != nil {
return nil, err
}
if int32(fieldNum) == extension.Field {
if o+n+l > len(buf) {
return nil, fmt.Errorf("unable to decode extension")
}
v, err := decodeExtension((buf)[o:o+n+l], extension)
if err != nil {
return nil, err
}
return v, nil
}
o += n + l
}
return defaultExtensionValue(extension)
}
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
func (this *Extension) Encode() error {
if this.enc == nil {
var err error
this.enc, err = encodeExtension(this.desc, this.value)
if err != nil {
return err
}
e.enc = p.buf
}
return nil
}
func (this Extension) GoString() string {
if this.enc == nil {
if err := encodeExtension(&this); err != nil {
panic(err)
}
if err := this.Encode(); err != nil {
return fmt.Sprintf("error encoding extension: %v", err)
}
return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
}
@ -292,3 +367,23 @@ func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
pb := extendable.(extendableProto)
return pb.extensionsWrite()
}
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
ext := pb.GetExtensions()
for offset < len(*ext) {
tag, n1 := DecodeVarint((*ext)[offset:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
n2, err := size((*ext)[offset+n1:], wireType)
if err != nil {
panic(err)
}
newOffset := offset + n1 + n2
if fieldNum == theFieldNum {
*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
return offset
}
offset = newOffset
}
return -1
}


@ -273,6 +273,67 @@ import (
"sync"
)
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
// Marshal reports this when a required field is not initialized.
// Unmarshal reports this when a required field is missing from the wire data.
type RequiredNotSetError struct{ field string }
func (e *RequiredNotSetError) Error() string {
if e.field == "" {
return fmt.Sprintf("proto: required field not set")
}
return fmt.Sprintf("proto: required field %q not set", e.field)
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
type invalidUTF8Error struct{ field string }
func (e *invalidUTF8Error) Error() string {
if e.field == "" {
return "proto: invalid UTF-8 detected"
}
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
}
func (e *invalidUTF8Error) InvalidUTF8() bool {
return true
}
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
// This error should not be exposed to the external API as such errors should
// be recreated with the field information.
var errInvalidUTF8 = &invalidUTF8Error{}
// isNonFatal reports whether the error is either a RequiredNotSet error
// or a InvalidUTF8 error.
func isNonFatal(err error) bool {
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
return true
}
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
return true
}
return false
}
type nonFatal struct{ E error }
// Merge merges err into nf and reports whether it was successful.
// Otherwise it returns false for any fatal non-nil errors.
func (nf *nonFatal) Merge(err error) (ok bool) {
if err == nil {
return true // not an error
}
if !isNonFatal(err) {
return false // fatal error
}
if nf.E == nil {
nf.E = err // store first instance of non-fatal error
}
return true
}
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
@ -280,26 +341,6 @@ type Message interface {
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
@ -309,16 +350,7 @@ type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
@ -343,6 +375,30 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
// SetDeterministic sets whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexographical order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
p.deterministic = deterministic
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
@ -552,9 +608,11 @@ func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
// v is a pointer to a struct.
// v is a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
defaultMu.RLock()
dm, ok := defaults[v.Type()]
@ -656,8 +714,11 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
// f is *T or T or []*T or []T
switch f.Kind() {
case reflect.Struct:
setDefaults(f, recur, zeros)
case reflect.Ptr:
if f.IsNil() {
continue
@ -667,7 +728,7 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
if e.Kind() == reflect.Ptr && e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
@ -739,6 +800,9 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Struct:
nestedMessage = true // non-nullable
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
@ -748,7 +812,7 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
case reflect.Ptr, reflect.Struct:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
@ -831,22 +895,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
return sf, false, nil
}
// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
s := mapKeySorter{vs: vs}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
@ -855,6 +909,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
case reflect.Bool:
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
case reflect.String:
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
default:
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
@ -895,3 +955,13 @@ const GoGoProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const GoGoProtoPackageIsVersion1 = true
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
// This type is not subject to any compatibility guarantee.
type InternalMessageInfo struct {
marshal *marshalInfo
unmarshal *unmarshalInfo
merge *mergeInfo
discard *discardInfo
}
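
lib.go above adds Buffer.SetDeterministic alongside the non-fatal error plumbing. A minimal sketch of deterministic marshaling through that Buffer API, again with examplepb.Event standing in as a hypothetical generated message:

package main

import (
	"log"

	"github.com/gogo/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package, illustration only
)

func marshalStable(msg *examplepb.Event) []byte {
	buf := proto.NewBuffer(nil)
	// With deterministic mode enabled, map fields are serialized with sorted keys,
	// so equal messages produce byte-identical output within a single binary.
	buf.SetDeterministic(true)
	if err := buf.Marshal(msg); err != nil {
		log.Fatalf("marshal: %v", err)
	}
	return buf.Bytes()
}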


@ -33,6 +33,14 @@ import (
"strconv"
)
type Sizer interface {
Size() int
}
type ProtoSizer interface {
ProtoSize() int
}
func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
s, ok := m[value]
if !ok {


@ -36,12 +36,7 @@ package proto
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@ -94,10 +89,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
}
func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil {
return true
}
return false
return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
@ -147,50 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
if err := encodeExtensions(exts); err != nil {
return nil, err
}
m, _ = exts.extensionsRead()
case map[int32]Extension:
if err := encodeExtensionsMap(exts); err != nil {
return nil, err
}
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@ -228,84 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m, _ = exts.extensionsRead()
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}


@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@ -38,32 +38,13 @@
package proto
import (
"math"
"reflect"
"sync"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
const unsafeAllowed = false
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// zeroField is a noop when calling pointer.offset.
var zeroField = field([]int{})
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
// The pointer type is for the table-driven decoder.
// The implementation here uses a reflect.Value of pointer type to
// create a generic pointer. In pointer_unsafe.go we use unsafe
// instead of reflect to implement the same (but faster) interface.
type pointer struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
return pointer{v: reflect.ValueOf(*i)}
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
return pointer{v: u}
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{v: v}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
func (p pointer) isNil() bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
// grow updates the slice s in place to make it one element longer.
// s must be addressable.
// Returns the (addressable) new element.
func grow(s reflect.Value) reflect.Value {
n, m := s.Len(), s.Cap()
if n < m {
p.v.SetLen(n + 1)
s.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
}
return s.Index(n)
}
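
A standalone sketch of the `grow` helper above (same logic, re-declared here outside the library), showing how an addressable slice held in a `reflect.Value` is extended by one element and only reallocated when capacity runs out:

```go
package main

import (
	"fmt"
	"reflect"
)

// grow extends an addressable slice by one element, reallocating through
// reflect.Append only when capacity is exhausted, and returns the new element.
func grow(s reflect.Value) reflect.Value {
	n, m := s.Len(), s.Cap()
	if n < m {
		s.SetLen(n + 1)
	} else {
		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
	}
	return s.Index(n)
}

func main() {
	xs := []int32{1, 2}
	v := reflect.ValueOf(&xs).Elem() // addressable []int32
	grow(v).SetInt(3)
	fmt.Println(xs) // [1 2 3]
}
```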
func (p word32Slice) Len() int {
return p.v.Len()
func (p pointer) toInt64() *int64 {
return p.v.Interface().(*int64)
}
func (p pointer) toInt64Ptr() **int64 {
return p.v.Interface().(**int64)
}
func (p pointer) toInt64Slice() *[]int64 {
return p.v.Interface().(*[]int64)
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
var int32ptr = reflect.TypeOf((*int32)(nil))
func (p pointer) toInt32() *int32 {
return p.v.Convert(int32ptr).Interface().(*int32)
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
// The toInt32Ptr/Slice methods don't work because of enums.
// Instead, we must use set/get methods for the int32ptr/slice case.
/*
func (p pointer) toInt32Ptr() **int32 {
return p.v.Interface().(**int32)
}
func (p pointer) toInt32Slice() *[]int32 {
return p.v.Interface().(*[]int32)
}
*/
func (p pointer) getInt32Ptr() *int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().(*int32)
}
// an enum
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
}
func (p pointer) setInt32Ptr(v int32) {
// Allocate value in a *int32. Possibly convert that to a *enum.
// Then assign it to a **int32 or **enum.
// Note: we can convert *int32 to *enum, but we can't convert
// **int32 to **enum!
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
// getInt32Slice copies []int32 from p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getInt32Slice() []int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().([]int32)
}
// an enum
// Allocate a []int32, then assign []enum's values into it.
// Note: we can't convert []enum to []int32.
slice := p.v.Elem()
s := make([]int32, slice.Len())
for i := 0; i < slice.Len(); i++ {
s[i] = int32(slice.Index(i).Int())
}
return s
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
// setInt32Slice copies []int32 into p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setInt32Slice(v []int32) {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
p.v.Elem().Set(reflect.ValueOf(v))
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
// an enum
// Allocate a []enum, then assign []int32's values into it.
// Note: we can't convert []enum to []int32.
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
for i, x := range v {
slice.Index(i).SetInt(int64(x))
}
panic("unreachable")
p.v.Elem().Set(slice)
}
func (p pointer) appendInt32Slice(v int32) {
grow(p.v.Elem()).SetInt(int64(v))
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
func (p pointer) toUint64() *uint64 {
return p.v.Interface().(*uint64)
}
func (p pointer) toUint64Ptr() **uint64 {
return p.v.Interface().(**uint64)
}
func (p pointer) toUint64Slice() *[]uint64 {
return p.v.Interface().(*[]uint64)
}
func (p pointer) toUint32() *uint32 {
return p.v.Interface().(*uint32)
}
func (p pointer) toUint32Ptr() **uint32 {
return p.v.Interface().(**uint32)
}
func (p pointer) toUint32Slice() *[]uint32 {
return p.v.Interface().(*[]uint32)
}
func (p pointer) toBool() *bool {
return p.v.Interface().(*bool)
}
func (p pointer) toBoolPtr() **bool {
return p.v.Interface().(**bool)
}
func (p pointer) toBoolSlice() *[]bool {
return p.v.Interface().(*[]bool)
}
func (p pointer) toFloat64() *float64 {
return p.v.Interface().(*float64)
}
func (p pointer) toFloat64Ptr() **float64 {
return p.v.Interface().(**float64)
}
func (p pointer) toFloat64Slice() *[]float64 {
return p.v.Interface().(*[]float64)
}
func (p pointer) toFloat32() *float32 {
return p.v.Interface().(*float32)
}
func (p pointer) toFloat32Ptr() **float32 {
return p.v.Interface().(**float32)
}
func (p pointer) toFloat32Slice() *[]float32 {
return p.v.Interface().(*[]float32)
}
func (p pointer) toString() *string {
return p.v.Interface().(*string)
}
func (p pointer) toStringPtr() **string {
return p.v.Interface().(**string)
}
func (p pointer) toStringSlice() *[]string {
return p.v.Interface().(*[]string)
}
func (p pointer) toBytes() *[]byte {
return p.v.Interface().(*[]byte)
}
func (p pointer) toBytesSlice() *[][]byte {
return p.v.Interface().(*[][]byte)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return p.v.Interface().(*XXX_InternalExtensions)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return p.v.Interface().(*map[int32]Extension)
}
func (p pointer) getPointer() pointer {
return pointer{v: p.v.Elem()}
}
func (p pointer) setPointer(q pointer) {
p.v.Elem().Set(q.v)
}
func (p pointer) appendPointer(q pointer) {
grow(p.v.Elem()).Set(q.v)
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
// getPointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getPointerSlice() []pointer {
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
// setPointerSlice copies []pointer into p as a new []*T.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setPointerSlice(v []pointer) {
if v == nil {
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
for _, p := range v {
s = reflect.Append(s, p.v)
}
panic("unreachable")
p.v.Elem().Set(s)
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(int64(x)))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
if p.v.Elem().IsNil() {
return pointer{v: p.v.Elem()}
}
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
func (p word64Slice) Len() int {
return p.v.Len()
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
// TODO: check that p.v.Type().Elem() == t?
return p.v
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}
var atomicLock sync.Mutex
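
The purego/appengine file above replaces unsafe pointer arithmetic with reflect. A minimal standalone sketch of the idea (the `msg` type is hypothetical, and `pointer`/`toPointer` are local stand-ins, not the library's API): wrap a pointer in a `reflect.Value`, offset to a field by its index path, and read or write it through a typed pointer.

```go
package main

import (
	"fmt"
	"reflect"
)

// pointer and toPointer are local stand-ins for the types above.
type pointer struct{ v reflect.Value }

func toPointer(i interface{}) pointer { return pointer{v: reflect.ValueOf(i)} }

// offset moves from a *struct to one of its fields, identified by the index
// path that reflect's FieldByIndex expects.
func (p pointer) offset(f []int) pointer {
	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}

func (p pointer) toString() *string { return p.v.Interface().(*string) }

// msg is a hypothetical message struct used only for this sketch.
type msg struct{ Name string }

func main() {
	m := &msg{Name: "hi"}
	nameField, _ := reflect.TypeOf(m).Elem().FieldByName("Name")
	*toPointer(m).offset(nameField.Index).toString() = "hello"
	fmt.Println(m.Name) // hello
}
```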


@ -1,6 +1,6 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
@ -26,7 +26,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
@ -34,52 +38,22 @@ import (
"reflect"
)
func structPointer_FieldPointer(p structPointer, f field) structPointer {
panic("not implemented")
// TODO: untested, so probably incorrect.
func (p pointer) getRef() pointer {
return pointer{v: p.v.Addr()}
}
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
panic("not implemented")
func (p pointer) appendRef(v pointer, typ reflect.Type) {
slice := p.getSlice(typ)
elem := v.asPointerTo(typ).Elem()
newSlice := reflect.Append(slice, elem)
slice.Set(newSlice)
}
func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
panic("not implemented")
}
func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
panic("not implemented")
}
func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
panic("not implemented")
}
func structPointer_Add(p structPointer, size field) structPointer {
panic("not implemented")
}
func structPointer_Len(p structPointer, f field) int {
panic("not implemented")
}
func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
panic("not implemented")
}
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
panic("not implemented")
}
func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice {
panic("not implemented")
}
type structRefSlice struct{}
func (v *structRefSlice) Len() int {
panic("not implemented")
}
func (v *structRefSlice) Index(i int) structPointer {
panic("not implemented")
func (p pointer) getSlice(typ reflect.Type) reflect.Value {
sliceTyp := reflect.SliceOf(typ)
slice := p.asPointerTo(sliceTyp)
slice = slice.Elem()
return slice
}


@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine,!js
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
@ -37,38 +37,13 @@ package proto
import (
"reflect"
"sync/atomic"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
const unsafeAllowed = true
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
return f != invalidField
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
// The pointer type below is for the new table-driven encoder/decoder.
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
p unsafe.Pointer
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
// Super-tricky - read pointer out of data word of interface value.
// Saves ~25ns over the equivalent:
// return valToPointer(reflect.ValueOf(*i))
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
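
The "super-tricky" line above relies on the gc runtime's interface layout: a non-empty interface value is two machine words, a type word and a data word, and for a pointer-shaped concrete value the data word is that pointer itself. A hedged, runtime-dependent sketch of that fact (the `msg` type is hypothetical; none of this is guaranteed by the Go spec):

```go
package main

import (
	"fmt"
	"unsafe"
)

// msg is a hypothetical concrete type stored in the interface.
type msg struct{ n int }

func main() {
	m := &msg{n: 7}
	var i interface{} = m
	// Read the second word (the data word) of the interface header.
	data := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]
	fmt.Println((*msg)(data).n) // 7 on the gc runtime
}
```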
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *v to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
// The interface is not of pointer type. The data word is the pointer
// to the data.
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
return **p
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{p: unsafe.Pointer(v.Pointer())}
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
// For safety, we should panic if !f.IsValid, however calling panic causes
// this to no longer be inlineable, which is a serious performance cost.
/*
if !f.IsValid() {
panic("invalid field")
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
*/
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
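
In this unsafe implementation a `field` is just a byte offset, so `offset` is plain pointer arithmetic. A standalone sketch using `unsafe.Offsetof` on a hypothetical struct (not the library's code):

```go
package main

import (
	"fmt"
	"unsafe"
)

// msg is a hypothetical struct; B sits at some byte offset from its start.
type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 1, B: "x"}
	off := unsafe.Offsetof(m.B) // the "field" value: a byte offset
	// offset() equivalent: add the offset to the struct's address and retype.
	pb := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))
	*pb = "hello"
	fmt.Println(m.B) // hello
}
```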
func word64_IsNil(p word64) bool {
return *p == nil
func (p pointer) isNil() bool {
return p.p == nil
}
func word64_Get(p word64) uint64 {
return **p
func (p pointer) toInt64() *int64 {
return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
return (*int32)(p.p)
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
func (p pointer) toInt32Ptr() **int32 {
return (**int32)(p.p)
}
func (p pointer) toInt32Slice() *[]int32 {
return (*[]int32)(p.p)
}
*/
func (p pointer) getInt32Ptr() *int32 {
return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
*(**int32)(p.p) = &v
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
return *(*[]int32)(p.p)
}
func word64Val_Get(p word64Val) uint64 {
return *p
// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
*(*[]int32)(p.p) = v
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
func (p pointer) appendInt32Slice(v int32) {
s := (*[]int32)(p.p)
*s = append(*s, v)
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
func (p pointer) toUint64() *uint64 {
return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return (*map[int32]Extension)(p.p)
}
// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
// Super-tricky - p should point to a []*T where T is a
// message type. We load it as []pointer.
return *(*[]pointer)(p.p)
}
// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
// Super-tricky - p should point to a []*T where T is a
// message type. We store it as []pointer.
*(*[]pointer)(p.p) = v
}
// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
}
// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
*(*unsafe.Pointer)(p.p) = q.p
}
// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
s := (*[]unsafe.Pointer)(p.p)
*s = append(*s, q.p)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
// Super-tricky - read pointer out of data word of interface value.
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}
// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
return reflect.NewAt(t, p.p)
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
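
The atomic helpers above publish lazily built per-type info structs without a lock on this build path. A standalone sketch of the same pattern with the real `sync/atomic` API (the `info` type and the `loadInfo`/`storeInfo` names are hypothetical):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// info stands in for the per-type marshal/unmarshal info structs.
type info struct{ ready bool }

var cached *info

// loadInfo atomically reads the published pointer, if any.
func loadInfo(p **info) *info {
	return (*info)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}

// storeInfo atomically publishes a freshly built value.
func storeInfo(p **info, v *info) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}

func main() {
	if loadInfo(&cached) == nil {
		storeInfo(&cached, &info{ready: true})
	}
	fmt.Println(loadInfo(&cached).ready) // true
}
```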


@ -1,6 +1,6 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
@ -26,7 +26,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine,!js
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
@ -37,92 +37,20 @@ import (
"unsafe"
)
func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
point := unsafe.Pointer(uintptr(p) + uintptr(f))
r := reflect.NewAt(t, point)
return r.Interface()
func (p pointer) getRef() pointer {
return pointer{p: (unsafe.Pointer)(&p.p)}
}
func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
point := unsafe.Pointer(uintptr(p) + uintptr(f))
r := reflect.NewAt(t, point)
if r.Elem().IsNil() {
return nil
}
return r.Elem().Interface()
func (p pointer) appendRef(v pointer, typ reflect.Type) {
slice := p.getSlice(typ)
elem := v.asPointerTo(typ).Elem()
newSlice := reflect.Append(slice, elem)
slice.Set(newSlice)
}
func copyUintPtr(oldptr, newptr uintptr, size int) {
oldbytes := make([]byte, 0)
oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
oldslice.Data = oldptr
oldslice.Len = size
oldslice.Cap = size
newbytes := make([]byte, 0)
newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
newslice.Data = newptr
newslice.Len = size
newslice.Cap = size
copy(newbytes, oldbytes)
}
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
copyUintPtr(uintptr(oldptr), uintptr(newptr), size)
}
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
size := typ.Elem().Size()
oldHeader := structPointer_GetSliceHeader(base, f)
oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem()
newLen := oldHeader.Len + 1
newSlice := reflect.MakeSlice(typ, newLen, newLen)
reflect.Copy(newSlice, oldSlice)
bas := toStructPointer(newSlice)
oldHeader.Data = uintptr(bas)
oldHeader.Len = newLen
oldHeader.Cap = newLen
return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size)))
}
func structPointer_FieldPointer(p structPointer, f field) structPointer {
return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_Add(p structPointer, size field) structPointer {
return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size)))
}
func structPointer_Len(p structPointer, f field) int {
return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
}
func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice {
return &structRefSlice{p: p, f: f, size: size}
}
// A structRefSlice represents a slice of structs (themselves submessages or groups).
type structRefSlice struct {
p structPointer
f field
size uintptr
}
func (v *structRefSlice) Len() int {
return structPointer_Len(v.p, v.f)
}
func (v *structRefSlice) Index(i int) structPointer {
ss := structPointer_GetStructPointer(v.p, v.f)
ss1 := structPointer_GetRefStructPointer(ss, 0)
return structPointer_Add(ss1, field(uintptr(i)*v.size))
func (p pointer) getSlice(typ reflect.Type) reflect.Value {
sliceTyp := reflect.SliceOf(typ)
slice := p.asPointerTo(sliceTyp)
slice = slice.Elem()
return slice
}
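
`getSlice` above recovers the `[]T` type from an element type with `reflect.SliceOf` and views the raw pointer as a `*[]T` (via `asPointerTo`, i.e. `reflect.NewAt`). A standalone sketch of that round trip on a hypothetical `msg` slice, outside the library:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// msg is a hypothetical element type.
type msg struct{ N int }

func main() {
	var xs []msg
	elemTyp := reflect.TypeOf(msg{})
	// asPointerTo equivalent: view the raw address of xs as a *[]msg.
	slice := reflect.NewAt(reflect.SliceOf(elemTyp), unsafe.Pointer(&xs)).Elem()
	// appendRef equivalent: append one element value through reflect.
	slice.Set(reflect.Append(slice, reflect.ValueOf(msg{N: 1})))
	fmt.Println(xs) // [{1}]
}
```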


@ -63,42 +63,6 @@ const (
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@ -145,13 +109,6 @@ type StructProperties struct {
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
@ -187,7 +144,7 @@ type Properties struct {
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
Default string // default value
@ -196,37 +153,21 @@ type Properties struct {
CastType string
StdTime bool
StdDuration bool
WktPointer bool
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sstype reflect.Type // set for slices of structs types only
ctype reflect.Type // set for custom types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
MapKeyProp *Properties // set for map types only
MapValProp *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s = ","
s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
@ -272,29 +213,14 @@ func (p *Properties) Parse(s string) {
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
@ -309,6 +235,7 @@ func (p *Properties) Parse(s string) {
return
}
outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
@ -336,7 +263,7 @@ func (p *Properties) Parse(s string) {
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
break outer
}
case strings.HasPrefix(f, "embedded="):
p.OrigName = strings.Split(f, "=")[1]
@ -348,301 +275,58 @@ func (p *Properties) Parse(s string) {
p.StdTime = true
case f == "stdduration":
p.StdDuration = true
case f == "wktptr":
p.WktPointer = true
}
}
}
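
`Parse` above consumes struct tags of the form quoted later in this file ("bytes,49,opt,def=hello!"): wire type, field number, cardinality, then name/default and other options. A standalone sketch that only splits such a tag on a hypothetical field, without calling into the library:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// example carries a hypothetical tag in the format Parse handles.
type example struct {
	Label *string `protobuf:"bytes,49,opt,name=label,def=hello!"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Label")
	fields := strings.Split(f.Tag.Get("protobuf"), ",")
	fmt.Println("wire:", fields[0])  // bytes
	fmt.Println("tag:", fields[1])   // 49
	fmt.Println("rest:", fields[2:]) // [opt name=label def=hello!]
}
```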
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
// setFieldProps initializes the field properties for submessages and maps.
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
isMap := typ.Kind() == reflect.Map
if len(p.CustomType) > 0 && !isMap {
p.setCustomEncAndDec(typ)
p.ctype = typ
p.setTag(lockGetProp)
return
}
if p.StdTime && !isMap {
p.setTimeEncAndDec(typ)
p.setTag(lockGetProp)
return
}
if p.StdDuration && !isMap {
p.setDurationEncAndDec(typ)
p.setTag(lockGetProp)
return
}
if p.WktPointer && !isMap {
p.setTag(lockGetProp)
return
}
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
} else {
p.enc = (*Buffer).enc_ref_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_ref_bool
}
case reflect.Int32:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
} else {
p.enc = (*Buffer).enc_ref_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_ref_int32
}
case reflect.Uint32:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
} else {
p.enc = (*Buffer).enc_ref_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_ref_uint32
}
case reflect.Int64, reflect.Uint64:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
} else {
p.enc = (*Buffer).enc_ref_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_ref_int64
}
case reflect.Float32:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
} else {
p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_ref_uint32
}
case reflect.Float64:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
} else {
p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_ref_int64
}
case reflect.String:
if p.proto3 {
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
} else {
p.enc = (*Buffer).enc_ref_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_ref_string
}
case reflect.Struct:
p.stype = typ
p.isMarshaler = isMarshaler(typ)
p.isUnmarshaler = isUnmarshaler(typ)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_ref_struct_message
p.dec = (*Buffer).dec_ref_struct_message
p.size = size_ref_struct_message
} else {
fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ)
}
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.dec = (*Buffer).dec_slice_byte
if p.proto3 {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
} else {
p.enc = (*Buffer).enc_slice_byte
p.size = size_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
p.stype = t3
}
case reflect.Struct:
p.setSliceOfNonPointerStructs(t1)
p.stype = t2
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
p.MapKeyProp = &Properties{}
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
@ -650,29 +334,16 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.CustomType = p.CustomType
p.mvalprop.StdDuration = p.StdDuration
p.mvalprop.StdTime = p.StdTime
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
p.MapValProp.CustomType = p.CustomType
p.MapValProp.StdDuration = p.StdDuration
p.MapValProp.StdTime = p.StdTime
p.MapValProp.WktPointer = p.WktPointer
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
p.setTag(lockGetProp)
}
func (p *Properties) setTag(lockGetProp bool) {
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
@ -684,19 +355,8 @@ func (p *Properties) setTag(lockGetProp bool) {
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
@ -706,14 +366,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
p.setFieldProps(typ, f, lockGetProp)
}
var (
@ -734,9 +391,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
@ -749,24 +403,14 @@ func GetProperties(t reflect.Type) *StructProperties {
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
reflect.PtrTo(t).Implements(extendableProtoV1Type) ||
reflect.PtrTo(t).Implements(extendableBytesType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
@ -777,23 +421,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_InternalExtensions" { // special case
p.enc = (*Buffer).enc_exts
p.dec = nil // not needed
p.size = size_exts
} else if f.Name == "XXX_extensions" { // special case
if len(f.Tag.Get("protobuf")) > 0 {
p.enc = (*Buffer).enc_ext_slice_byte
p.dec = nil // not needed
p.size = size_ext_slice_byte
} else {
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
} else if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
isOneofMessage = true
@ -809,9 +436,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
@ -822,8 +446,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
_, _, _, oots = om.XXX_OneofFuncs()
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
@ -873,30 +496,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
return prop
}
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
@ -925,20 +524,42 @@ func EnumValueMap(enumType string) map[string]int32 {
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
// Generated code always calls RegisterType with nil x.
// This check is just for extra safety.
protoTypedNils[name] = x
} else {
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
}
revProtoTypes[t] = name
}
// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
if reflect.TypeOf(x).Kind() != reflect.Map {
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
}
if _, ok := protoMapTypes[name]; ok {
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoMapTypes[name] = t
revProtoTypes[t] = name
}
@ -954,7 +575,14 @@ func MessageName(x Message) string {
}
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
if t, ok := protoTypedNils[name]; ok {
return reflect.TypeOf(t)
}
return protoMapTypes[name]
}
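A minimal editorial sketch (not part of this commit) of how the registry above is used: generated code registers a typed nil pointer under the fully qualified proto name, and MessageType later recovers the pointer-to-struct type from that name. The Foo type and the "example.Foo" name below are hypothetical stand-ins for generated code.

package main

import (
	"fmt"
	"reflect"

	proto "github.com/gogo/protobuf/proto"
)

// Foo is a hypothetical message type standing in for generated code.
type Foo struct{}

func (m *Foo) Reset()         { *m = Foo{} }
func (m *Foo) String() string { return "Foo{}" }
func (*Foo) ProtoMessage()    {}

func main() {
	// Generated code normally performs this registration in an init func,
	// always passing a typed nil pointer.
	proto.RegisterType((*Foo)(nil), "example.Foo")

	// MessageType returns the registered pointer-to-struct type.
	t := proto.MessageType("example.Foo")
	fmt.Println(t == reflect.TypeOf((*Foo)(nil))) // true
}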
// A registry of all linked proto files.
var (


@ -1,6 +1,6 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
@ -29,83 +29,8 @@
package proto
import (
"fmt"
"os"
"reflect"
)
func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
p.ctype = typ
if p.Repeated {
p.enc = (*Buffer).enc_custom_slice_bytes
p.dec = (*Buffer).dec_custom_slice_bytes
p.size = size_custom_slice_bytes
} else if typ.Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_custom_bytes
p.dec = (*Buffer).dec_custom_bytes
p.size = size_custom_bytes
} else {
p.enc = (*Buffer).enc_custom_ref_bytes
p.dec = (*Buffer).dec_custom_ref_bytes
p.size = size_custom_ref_bytes
}
}
func (p *Properties) setDurationEncAndDec(typ reflect.Type) {
if p.Repeated {
if typ.Elem().Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_slice_duration
p.dec = (*Buffer).dec_slice_duration
p.size = size_slice_duration
} else {
p.enc = (*Buffer).enc_slice_ref_duration
p.dec = (*Buffer).dec_slice_ref_duration
p.size = size_slice_ref_duration
}
} else if typ.Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_duration
p.dec = (*Buffer).dec_duration
p.size = size_duration
} else {
p.enc = (*Buffer).enc_ref_duration
p.dec = (*Buffer).dec_ref_duration
p.size = size_ref_duration
}
}
func (p *Properties) setTimeEncAndDec(typ reflect.Type) {
if p.Repeated {
if typ.Elem().Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_slice_time
p.dec = (*Buffer).dec_slice_time
p.size = size_slice_time
} else {
p.enc = (*Buffer).enc_slice_ref_time
p.dec = (*Buffer).dec_slice_ref_time
p.size = size_slice_ref_time
}
} else if typ.Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_time
p.dec = (*Buffer).dec_time
p.size = size_time
} else {
p.enc = (*Buffer).enc_ref_time
p.dec = (*Buffer).dec_ref_time
p.size = size_ref_time
}
}
func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
t2 := typ.Elem()
p.sstype = typ
p.stype = t2
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
p.enc = (*Buffer).enc_slice_ref_struct_message
p.dec = (*Buffer).dec_slice_ref_struct_message
p.size = size_slice_ref_struct_message
if p.Wire != "bytes" {
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2)
}
}
var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()

vendor/github.com/gogo/protobuf/proto/table_marshal.go (generated, vendored, new file, 3006 lines): diff suppressed because it is too large.

@ -0,0 +1,388 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"reflect"
"time"
)
// makeMessageRefMarshaler differs a bit from makeMessageMarshaler
// It marshals a message T instead of a *T
func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
siz := u.size(ptr)
return siz + SizeVarint(uint64(siz)) + tagsize
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
b = appendVarint(b, wiretag)
siz := u.cachedsize(ptr)
b = appendVarint(b, uint64(siz))
return u.marshal(b, ptr, deterministic)
}
}
// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler
// It marshals a slice of messages []T instead of []*T
func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
s := ptr.getSlice(u.typ)
n := 0
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
e := elem.Interface()
v := toAddrPointer(&e, false)
siz := u.size(v)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
return n
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getSlice(u.typ)
var err, errreq error
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
e := elem.Interface()
v := toAddrPointer(&e, false)
b = appendVarint(b, wiretag)
siz := u.size(v)
b = appendVarint(b, uint64(siz))
b, err = u.marshal(b, v, deterministic)
if err != nil {
if _, ok := err.(*RequiredNotSetError); ok {
// Required field in submessage is not set.
// We record the error but keep going, to give a complete marshaling.
if errreq == nil {
errreq = err
}
continue
}
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
return b, errreq
}
}
func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
if ptr.isNil() {
return 0
}
m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
siz := m.Size()
return tagsize + SizeVarint(uint64(siz)) + siz
}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
if ptr.isNil() {
return b, nil
}
m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
siz := m.Size()
buf, err := m.Marshal()
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(siz))
b = append(b, buf...)
return b, nil
}
}
func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
m := ptr.asPointerTo(u.typ).Interface().(custom)
siz := m.Size()
return tagsize + SizeVarint(uint64(siz)) + siz
}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
m := ptr.asPointerTo(u.typ).Interface().(custom)
siz := m.Size()
buf, err := m.Marshal()
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(siz))
b = append(b, buf...)
return b, nil
}
}
func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
ts, err := timestampProto(*t)
if err != nil {
return 0
}
siz := Size(ts)
return tagsize + SizeVarint(uint64(siz)) + siz
}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
ts, err := timestampProto(*t)
if err != nil {
return nil, err
}
buf, err := Marshal(ts)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(buf)))
b = append(b, buf...)
return b, nil
}
}
func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
if ptr.isNil() {
return 0
}
t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
ts, err := timestampProto(*t)
if err != nil {
return 0
}
siz := Size(ts)
return tagsize + SizeVarint(uint64(siz)) + siz
}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
if ptr.isNil() {
return b, nil
}
t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
ts, err := timestampProto(*t)
if err != nil {
return nil, err
}
buf, err := Marshal(ts)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(buf)))
b = append(b, buf...)
return b, nil
}
}
func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
s := ptr.getSlice(u.typ)
n := 0
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
t := elem.Interface().(time.Time)
ts, err := timestampProto(t)
if err != nil {
return 0
}
siz := Size(ts)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
return n
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getSlice(u.typ)
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
t := elem.Interface().(time.Time)
ts, err := timestampProto(t)
if err != nil {
return nil, err
}
siz := Size(ts)
buf, err := Marshal(ts)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(siz))
b = append(b, buf...)
}
return b, nil
}
}
func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
s := ptr.getSlice(reflect.PtrTo(u.typ))
n := 0
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
t := elem.Interface().(*time.Time)
ts, err := timestampProto(*t)
if err != nil {
return 0
}
siz := Size(ts)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
return n
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getSlice(reflect.PtrTo(u.typ))
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
t := elem.Interface().(*time.Time)
ts, err := timestampProto(*t)
if err != nil {
return nil, err
}
siz := Size(ts)
buf, err := Marshal(ts)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(siz))
b = append(b, buf...)
}
return b, nil
}
}
func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
dur := durationProto(*d)
siz := Size(dur)
return tagsize + SizeVarint(uint64(siz)) + siz
}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
dur := durationProto(*d)
buf, err := Marshal(dur)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(buf)))
b = append(b, buf...)
return b, nil
}
}
func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
if ptr.isNil() {
return 0
}
d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
dur := durationProto(*d)
siz := Size(dur)
return tagsize + SizeVarint(uint64(siz)) + siz
}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
if ptr.isNil() {
return b, nil
}
d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
dur := durationProto(*d)
buf, err := Marshal(dur)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(buf)))
b = append(b, buf...)
return b, nil
}
}
func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
s := ptr.getSlice(u.typ)
n := 0
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
d := elem.Interface().(time.Duration)
dur := durationProto(d)
siz := Size(dur)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
return n
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getSlice(u.typ)
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
d := elem.Interface().(time.Duration)
dur := durationProto(d)
siz := Size(dur)
buf, err := Marshal(dur)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(siz))
b = append(b, buf...)
}
return b, nil
}
}
func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
return func(ptr pointer, tagsize int) int {
s := ptr.getSlice(reflect.PtrTo(u.typ))
n := 0
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
d := elem.Interface().(*time.Duration)
dur := durationProto(*d)
siz := Size(dur)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
return n
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getSlice(reflect.PtrTo(u.typ))
for i := 0; i < s.Len(); i++ {
elem := s.Index(i)
d := elem.Interface().(*time.Duration)
dur := durationProto(*d)
siz := Size(dur)
buf, err := Marshal(dur)
if err != nil {
return nil, err
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(siz))
b = append(b, buf...)
}
return b, nil
}
}

vendor/github.com/gogo/protobuf/proto/table_merge.go (generated, vendored, new file, 657 lines)

@ -0,0 +1,657 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
// Merge merges the src message into dst.
// This assumes that dst and src are of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
mi := atomicLoadMergeInfo(&a.merge)
if mi == nil {
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
atomicStoreMergeInfo(&a.merge, mi)
}
mi.merge(toPointer(&dst), toPointer(&src))
}
type mergeInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []mergeFieldInfo
unrecognized field // Offset of XXX_unrecognized
}
type mergeFieldInfo struct {
field field // Offset of field, guaranteed to be valid
// isPointer reports whether the value in the field is a pointer.
// This is true for the following situations:
// * Pointer to struct
// * Pointer to basic type (proto2 only)
// * Slice (first value in slice header is a pointer)
// * String (first value in string header is a pointer)
isPointer bool
// basicWidth reports the width of the field assuming that it is directly
// embedded in the struct (as is the case for basic types in proto3).
// The possible values are:
// 0: invalid
// 1: bool
// 4: int32, uint32, float32
// 8: int64, uint64, float64
basicWidth int
// Where dst and src are pointers to the types being merged.
merge func(dst, src pointer)
}
var (
mergeInfoMap = map[reflect.Type]*mergeInfo{}
mergeInfoLock sync.Mutex
)
func getMergeInfo(t reflect.Type) *mergeInfo {
mergeInfoLock.Lock()
defer mergeInfoLock.Unlock()
mi := mergeInfoMap[t]
if mi == nil {
mi = &mergeInfo{typ: t}
mergeInfoMap[t] = mi
}
return mi
}
// merge merges src into dst assuming they are both of type *mi.typ.
func (mi *mergeInfo) merge(dst, src pointer) {
if dst.isNil() {
panic("proto: nil destination")
}
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&mi.initialized) == 0 {
mi.computeMergeInfo()
}
for _, fi := range mi.fields {
sfp := src.offset(fi.field)
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
continue
}
if fi.basicWidth > 0 {
switch {
case fi.basicWidth == 1 && !*sfp.toBool():
continue
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
continue
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
continue
}
}
}
dfp := dst.offset(fi.field)
fi.merge(dfp, sfp)
}
// TODO: Make this faster?
out := dst.asPointerTo(mi.typ).Elem()
in := src.asPointerTo(mi.typ).Elem()
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
if mi.unrecognized.IsValid() {
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
}
}
}
func (mi *mergeInfo) computeMergeInfo() {
mi.lock.Lock()
defer mi.lock.Unlock()
if mi.initialized != 0 {
return
}
t := mi.typ
n := t.NumField()
props := GetProperties(t)
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mfi := mergeFieldInfo{field: toField(&f)}
tf := f.Type
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
switch tf.Kind() {
case reflect.Ptr, reflect.Slice, reflect.String:
// As a special case, we assume slices and strings are pointers
// since we know that the first field in the SliceHeader or
// StringHeader is a data pointer.
mfi.isPointer = true
case reflect.Bool:
mfi.basicWidth = 1
case reflect.Int32, reflect.Uint32, reflect.Float32:
mfi.basicWidth = 4
case reflect.Int64, reflect.Uint64, reflect.Float64:
mfi.basicWidth = 8
}
}
// Unwrap tf to get at its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic("both pointer and slice for basic type in " + tf.Name())
}
switch tf.Kind() {
case reflect.Int32:
switch {
case isSlice: // E.g., []int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
/*
sfsp := src.toInt32Slice()
if *sfsp != nil {
dfsp := dst.toInt32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
*/
sfs := src.getInt32Slice()
if sfs != nil {
dfs := dst.getInt32Slice()
dfs = append(dfs, sfs...)
if dfs == nil {
dfs = []int32{}
}
dst.setInt32Slice(dfs)
}
}
case isPointer: // E.g., *int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
/*
sfpp := src.toInt32Ptr()
if *sfpp != nil {
dfpp := dst.toInt32Ptr()
if *dfpp == nil {
*dfpp = Int32(**sfpp)
} else {
**dfpp = **sfpp
}
}
*/
sfp := src.getInt32Ptr()
if sfp != nil {
dfp := dst.getInt32Ptr()
if dfp == nil {
dst.setInt32Ptr(*sfp)
} else {
*dfp = *sfp
}
}
}
default: // E.g., int32
mfi.merge = func(dst, src pointer) {
if v := *src.toInt32(); v != 0 {
*dst.toInt32() = v
}
}
}
case reflect.Int64:
switch {
case isSlice: // E.g., []int64
mfi.merge = func(dst, src pointer) {
sfsp := src.toInt64Slice()
if *sfsp != nil {
dfsp := dst.toInt64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
}
case isPointer: // E.g., *int64
mfi.merge = func(dst, src pointer) {
sfpp := src.toInt64Ptr()
if *sfpp != nil {
dfpp := dst.toInt64Ptr()
if *dfpp == nil {
*dfpp = Int64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., int64
mfi.merge = func(dst, src pointer) {
if v := *src.toInt64(); v != 0 {
*dst.toInt64() = v
}
}
}
case reflect.Uint32:
switch {
case isSlice: // E.g., []uint32
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint32Slice()
if *sfsp != nil {
dfsp := dst.toUint32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint32{}
}
}
}
case isPointer: // E.g., *uint32
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint32Ptr()
if *sfpp != nil {
dfpp := dst.toUint32Ptr()
if *dfpp == nil {
*dfpp = Uint32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint32
mfi.merge = func(dst, src pointer) {
if v := *src.toUint32(); v != 0 {
*dst.toUint32() = v
}
}
}
case reflect.Uint64:
switch {
case isSlice: // E.g., []uint64
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint64Slice()
if *sfsp != nil {
dfsp := dst.toUint64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint64{}
}
}
}
case isPointer: // E.g., *uint64
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint64Ptr()
if *sfpp != nil {
dfpp := dst.toUint64Ptr()
if *dfpp == nil {
*dfpp = Uint64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint64
mfi.merge = func(dst, src pointer) {
if v := *src.toUint64(); v != 0 {
*dst.toUint64() = v
}
}
}
case reflect.Float32:
switch {
case isSlice: // E.g., []float32
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat32Slice()
if *sfsp != nil {
dfsp := dst.toFloat32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float32{}
}
}
}
case isPointer: // E.g., *float32
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat32Ptr()
if *sfpp != nil {
dfpp := dst.toFloat32Ptr()
if *dfpp == nil {
*dfpp = Float32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float32
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat32(); v != 0 {
*dst.toFloat32() = v
}
}
}
case reflect.Float64:
switch {
case isSlice: // E.g., []float64
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat64Slice()
if *sfsp != nil {
dfsp := dst.toFloat64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float64{}
}
}
}
case isPointer: // E.g., *float64
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat64Ptr()
if *sfpp != nil {
dfpp := dst.toFloat64Ptr()
if *dfpp == nil {
*dfpp = Float64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float64
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat64(); v != 0 {
*dst.toFloat64() = v
}
}
}
case reflect.Bool:
switch {
case isSlice: // E.g., []bool
mfi.merge = func(dst, src pointer) {
sfsp := src.toBoolSlice()
if *sfsp != nil {
dfsp := dst.toBoolSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []bool{}
}
}
}
case isPointer: // E.g., *bool
mfi.merge = func(dst, src pointer) {
sfpp := src.toBoolPtr()
if *sfpp != nil {
dfpp := dst.toBoolPtr()
if *dfpp == nil {
*dfpp = Bool(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., bool
mfi.merge = func(dst, src pointer) {
if v := *src.toBool(); v {
*dst.toBool() = v
}
}
}
case reflect.String:
switch {
case isSlice: // E.g., []string
mfi.merge = func(dst, src pointer) {
sfsp := src.toStringSlice()
if *sfsp != nil {
dfsp := dst.toStringSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []string{}
}
}
}
case isPointer: // E.g., *string
mfi.merge = func(dst, src pointer) {
sfpp := src.toStringPtr()
if *sfpp != nil {
dfpp := dst.toStringPtr()
if *dfpp == nil {
*dfpp = String(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., string
mfi.merge = func(dst, src pointer) {
if v := *src.toString(); v != "" {
*dst.toString() = v
}
}
}
case reflect.Slice:
isProto3 := props.Prop[i].proto3
switch {
case isPointer:
panic("bad pointer in byte slice case in " + tf.Name())
case tf.Elem().Kind() != reflect.Uint8:
panic("bad element kind in byte slice case in " + tf.Name())
case isSlice: // E.g., [][]byte
mfi.merge = func(dst, src pointer) {
sbsp := src.toBytesSlice()
if *sbsp != nil {
dbsp := dst.toBytesSlice()
for _, sb := range *sbsp {
if sb == nil {
*dbsp = append(*dbsp, nil)
} else {
*dbsp = append(*dbsp, append([]byte{}, sb...))
}
}
if *dbsp == nil {
*dbsp = [][]byte{}
}
}
}
default: // E.g., []byte
mfi.merge = func(dst, src pointer) {
sbp := src.toBytes()
if *sbp != nil {
dbp := dst.toBytes()
if !isProto3 || len(*sbp) > 0 {
*dbp = append([]byte{}, *sbp...)
}
}
}
}
case reflect.Struct:
switch {
case !isPointer:
mergeInfo := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
mergeInfo.merge(dst, src)
}
case isSlice: // E.g., []*pb.T
mergeInfo := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sps := src.getPointerSlice()
if sps != nil {
dps := dst.getPointerSlice()
for _, sp := range sps {
var dp pointer
if !sp.isNil() {
dp = valToPointer(reflect.New(tf))
mergeInfo.merge(dp, sp)
}
dps = append(dps, dp)
}
if dps == nil {
dps = []pointer{}
}
dst.setPointerSlice(dps)
}
}
default: // E.g., *pb.T
mergeInfo := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sp := src.getPointer()
if !sp.isNil() {
dp := dst.getPointer()
if dp.isNil() {
dp = valToPointer(reflect.New(tf))
dst.setPointer(dp)
}
mergeInfo.merge(dp, sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic("bad pointer or slice in map case in " + tf.Name())
default: // E.g., map[K]V
mfi.merge = func(dst, src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
dm := dst.asPointerTo(tf).Elem()
if dm.IsNil() {
dm.Set(reflect.MakeMap(tf))
}
switch tf.Elem().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(Clone(val.Interface().(Message)))
dm.SetMapIndex(key, val)
}
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
dm.SetMapIndex(key, val)
}
default: // Basic type (e.g., string)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
dm.SetMapIndex(key, val)
}
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic("bad pointer or slice in interface case in " + tf.Name())
default: // E.g., interface{}
// TODO: Make this faster?
mfi.merge = func(dst, src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
du := dst.asPointerTo(tf).Elem()
typ := su.Elem().Type()
if du.IsNil() || du.Elem().Type() != typ {
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
}
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
dv := du.Elem().Elem().Field(0)
if dv.Kind() == reflect.Ptr && dv.IsNil() {
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
Merge(dv.Interface().(Message), sv.Interface().(Message))
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
default: // Basic type (e.g., string)
dv.Set(sv)
}
}
}
}
default:
panic(fmt.Sprintf("merger not found for type:%s", tf))
}
mi.fields = append(mi.fields, mfi)
}
mi.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
mi.unrecognized = toField(&f)
}
atomic.StoreInt32(&mi.initialized, 1)
}
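The merge table computed above backs the exported Merge helper. A minimal editorial sketch of the observable behaviour, assuming a hypothetical hand-written Item message in place of generated code: non-zero scalar fields in src overwrite dst, and repeated fields are appended.

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

// Item is a hypothetical stand-in for a generated message type.
type Item struct {
	Name string   `protobuf:"bytes,1,opt,name=name"`
	Tags []string `protobuf:"bytes,2,rep,name=tags"`
}

func (m *Item) Reset()         { *m = Item{} }
func (m *Item) String() string { return proto.CompactTextString(m) }
func (*Item) ProtoMessage()    {}

func main() {
	dst := &Item{Name: "a"}
	src := &Item{Name: "b", Tags: []string{"x", "y"}}
	proto.Merge(dst, src)
	// Non-zero scalars overwrite, repeated fields append.
	fmt.Println(dst.Name, dst.Tags) // b [x y]
}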

vendor/github.com/gogo/protobuf/proto/table_unmarshal.go (generated, vendored, new file, 2245 lines): diff suppressed because it is too large.

@ -0,0 +1,385 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"io"
"reflect"
)
func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
// First read the message field to see if something is there.
// The semantics of multiple submessages are weird. Instead of
// the last one winning (as it is for all other fields), multiple
// submessages are merged.
v := f // gogo: changed from v := f.getPointer()
if v.isNil() {
v = valToPointer(reflect.New(sub.typ))
f.setPointer(v)
}
err := sub.unmarshal(v, b[:x])
if err != nil {
if r, ok := err.(*RequiredNotSetError); ok {
r.field = name + "." + r.field
} else {
return nil, err
}
}
return b[x:], err
}
}
func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := valToPointer(reflect.New(sub.typ))
err := sub.unmarshal(v, b[:x])
if err != nil {
if r, ok := err.(*RequiredNotSetError); ok {
r.field = name + "." + r.field
} else {
return nil, err
}
}
f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
return b[x:], err
}
}
func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
s.Set(reflect.New(sub.typ))
m := s.Interface().(custom)
if err := m.Unmarshal(b[:x]); err != nil {
return nil, err
}
return b[x:], nil
}
}
func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := reflect.New(sub.typ)
c := m.Interface().(custom)
if err := c.Unmarshal(b[:x]); err != nil {
return nil, err
}
v := valToPointer(m)
f.appendRef(v, sub.typ)
return b[x:], nil
}
}
func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := f.asPointerTo(sub.typ).Interface().(custom)
if err := m.Unmarshal(b[:x]); err != nil {
return nil, err
}
return b[x:], nil
}
}
func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &timestamp{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
t, err := timestampFromProto(m)
if err != nil {
return nil, err
}
s := f.asPointerTo(sub.typ).Elem()
s.Set(reflect.ValueOf(t))
return b[x:], nil
}
}
func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &timestamp{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
t, err := timestampFromProto(m)
if err != nil {
return nil, err
}
s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
s.Set(reflect.ValueOf(&t))
return b[x:], nil
}
}
func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &timestamp{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
t, err := timestampFromProto(m)
if err != nil {
return nil, err
}
slice := f.getSlice(reflect.PtrTo(sub.typ))
newSlice := reflect.Append(slice, reflect.ValueOf(&t))
slice.Set(newSlice)
return b[x:], nil
}
}
func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &timestamp{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
t, err := timestampFromProto(m)
if err != nil {
return nil, err
}
slice := f.getSlice(sub.typ)
newSlice := reflect.Append(slice, reflect.ValueOf(t))
slice.Set(newSlice)
return b[x:], nil
}
}
func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &duration{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
d, err := durationFromProto(m)
if err != nil {
return nil, err
}
s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
s.Set(reflect.ValueOf(&d))
return b[x:], nil
}
}
func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &duration{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
d, err := durationFromProto(m)
if err != nil {
return nil, err
}
s := f.asPointerTo(sub.typ).Elem()
s.Set(reflect.ValueOf(d))
return b[x:], nil
}
}
func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &duration{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
d, err := durationFromProto(m)
if err != nil {
return nil, err
}
slice := f.getSlice(reflect.PtrTo(sub.typ))
newSlice := reflect.Append(slice, reflect.ValueOf(&d))
slice.Set(newSlice)
return b[x:], nil
}
}
func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
return func(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return nil, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
m := &duration{}
if err := Unmarshal(b[:x], m); err != nil {
return nil, err
}
d, err := durationFromProto(m)
if err != nil {
return nil, err
}
slice := f.getSlice(sub.typ)
newSlice := reflect.Append(slice, reflect.ValueOf(d))
slice.Set(newSlice)
return b[x:], nil
}
}


@ -57,7 +57,6 @@ import (
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
@ -177,11 +176,6 @@ func writeName(w *textWriter, props *Properties) error {
return nil
}
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
@ -276,6 +270,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
props := sprops.Prop[i]
name := st.Field(i).Name
if name == "XXX_NoUnkeyedLiteral" {
continue
}
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
@ -366,7 +364,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -383,7 +381,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -447,12 +445,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
if len(props.Enum) > 0 {
if err := tm.writeEnum(w, fv, props); err != nil {
@ -475,7 +467,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
pv = reflect.New(sv.Type())
pv.Elem().Set(sv)
}
if pv.Type().Implements(extensionRangeType) {
if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
@ -484,27 +476,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
@ -605,6 +576,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
}
w.indent()
if v.CanAddr() {
// Calling v.Interface on a struct causes the reflect package to
// copy the entire struct. This is racy with the new Marshaler
// since we atomically update the XXX_sizecache.
//
// Thus, we retrieve a pointer to the struct if possible to avoid
// a race since v.Interface on the pointer doesn't copy the struct.
//
// If v is not addressable, then we are not worried about a race
// since it implies that the binary Marshaler cannot possibly be
// mutating this value.
v = v.Addr()
}
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
@ -613,9 +597,14 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if _, err = w.Write(text); err != nil {
return err
}
} else if err := tm.writeStruct(w, v); err != nil {
} else {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if err := tm.writeStruct(w, v); err != nil {
return err
}
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err


@ -212,7 +212,6 @@ func (p *textParser) advance() {
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
@ -283,60 +282,47 @@ func unescape(s string) (ch string, tail string, err error) {
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
ss := string(r) + s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", err
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
return string(bs), s, nil
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
@ -650,17 +636,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
@ -734,6 +720,9 @@ func (p *textParser) consumeExtName() (string, error) {
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
@ -934,6 +923,16 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.SetFloat(f)
return nil
}
case reflect.Int8:
if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Int16:
if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
@ -981,9 +980,19 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint8:
if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
fv.SetUint(x)
return nil
}
case reflect.Uint16:
if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
fv.SetUint(x)
return nil
}
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(x)
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
@ -1001,13 +1010,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
return newTextParser(s).readStruct(v.Elem(), "")
}


@ -47,183 +47,3 @@ func (*timestamp) String() string { return "timestamp<string>" }
func init() {
RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
}
func (o *Buffer) decTimestamp() (time.Time, error) {
b, err := o.DecodeRawBytes(true)
if err != nil {
		return time.Time{}, err
	}
	tproto := &timestamp{}
	if err := Unmarshal(b, tproto); err != nil {
		return time.Time{}, err
	}
	return timestampFromProto(tproto)
}

func (o *Buffer) dec_time(p *Properties, base structPointer) error {
	t, err := o.decTimestamp()
	if err != nil {
		return err
	}
	setPtrCustomType(base, p.field, &t)
	return nil
}

func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
	t, err := o.decTimestamp()
	if err != nil {
		return err
	}
	setCustomType(base, p.field, &t)
	return nil
}

func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
	t, err := o.decTimestamp()
	if err != nil {
		return err
	}
	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
	var zero field
	setPtrCustomType(newBas, zero, &t)
	return nil
}

func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
	t, err := o.decTimestamp()
	if err != nil {
		return err
	}
	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
	var zero field
	setCustomType(newBas, zero, &t)
	return nil
}

func size_time(p *Properties, base structPointer) (n int) {
	structp := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(structp) {
		return 0
	}
	tim := structPointer_Interface(structp, timeType).(*time.Time)
	t, err := timestampProto(*tim)
	if err != nil {
		return 0
	}
	size := Size(t)
	return size + sizeVarint(uint64(size)) + len(p.tagcode)
}

func (o *Buffer) enc_time(p *Properties, base structPointer) error {
	structp := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(structp) {
		return ErrNil
	}
	tim := structPointer_Interface(structp, timeType).(*time.Time)
	t, err := timestampProto(*tim)
	if err != nil {
		return err
	}
	data, err := Marshal(t)
	if err != nil {
		return err
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeRawBytes(data)
	return nil
}

func size_ref_time(p *Properties, base structPointer) (n int) {
	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
	t, err := timestampProto(*tim)
	if err != nil {
		return 0
	}
	size := Size(t)
	return size + sizeVarint(uint64(size)) + len(p.tagcode)
}

func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
	t, err := timestampProto(*tim)
	if err != nil {
		return err
	}
	data, err := Marshal(t)
	if err != nil {
		return err
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeRawBytes(data)
	return nil
}

func size_slice_time(p *Properties, base structPointer) (n int) {
	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
	tims := *ptims
	for i := 0; i < len(tims); i++ {
		if tims[i] == nil {
			return 0
		}
		tproto, err := timestampProto(*tims[i])
		if err != nil {
			return 0
		}
		size := Size(tproto)
		n += len(p.tagcode) + size + sizeVarint(uint64(size))
	}
	return n
}

func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error {
	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
	tims := *ptims
	for i := 0; i < len(tims); i++ {
		if tims[i] == nil {
			return errRepeatedHasNil
		}
		tproto, err := timestampProto(*tims[i])
		if err != nil {
			return err
		}
		data, err := Marshal(tproto)
		if err != nil {
			return err
		}
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeRawBytes(data)
	}
	return nil
}

func size_slice_ref_time(p *Properties, base structPointer) (n int) {
	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
	tims := *ptims
	for i := 0; i < len(tims); i++ {
		tproto, err := timestampProto(tims[i])
		if err != nil {
			return 0
		}
		size := Size(tproto)
		n += len(p.tagcode) + size + sizeVarint(uint64(size))
	}
	return n
}

func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error {
	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
	tims := *ptims
	for i := 0; i < len(tims); i++ {
		tproto, err := timestampProto(tims[i])
		if err != nil {
			return err
		}
		data, err := Marshal(tproto)
		if err != nil {
			return err
		}
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeRawBytes(data)
	}
	return nil
}

1888
vendor/github.com/gogo/protobuf/proto/wrappers.go generated vendored Normal file

File diff suppressed because it is too large.

113
vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go generated vendored Normal file

@@ -0,0 +1,113 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto

type float64Value struct {
	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *float64Value) Reset()       { *m = float64Value{} }
func (*float64Value) ProtoMessage()  {}
func (*float64Value) String() string { return "float64<string>" }

type float32Value struct {
	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *float32Value) Reset()       { *m = float32Value{} }
func (*float32Value) ProtoMessage()  {}
func (*float32Value) String() string { return "float32<string>" }

type int64Value struct {
	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *int64Value) Reset()       { *m = int64Value{} }
func (*int64Value) ProtoMessage()  {}
func (*int64Value) String() string { return "int64<string>" }

type uint64Value struct {
	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *uint64Value) Reset()       { *m = uint64Value{} }
func (*uint64Value) ProtoMessage()  {}
func (*uint64Value) String() string { return "uint64<string>" }

type int32Value struct {
	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *int32Value) Reset()       { *m = int32Value{} }
func (*int32Value) ProtoMessage()  {}
func (*int32Value) String() string { return "int32<string>" }

type uint32Value struct {
	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *uint32Value) Reset()       { *m = uint32Value{} }
func (*uint32Value) ProtoMessage()  {}
func (*uint32Value) String() string { return "uint32<string>" }

type boolValue struct {
	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *boolValue) Reset()       { *m = boolValue{} }
func (*boolValue) ProtoMessage()  {}
func (*boolValue) String() string { return "bool<string>" }

type stringValue struct {
	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *stringValue) Reset()       { *m = stringValue{} }
func (*stringValue) ProtoMessage()  {}
func (*stringValue) String() string { return "string<string>" }

type bytesValue struct {
	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *bytesValue) Reset()       { *m = bytesValue{} }
func (*bytesValue) ProtoMessage()  {}
func (*bytesValue) String() string { return "[]byte<string>" }

func init() {
	RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
	RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
	RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
	RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
	RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
	RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
	RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
	RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
	RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
}

191
vendor/github.com/golang/glog/LICENSE generated vendored

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

44
vendor/github.com/golang/glog/README generated vendored

@@ -1,44 +0,0 @@
glog
====
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
http://code.google.com/p/google-glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.
The comment from glog.go introduces the ideas:
Package glog implements logging analogous to the Google-internal
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
Error, Fatal, plus formatting variants such as Infof. It
also provides V-style logging controlled by the -v and
-vmodule=file=2 flags.
Basic examples:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.
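As the README above describes, the V(...) guard is only a boolean check, so the cost of formatting log arguments is paid solely when the requested verbosity is enabled. A minimal usage sketch against the glog API quoted above (a hypothetical main package, not part of this diff):

	package main

	import (
		"flag"

		"github.com/golang/glog"
	)

	func main() {
		// glog registers its command-line flags (-v, -vmodule, -log_dir) on the
		// standard flag set, so they must be parsed before logging starts.
		flag.Parse()
		defer glog.Flush()

		glog.Info("Prepare to repel boarders")

		// V(2) is a cheap boolean guard: the arguments below are only evaluated
		// when the process is started with -v=2 or higher.
		if glog.V(2) {
			glog.Infoln("Processed", 42, "elements")
		}
	}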

1177
vendor/github.com/golang/glog/glog.go generated vendored

File diff suppressed because it is too large.

124
vendor/github.com/golang/glog/glog_file.go generated vendored

@@ -1,124 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.

package glog

import (
	"errors"
	"flag"
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800

// logDirs lists the candidate directories for new log files.
var logDirs []string

// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")

func createLogDirs() {
	if *logDir != "" {
		logDirs = append(logDirs, *logDir)
	}
	logDirs = append(logDirs, os.TempDir())
}

var (
	pid      = os.Getpid()
	program  = filepath.Base(os.Args[0])
	host     = "unknownhost"
	userName = "unknownuser"
)

func init() {
	h, err := os.Hostname()
	if err == nil {
		host = shortHostname(h)
	}

	current, err := user.Current()
	if err == nil {
		userName = current.Username
	}

	// Sanitize userName since it may contain filepath separators on Windows.
	userName = strings.Replace(userName, `\`, "_", -1)
}

// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
	if i := strings.Index(hostname, "."); i >= 0 {
		return hostname[:i]
	}
	return hostname
}

// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program,
		host,
		userName,
		tag,
		t.Year(),
		t.Month(),
		t.Day(),
		t.Hour(),
		t.Minute(),
		t.Second(),
		pid)
	return name, program + "." + tag
}

var onceLogDirs sync.Once

// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
	onceLogDirs.Do(createLogDirs)
	if len(logDirs) == 0 {
		return nil, "", errors.New("log: no log dirs")
	}
	name, link := logName(tag, t)
	var lastErr error
	for _, dir := range logDirs {
		fname := filepath.Join(dir, name)
		f, err := os.Create(fname)
		if err == nil {
			symlink := filepath.Join(dir, link)
			os.Remove(symlink)        // ignore err
			os.Symlink(name, symlink) // ignore err
			return f, fname, nil
		}
		lastErr = err
	}
	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}


@@ -1,7 +1,4 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are


@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.
	return 0, errOverflow

63
vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored Normal file

@@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto

import "errors"

// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }

// Deprecated: do not use.
func GetStats() Stats { return Stats{} }

// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}


@@ -37,27 +37,9 @@ package proto
import (
	"errors"
	"fmt"
	"reflect"
)

// RequiredNotSetError is the error returned if Marshal is called with
// a protocol buffer struct whose required fields have not
// all been initialized. It is also the error returned if Unmarshal is
// called with an encoded protocol buffer that does not include all the
// required fields.
//
// When printed, RequiredNotSetError reports the first unset required field in a
// message. If the field cannot be precisely determined, it is reported as
// "{Unknown}".
type RequiredNotSetError struct {
	field string
}

func (e *RequiredNotSetError) Error() string {
	return fmt.Sprintf("proto: required field %q not set", e.field)
}

var (
	// errRepeatedHasNil is the error returned if Marshal is called with
	// a struct with a repeated field containing a nil element.


@@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
			return false
		}
		m1, m2 := e1.value, e2.value
		m1 := extensionAsLegacyType(e1.value)
		m2 := extensionAsLegacyType(e2.value)
		if m1 == nil && m2 == nil {
			// Both have only encoded form.
Some files were not shown because too many files have changed in this diff.