overhaul of build/release system, modeled after logspout overhaul

Jeff Lindsay 2015-02-18 11:53:03 -06:00
parent ee8720d50a
commit e2677212c2
225 changed files with 100 additions and 25732 deletions

3
.dockerignore Normal file

@ -0,0 +1,3 @@
release
build
.git

2
.gitignore vendored

@ -1,2 +1,2 @@
release
registrator
build

34
CHANGELOG.md Normal file

@ -0,0 +1,34 @@
# Change Log
All notable changes to this project will be documented in this file.
## [Unreleased][unreleased]
### Fixed
### Added
### Removed
### Changed
## [v5] - 2015-02-18
### Added
- Automated, PR-driven release process
- Development Dockerfile and make task
- CircleCI artifacts for every build
- `--version` flag to see version
### Changed
- Base container is now Alpine
- Built entirely in Docker
- Moved to gliderlabs organization
- New versioning scheme
- Release artifact is now a saved container image
### Removed
- Dropped unnecessary layers in Dockerfile
- Dropped Godeps for now
[unreleased]: https://github.com/gliderlabs/registrator/compare/v5...HEAD
[v5]: https://github.com/gliderlabs/registrator/compare/v0.4.0...v5


@ -1,8 +1,11 @@
FROM progrium/busybox
MAINTAINER Jeff Lindsay <progrium@gmail.com>
ADD https://github.com/progrium/registrator/releases/download/v0.4.0/registrator_0.4.0_linux_x86_64.tgz /tmp/registrator.tgz
RUN cd /bin && gzip -dc /tmp/registrator.tgz | tar -xf - && rm /tmp/registrator.tgz
ENV DOCKER_HOST unix:///tmp/docker.sock
FROM gliderlabs/alpine:3.1
ENTRYPOINT ["/bin/registrator"]
COPY . /go/src/github.com/gliderlabs/registrator
RUN apk-install -t build-deps go git mercurial \
&& cd /go/src/github.com/gliderlabs/registrator \
&& export GOPATH=/go \
&& go get \
&& go build -ldflags "-X main.Version $(cat VERSION)" -o /bin/registrator \
&& rm -rf /go \
&& apk del --purge build-deps
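The new build line injects the version with `-ldflags "-X main.Version $(cat VERSION)"`, which pairs with the `--version` flag listed in the CHANGELOG. A minimal sketch of the pattern that line assumes (the variable and flag wiring below are illustrative, not quoted from the registrator source):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// Version defaults to "dev" (as in Dockerfile.dev) and is overwritten at
// link time by: go build -ldflags "-X main.Version $(cat VERSION)"
var Version = "dev"

var showVersion = flag.Bool("version", false, "print version and exit")

func main() {
	flag.Parse()
	if *showVersion {
		fmt.Println(Version)
		os.Exit(0)
	}
	// ... normal startup continues here ...
}
```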

9
Dockerfile.dev Normal file

@ -0,0 +1,9 @@
FROM gliderlabs/alpine:3.1
CMD ["/bin/registrator"]
ENV GOPATH /go
RUN apk-install go git mercurial
COPY . /go/src/github.com/gliderlabs/registrator
RUN cd /go/src/github.com/gliderlabs/registrator \
&& go get \
&& go build -ldflags "-X main.Version dev" -o /bin/registrator

68
Godeps/Godeps.json generated

@ -1,68 +0,0 @@
{
"ImportPath": "github.com/progrium/registrator",
"GoVersion": "go1.4",
"Deps": [
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.6.2-15-gd2f9ffa",
"Rev": "d2f9ffa1d9cf25b25191b221229effac1b6de526"
},
{
"ImportPath": "github.com/armon/consul-api",
"Rev": "1b81c8e0c4cbf1d382310e4c0dc11221632e79d1"
},
{
"ImportPath": "github.com/cenkalti/backoff",
"Rev": "6a458ae422d1a8cbfb686ac5f4789bd82956c61b"
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
"Comment": "v0.2.0-rc1-127-g6fe04d5",
"Rev": "6fe04d580dfb71c9e34cbce2f4df9eefd1e1241e"
},
{
"ImportPath": "github.com/docker/docker/pkg/archive",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/pkg/fileutils",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/pkg/ioutils",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/pkg/pools",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/pkg/promise",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/pkg/system",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/pkg/units",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
"Comment": "v1.4.1-358-gde97839",
"Rev": "de9783980be2a7b3ca10eb8183ea5989acbd3e7e"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "bc54f3734b9b098e24680d5ad686f602bc152df4"
}
]
}

5
Godeps/Readme generated

@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

2
Godeps/_workspace/.gitignore generated vendored

@ -1,2 +0,0 @@
/pkg
/bin


@ -1 +0,0 @@
logrus


@ -1,10 +0,0 @@
language: go
go:
- 1.2
- 1.3
- tip
install:
- go get github.com/stretchr/testify
- go get github.com/stvp/go-udp-testing
- go get github.com/tobi/airbrake-go
- go get github.com/getsentry/raven-go


@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@ -1,355 +0,0 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0); the core API is unlikely to change much, but please version
control your Logrus to make sure you aren't fetching latest `master` on every
build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[l2met](http://r.32k.io/l2met-introduction) format:
```text
time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
```
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(&logrus_airbrake.AirbrakeHook{})
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
```go
// Not the real implementation of the Airbrake hook. Just a simple sample.
import (
log "github.com/Sirupsen/logrus"
)
func init() {
log.AddHook(new(AirbrakeHook))
}
type AirbrakeHook struct{}
// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
// the fields for the entry. See the Fields section of the README.
func (hook *AirbrakeHook) Fire(entry *log.Entry) error {
err := airbrake.Notify(entry.Data["error"].(error))
if err != nil {
log.WithFields(log.Fields{
"source": "airbrake",
"endpoint": airbrake.Endpoint,
}).Info("Failed to send error to Airbrake")
}
return nil
}
// `Levels()` returns a slice of `Levels` the hook is fired for.
func (hook *AirbrakeHook) Levels() []log.Level {
return []log.Level{
log.ErrorLevel,
log.FatalLevel,
log.PanicLevel,
}
}
```
Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
log.AddHook(new(logrus_airbrake.AirbrakeHook))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
Send errors to an exception tracking service compatible with the Airbrake API.
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
Send errors to the Papertrail hosted logging service via UDP.
* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
Send errors to remote syslog server.
Uses standard library `log/syslog` behind the scenes.
* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
Send errors to a channel in hipchat.
* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
Send logs to Loggly (https://www.loggly.com/)
* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
Hook for Slack chat.
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
#### Entries
Besides the fields added with `WithField` or `WithFields`, some fields are
automatically added to all logging events (see the sketch after this list):
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
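A small sketch of those defaults in action with the `JSONFormatter` (the field name, message, and abridged output line are only illustrative):

```go
package main

import log "github.com/Sirupsen/logrus"

func main() {
	log.SetFormatter(&log.JSONFormatter{})

	// One user field plus the three automatic ones: time, msg, level.
	log.WithField("event", "signup").Info("Failed to send event.")
	// => {"event":"signup","level":"info","msg":"Failed to send event.","time":"..."}
}
```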
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true` (see the snippet after this list).
* `logrus.JSONFormatter`. Logs fields as JSON.
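A short snippet for the `ForceColors`/`DisableColors` note above (the two field names come from the note and from the benchmark file in this diff; the surrounding calls are boilerplate):

```go
package main

import log "github.com/Sirupsen/logrus"

func main() {
	// Force colored output even when stdout is not a TTY (e.g. piped to a file).
	log.SetFormatter(&log.TextFormatter{ForceColors: true})
	log.Info("colored anyway")

	// Or strip colors entirely, even on a TTY.
	log.SetFormatter(&log.TextFormatter{DisableColors: true})
	log.Info("plain text")
}
```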
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotated(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
[godoc]: https://godoc.org/github.com/Sirupsen/logrus


@ -1,248 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
if err != nil {
return "", err
}
return reader.String(), err
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
func (entry *Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}


@ -1,53 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}


@ -1,40 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.JSONFormatter)
log.Formatter = new(logrus.TextFormatter) // default
}
func main() {
defer func() {
err := recover()
if err != nil {
log.WithFields(logrus.Fields{
"omg": true,
"err": err,
"number": 100,
}).Fatal("The ice breaks!")
}
}()
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"animal": "orca",
"size": 9009,
}).Panic("It's over 9000!")
}


@ -1,35 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"github.com/tobi/airbrake-go"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
}
func main() {
airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
airbrake.ApiKey = "whatever"
airbrake.Environment = "production"
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}


@ -1,182 +0,0 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}


@ -1,44 +0,0 @@
package logrus
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
}
}


@ -1,88 +0,0 @@
package logrus
import (
"testing"
"time"
)
// smallFields is a small size data set for benchmarking
var smallFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
// largeFields is a large size data set for benchmarking
var largeFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
"five": "six",
"seven": "eight",
"nine": "ten",
"eleven": "twelve",
"thirteen": "fourteen",
"fifteen": "sixteen",
"seventeen": "eighteen",
"nineteen": "twenty",
"a": "b",
"c": "d",
"e": "f",
"g": "h",
"i": "j",
"k": "l",
"m": "n",
"o": "p",
"q": "r",
"s": "t",
"u": "v",
"w": "x",
"y": "z",
"this": "will",
"make": "thirty",
"entries": "yeah",
}
func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkLargeTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
}
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
}
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
}
func BenchmarkSmallJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, smallFields)
}
func BenchmarkLargeJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, largeFields)
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
}
var d []byte
var err error
for i := 0; i < b.N; i++ {
d, err = formatter.Format(entry)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(d)))
}
}


@ -1,122 +0,0 @@
package logrus
import (
"testing"
"github.com/stretchr/testify/assert"
)
type TestHook struct {
Fired bool
}
func (hook *TestHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *TestHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookFires(t *testing.T) {
hook := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
assert.Equal(t, hook.Fired, false)
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
type ModifyHook struct {
}
func (hook *ModifyHook) Fire(entry *Entry) error {
entry.Data["wow"] = "whale"
return nil
}
func (hook *ModifyHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookCanModifyEntry(t *testing.T) {
hook := new(ModifyHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
})
}
func TestCanFireMultipleHooks(t *testing.T) {
hook1 := new(ModifyHook)
hook2 := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook1)
log.Hooks.Add(hook2)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
assert.Equal(t, hook2.Fired, true)
})
}
type ErrorHook struct {
Fired bool
}
func (hook *ErrorHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *ErrorHook) Levels() []Level {
return []Level{
ErrorLevel,
}
}
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, false)
})
}
func TestErrorHookShouldFireOnError(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Error("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}


@ -1,34 +0,0 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}
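The comment on the `Hook` interface above stresses that hooks are fired synchronously, so a slow `Fire` blocks the logging call. As a hedged illustration (not part of logrus; `AsyncHook` is a made-up name), a blocking hook could be wrapped like this:

```go
package asynchook

import "github.com/Sirupsen/logrus"

// AsyncHook wraps another hook and fires it in the background so the
// logging call itself never blocks. Errors from the inner hook are dropped.
type AsyncHook struct {
	Inner logrus.Hook
}

func (h *AsyncHook) Fire(entry *logrus.Entry) error {
	go h.Inner.Fire(entry) // fire-and-forget; the inner error is discarded
	return nil
}

func (h *AsyncHook) Levels() []logrus.Level {
	return h.Inner.Levels()
}
```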


@ -1,54 +0,0 @@
package logrus_airbrake
import (
"github.com/Sirupsen/logrus"
"github.com/tobi/airbrake-go"
)
// AirbrakeHook to send exceptions to an exception-tracking service compatible
// with the Airbrake API. You must set:
// * airbrake.Endpoint
// * airbrake.ApiKey
// * airbrake.Environment (only sends exceptions when set to "production")
//
// Before using this hook, set these values so errors can be sent. Entries that trigger an Error,
// Fatal or Panic should include an "error" field, which is what gets sent to Airbrake.
type AirbrakeHook struct{}
func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
if entry.Data["error"] == nil {
entry.Logger.WithFields(logrus.Fields{
"source": "airbrake",
"endpoint": airbrake.Endpoint,
}).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
return nil
}
err, ok := entry.Data["error"].(error)
if !ok {
entry.Logger.WithFields(logrus.Fields{
"source": "airbrake",
"endpoint": airbrake.Endpoint,
}).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
return nil
}
airErr := airbrake.Notify(err)
if airErr != nil {
entry.Logger.WithFields(logrus.Fields{
"source": "airbrake",
"endpoint": airbrake.Endpoint,
"error": airErr,
}).Warn("Failed to send error to Airbrake")
}
return nil
}
func (hook *AirbrakeHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}


@ -1,28 +0,0 @@
# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
## Usage
You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/papertrail"
)
func main() {
log := logrus.New()
hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
if err == nil {
log.Hooks.Add(hook)
}
}
```


@ -1,55 +0,0 @@
package logrus_papertrail
import (
"fmt"
"net"
"os"
"time"
"github.com/Sirupsen/logrus"
)
const (
format = "Jan 2 15:04:05"
)
// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
type PapertrailHook struct {
Host string
Port int
AppName string
UDPConn net.Conn
}
// NewPapertrailHook creates a hook to be added to an instance of logger.
func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
return &PapertrailHook{host, port, appName, conn}, err
}
// Fire is called when a log event is fired.
func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
date := time.Now().Format(format)
msg, _ := entry.String()
payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)
bytesWritten, err := hook.UDPConn.Write([]byte(payload))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
return err
}
return nil
}
// Levels returns the available logging levels.
func (hook *PapertrailHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}


@ -1,26 +0,0 @@
package logrus_papertrail
import (
"fmt"
"testing"
"github.com/Sirupsen/logrus"
"github.com/stvp/go-udp-testing"
)
func TestWritingToUDP(t *testing.T) {
port := 16661
udp.SetAddr(fmt.Sprintf(":%d", port))
hook, err := NewPapertrailHook("localhost", port, "test")
if err != nil {
t.Errorf("Unable to connect to local UDP server.")
}
log := logrus.New()
log.Hooks.Add(hook)
udp.ShouldReceive(t, "foo", func() {
log.Info("foo")
})
}


@ -1,61 +0,0 @@
# Sentry Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
[Sentry](https://getsentry.com) provides both self-hosted and hosted
solutions for exception tracking.
Both client and server are
[open source](https://github.com/getsentry/sentry).
## Usage
Every sentry application defined on the server gets a different
[DSN](https://www.getsentry.com/docs/). In the example below replace
`YOUR_DSN` with the one created for your application.
```go
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/sentry"
)
func main() {
log := logrus.New()
hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
})
if err == nil {
log.Hooks.Add(hook)
}
}
```
## Special fields
Some logrus fields have a special meaning in this hook;
these are server_name and logger (see the sketch after this list).
When logs are sent to sentry these fields are treated differently.
- server_name (also known as hostname) is the name of the server which
is logging the event (hostname.example.com)
- logger is the part of the application which is logging the event.
In Go this usually means setting it to the name of the package.
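A hedged usage sketch of those two fields (the `YOUR_DSN` placeholder follows the usage example above; the field values and message are arbitrary):

```go
package main

import (
	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/sentry"
)

func main() {
	log := logrus.New()
	hook, err := logrus_sentry.NewSentryHook("YOUR_DSN", []logrus.Level{logrus.ErrorLevel})
	if err == nil {
		log.Hooks.Add(hook)
	}

	log.WithFields(logrus.Fields{
		"server_name": "hostname.example.com", // reported to Sentry as the server name
		"logger":      "payments",             // reported to Sentry as the logger name
	}).Error("charge failed")
}
```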
## Timeout
`Timeout` is the time the sentry hook will wait for a response
from the sentry server.
If this time elapses with no response from
the server an error will be returned.
If `Timeout` is set to 0 the SentryHook will not wait for a reply
and will assume a correct delivery.
The SentryHook has a default timeout of `100 milliseconds` when created
with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
```go
hook, _ := logrus_sentry.NewSentryHook(...)
hook.Timeout = 20 * time.Second
```


@ -1,100 +0,0 @@
package logrus_sentry
import (
"fmt"
"time"
"github.com/Sirupsen/logrus"
"github.com/getsentry/raven-go"
)
var (
severityMap = map[logrus.Level]raven.Severity{
logrus.DebugLevel: raven.DEBUG,
logrus.InfoLevel: raven.INFO,
logrus.WarnLevel: raven.WARNING,
logrus.ErrorLevel: raven.ERROR,
logrus.FatalLevel: raven.FATAL,
logrus.PanicLevel: raven.FATAL,
}
)
func getAndDel(d logrus.Fields, key string) (string, bool) {
var (
ok bool
v interface{}
val string
)
if v, ok = d[key]; !ok {
return "", false
}
if val, ok = v.(string); !ok {
return "", false
}
delete(d, key)
return val, true
}
// SentryHook delivers logs to a sentry server.
type SentryHook struct {
// Timeout sets the time to wait for a delivery error from the sentry server.
// If this is set to zero the server will not wait for any response and will
// consider the message correctly sent
Timeout time.Duration
client *raven.Client
levels []logrus.Level
}
// NewSentryHook creates a hook to be added to an instance of logger
// and initializes the raven client.
// This method sets the timeout to 100 milliseconds.
func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
client, err := raven.NewClient(DSN, nil)
if err != nil {
return nil, err
}
return &SentryHook{100 * time.Millisecond, client, levels}, nil
}
// Called when an event should be sent to sentry
// Special fields that sentry uses to give more information to the server
// are extracted from entry.Data (if they are found)
// These fields are: logger and server_name
func (hook *SentryHook) Fire(entry *logrus.Entry) error {
packet := &raven.Packet{
Message: entry.Message,
Timestamp: raven.Timestamp(entry.Time),
Level: severityMap[entry.Level],
Platform: "go",
}
d := entry.Data
if logger, ok := getAndDel(d, "logger"); ok {
packet.Logger = logger
}
if serverName, ok := getAndDel(d, "server_name"); ok {
packet.ServerName = serverName
}
packet.Extra = map[string]interface{}(d)
_, errCh := hook.client.Capture(packet, nil)
timeout := hook.Timeout
if timeout != 0 {
timeoutCh := time.After(timeout)
select {
case err := <-errCh:
return err
case <-timeoutCh:
return fmt.Errorf("no response from sentry server in %s", timeout)
}
}
return nil
}
// Levels returns the available logging levels.
func (hook *SentryHook) Levels() []logrus.Level {
return hook.levels
}


@ -1,97 +0,0 @@
package logrus_sentry
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/Sirupsen/logrus"
"github.com/getsentry/raven-go"
)
const (
message = "error message"
server_name = "testserver.internal"
logger_name = "test.logger"
)
func getTestLogger() *logrus.Logger {
l := logrus.New()
l.Out = ioutil.Discard
return l
}
func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
pch := make(chan *raven.Packet, 1)
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
defer req.Body.Close()
d := json.NewDecoder(req.Body)
p := &raven.Packet{}
err := d.Decode(p)
if err != nil {
t.Fatal(err.Error())
}
pch <- p
}))
defer s.Close()
fragments := strings.SplitN(s.URL, "://", 2)
dsn := fmt.Sprintf(
"%s://public:secret@%s/sentry/project-id",
fragments[0],
fragments[1],
)
tf(dsn, pch)
}
func TestSpecialFields(t *testing.T) {
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
logger := getTestLogger()
hook, err := NewSentryHook(dsn, []logrus.Level{
logrus.ErrorLevel,
})
if err != nil {
t.Fatal(err.Error())
}
logger.Hooks.Add(hook)
logger.WithFields(logrus.Fields{
"server_name": server_name,
"logger": logger_name,
}).Error(message)
packet := <-pch
if packet.Logger != logger_name {
t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
}
if packet.ServerName != server_name {
t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
}
})
}
func TestSentryHandler(t *testing.T) {
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
logger := getTestLogger()
hook, err := NewSentryHook(dsn, []logrus.Level{
logrus.ErrorLevel,
})
if err != nil {
t.Fatal(err.Error())
}
logger.Hooks.Add(hook)
logger.Error(message)
packet := <-pch
if packet.Message != message {
t.Errorf("message should have been %s, was %s", message, packet.Message)
}
})
}


@ -1,20 +0,0 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
## Usage
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```


@ -1,59 +0,0 @@
package logrus_syslog
import (
"fmt"
"github.com/Sirupsen/logrus"
"log/syslog"
"os"
)
// SyslogHook to send logs via syslog.
type SyslogHook struct {
Writer *syslog.Writer
SyslogNetwork string
SyslogRaddr string
}
// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
w, err := syslog.Dial(network, raddr, priority, tag)
return &SyslogHook{w, network, raddr}, err
}
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return hook.Writer.Crit(line)
case logrus.FatalLevel:
return hook.Writer.Crit(line)
case logrus.ErrorLevel:
return hook.Writer.Err(line)
case logrus.WarnLevel:
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
case logrus.DebugLevel:
return hook.Writer.Debug(line)
default:
return nil
}
}
func (hook *SyslogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}


@ -1,26 +0,0 @@
package logrus_syslog
import (
"github.com/Sirupsen/logrus"
"log/syslog"
"testing"
)
func TestLocalhostAddAndPrint(t *testing.T) {
log := logrus.New()
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
t.Errorf("Unable to connect to local syslog.")
}
log.Hooks.Add(hook)
for _, level := range hook.Levels() {
if len(log.Hooks[level]) != 1 {
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
}
}
log.Info("Congratulations!")
}


@ -1,26 +0,0 @@
package logrus
import (
"encoding/json"
"fmt"
"time"
)
type JSONFormatter struct{}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
data[k] = v
}
prefixFieldClashes(data)
data["time"] = entry.Time.Format(time.RFC3339)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}


@ -1,161 +0,0 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stdout`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks levelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in development and when debugging.
Level Level
// Used to sync writing to the log.
mu sync.Mutex
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(levelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stdout,
Formatter: new(TextFormatter),
Hooks: make(levelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
NewEntry(logger).Debugf(format, args...)
}
func (logger *Logger) Infof(format string, args ...interface{}) {
NewEntry(logger).Infof(format, args...)
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
NewEntry(logger).Warnf(format, args...)
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
NewEntry(logger).Warnf(format, args...)
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
NewEntry(logger).Errorf(format, args...)
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
NewEntry(logger).Fatalf(format, args...)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
NewEntry(logger).Panicf(format, args...)
}
func (logger *Logger) Debug(args ...interface{}) {
NewEntry(logger).Debug(args...)
}
func (logger *Logger) Info(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Warn(args ...interface{}) {
NewEntry(logger).Warn(args...)
}
func (logger *Logger) Warning(args ...interface{}) {
NewEntry(logger).Warn(args...)
}
func (logger *Logger) Error(args ...interface{}) {
NewEntry(logger).Error(args...)
}
func (logger *Logger) Fatal(args ...interface{}) {
NewEntry(logger).Fatal(args...)
}
func (logger *Logger) Panic(args ...interface{}) {
NewEntry(logger).Panic(args...)
}
func (logger *Logger) Debugln(args ...interface{}) {
NewEntry(logger).Debugln(args...)
}
func (logger *Logger) Infoln(args ...interface{}) {
NewEntry(logger).Infoln(args...)
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
}
func (logger *Logger) Warnln(args ...interface{}) {
NewEntry(logger).Warnln(args...)
}
func (logger *Logger) Warningln(args ...interface{}) {
NewEntry(logger).Warnln(args...)
}
func (logger *Logger) Errorln(args ...interface{}) {
NewEntry(logger).Errorln(args...)
}
func (logger *Logger) Fatalln(args ...interface{}) {
NewEntry(logger).Fatalln(args...)
}
func (logger *Logger) Panicln(args ...interface{}) {
NewEntry(logger).Panicln(args...)
}


@ -1,94 +0,0 @@
package logrus
import (
"fmt"
"log"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}


@ -1,283 +0,0 @@
package logrus
import (
"bytes"
"encoding/json"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
log(logger)
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
var buffer bytes.Buffer
logger := New()
logger.Out = &buffer
logger.Formatter = &TextFormatter{
DisableColors: true,
}
log(logger)
fields := make(map[string]string)
for _, kv := range strings.Split(buffer.String(), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
assert.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func TestPrint(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestInfo(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestWarn(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Warn("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "warning")
})
}
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test test")
})
}
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test 10")
})
}
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test10")
})
}
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "testtest")
})
}
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
localLog := logger.WithFields(Fields{
"key1": "value1",
})
localLog.WithField("key2", "value2").Info("test")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assert.Equal(t, "value2", fields["key2"])
assert.Equal(t, "value1", fields["key1"])
buffer = bytes.Buffer{}
fields = Fields{}
localLog.Info("test")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
_, ok := fields["key2"]
assert.Equal(t, false, ok)
assert.Equal(t, "value1", fields["key1"])
}
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
})
}
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["fields.msg"], "hello")
})
}
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("time", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["fields.time"], "hello")
})
}
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("level", 1).Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["level"], "info")
assert.Equal(t, fields["fields.level"], 1)
})
}
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
LogAndAssertText(t, func(log *Logger) {
ll := log.WithField("herp", "derp")
ll.Info("hello")
ll.Info("bye")
}, func(fields map[string]string) {
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
if _, ok := fields[fieldName]; ok {
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
}
}
})
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
llog := logger.WithField("context", "eating raw fish")
llog.Info("looks delicious")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded first message")
assert.Len(t, fields, 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "looks delicious")
assert.Equal(t, fields["context"], "eating raw fish")
buffer.Reset()
llog.Warn("omg it is!")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded second message")
assert.Len(t, fields, 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "omg it is!")
assert.Equal(t, fields["context"], "eating raw fish")
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
}
func TestConvertLevelToString(t *testing.T) {
assert.Equal(t, "debug", DebugLevel.String())
assert.Equal(t, "info", InfoLevel.String())
assert.Equal(t, "warning", WarnLevel.String())
assert.Equal(t, "error", ErrorLevel.String())
assert.Equal(t, "fatal", FatalLevel.String())
assert.Equal(t, "panic", PanicLevel.String())
}
func TestParseLevel(t *testing.T) {
l, err := ParseLevel("panic")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("fatal")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("error")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("warn")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("warning")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("info")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("debug")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("invalid")
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}


@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios


@ -1,20 +0,0 @@
/*
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}


@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios


@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,!appengine darwin freebsd
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}


@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}


@ -1,124 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"regexp"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
)
var (
baseTimestamp time.Time
isTerminal bool
noQuoteNeeded *regexp.Regexp
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
DisableColors bool
// Set to true to disable timestamp logging (useful when the output
// is redirected to a logging system already adding a timestamp)
DisableTimestamp bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var keys []string
for k := range entry.Data {
keys = append(keys, k)
}
sort.Strings(keys)
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
if isColored {
printColored(b, entry, keys)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message)
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
var levelColor int
switch entry.Level {
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
}
}
func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return false
}
}
return true
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
switch value.(type) {
case string:
if needsQuoting(value.(string)) {
fmt.Fprintf(b, "%v=%s ", key, value)
} else {
fmt.Fprintf(b, "%v=%q ", key, value)
}
case error:
if needsQuoting(value.(error).Error()) {
fmt.Fprintf(b, "%v=%s ", key, value)
} else {
fmt.Fprintf(b, "%v=%q ", key, value)
}
default:
fmt.Fprintf(b, "%v=%v ", key, value)
}
}


@ -1,33 +0,0 @@
package logrus
import (
"bytes"
"errors"
"testing"
)
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte{'"'})
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(true, "/foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
}


@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test


@ -1,362 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.


@ -1,39 +0,0 @@
consul-api
==========
This package provides the `consulapi` package which attempts to
provide programmatic access to the full Consul API.
Currently, all of the Consul APIs included in version 0.3 are supported.
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/armon/consul-api)
Usage
=====
Below is an example of using the Consul client:
```go
// Get a new client, with KV endpoints
client, _ := consulapi.NewClient(consulapi.DefaultConfig())
kv := client.KV()
// PUT a new KV pair
p := &consulapi.KVPair{Key: "foo", Value: []byte("test")}
_, err := kv.Put(p, nil)
if err != nil {
panic(err)
}
// Lookup the pair
pair, _, err := kv.Get("foo", nil)
if err != nil {
panic(err)
}
fmt.Printf("KV: %v", pair)
```
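The same client exposes the Agent endpoints as well. As a minimal sketch (the service name, port, and TTL value below are illustrative), registering a service with a TTL health check and then reporting it as passing looks like this:
```go
// Register a service with a TTL health check via the local agent
agent := client.Agent()
registration := &consulapi.AgentServiceRegistration{
	Name: "web",
	Port: 8000,
	Tags: []string{"example"},
	Check: &consulapi.AgentServiceCheck{
		TTL: "15s",
	},
}
if err := agent.ServiceRegister(registration); err != nil {
	panic(err)
}

// Report the TTL check as passing; Consul derives the check ID
// "service:<service id>" from the registration above
if err := agent.PassTTL("service:web", "all good"); err != nil {
	panic(err)
}
```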


@ -1,140 +0,0 @@
package consulapi
const (
// ACLClientType is the client type token
ACLClientType = "client"
// ACLManagementType is the management type token
ACLManagementType = "management"
)
// ACLEntry is used to represent an ACL entry
type ACLEntry struct {
CreateIndex uint64
ModifyIndex uint64
ID string
Name string
Type string
Rules string
}
// ACL can be used to query the ACL endpoints
type ACL struct {
c *Client
}
// ACL returns a handle to the ACL endpoints
func (c *Client) ACL() *ACL {
return &ACL{c}
}
// Create is used to generate a new token with the given parameters
func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/create")
r.setWriteOptions(q)
r.obj = acl
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Update is used to update the rules of an existing token
func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/update")
r.setWriteOptions(q)
r.obj = acl
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Destroy is used to destroy a given ACL token ID
func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Clone is used to return a new token cloned from an existing one
func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Info is used to query for information about an ACL token
func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/info/"+id)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to get all the ACL tokens
func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/list")
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}


@ -1,140 +0,0 @@
package consulapi
import (
"os"
"testing"
)
// CONSUL_ROOT is a management token for the tests
var CONSUL_ROOT string
func init() {
CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
}
func TestACL_CreateDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae := ACLEntry{
Name: "API test",
Type: ACLClientType,
Rules: `key "" { policy = "deny" }`,
}
id, wm, err := acl.Create(&ae, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
ae2, _, err := acl.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
t.Fatalf("Bad: %#v", ae2)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_CloneDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
id, wm, err := acl.Clone(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_Info(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae, qm, err := acl.Info(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
t.Fatalf("bad: %#v", ae)
}
}
func TestACL_List(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
acls, qm, err := acl.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(acls) < 2 {
t.Fatalf("bad: %v", acls)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}


@ -1,272 +0,0 @@
package consulapi
import (
"fmt"
)
// AgentCheck represents a check known to the agent
type AgentCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
}
// AgentService represents a service known to the agent
type AgentService struct {
ID string
Service string
Tags []string
Port int
}
// AgentMember represents a cluster member known to the agent
type AgentMember struct {
Name string
Addr string
Port uint16
Tags map[string]string
Status int
ProtocolMin uint8
ProtocolMax uint8
ProtocolCur uint8
DelegateMin uint8
DelegateMax uint8
DelegateCur uint8
}
// AgentServiceRegistration is used to register a new service
type AgentServiceRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Check *AgentServiceCheck
}
// AgentCheckRegistration is used to register a new check
type AgentCheckRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Notes string `json:",omitempty"`
AgentServiceCheck
}
// AgentServiceCheck is used to create an associated
// check for a service
type AgentServiceCheck struct {
Script string `json:",omitempty"`
Interval string `json:",omitempty"`
TTL string `json:",omitempty"`
}
// Agent can be used to query the Agent endpoints
type Agent struct {
c *Client
// cache the node name
nodeName string
}
// Agent returns a handle to the agent endpoints
func (c *Client) Agent() *Agent {
return &Agent{c: c}
}
// Self is used to query the agent we are speaking to for
// information about itself
func (a *Agent) Self() (map[string]map[string]interface{}, error) {
r := a.c.newRequest("GET", "/v1/agent/self")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// NodeName is used to get the node name of the agent
func (a *Agent) NodeName() (string, error) {
if a.nodeName != "" {
return a.nodeName, nil
}
info, err := a.Self()
if err != nil {
return "", err
}
name := info["Config"]["NodeName"].(string)
a.nodeName = name
return name, nil
}
// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentCheck
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentService
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Members returns the known gossip members. The WAN
// flag can be used to query a server for WAN members.
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
r := a.c.newRequest("GET", "/v1/agent/members")
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// ServiceRegister is used to register a new service with
// the local agent
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/service/register")
r.obj = service
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ServiceDeregister is used to deregister a service with
// the local agent
func (a *Agent) ServiceDeregister(serviceID string) error {
r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// PassTTL is used to set a TTL check to the passing state
func (a *Agent) PassTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "pass")
}
// WarnTTL is used to set a TTL check to the warning state
func (a *Agent) WarnTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "warn")
}
// FailTTL is used to set a TTL check to the failing state
func (a *Agent) FailTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "fail")
}
// UpdateTTL is used to update the TTL of a check
func (a *Agent) UpdateTTL(checkID, note, status string) error {
switch status {
case "pass":
case "warn":
case "fail":
default:
return fmt.Errorf("Invalid status: %s", status)
}
endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
r := a.c.newRequest("PUT", endpoint)
r.params.Set("note", note)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckRegister is used to register a new check with
// the local agent
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/check/register")
r.obj = check
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckDeregister is used to deregister a check with
// the local agent
func (a *Agent) CheckDeregister(checkID string) error {
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Join is used to instruct the agent to attempt a join to
// another cluster member
func (a *Agent) Join(addr string, wan bool) error {
r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ForceLeave is used to have the agent eject a failed node
func (a *Agent) ForceLeave(node string) error {
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}


@ -1,162 +0,0 @@
package consulapi
import (
"testing"
)
func TestAgent_Self(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"]
if name == "" {
t.Fatalf("bad: %v", info)
}
}
func TestAgent_Members(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
members, err := agent.Members(false)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(members) != 1 {
t.Fatalf("bad: %v", members)
}
}
func TestAgent_Services(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Tags: []string{"bar", "baz"},
Port: 8000,
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
services, err := agent.Services()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := services["foo"]; !ok {
t.Fatalf("missing service: %v", services)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := checks["service:foo"]; !ok {
t.Fatalf("missing check: %v", checks)
}
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_SetTTLStatus(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
if err := agent.WarnTTL("service:foo", "test"); err != nil {
t.Fatalf("err: %v", err)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
chk, ok := checks["service:foo"]
if !ok {
t.Fatalf("missing check: %v", checks)
}
if chk.Status != "warning" {
t.Fatalf("Bad: %#v", chk)
}
if chk.Output != "test" {
t.Fatalf("Bad: %#v", chk)
}
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_Checks(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
reg := &AgentCheckRegistration{
Name: "foo",
}
reg.TTL = "15s"
if err := agent.CheckRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := checks["foo"]; !ok {
t.Fatalf("missing check: %v", checks)
}
if err := agent.CheckDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_Join(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
// Join ourself
addr := info["Config"]["AdvertiseAddr"].(string)
err = agent.Join(addr, false)
if err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAgent_ForceLeave(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
// Eject somebody
err := agent.ForceLeave("foo")
if err != nil {
t.Fatalf("err: %v", err)
}
}


@ -1,305 +0,0 @@
package consulapi
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
// QueryOptions are used to parameterize a query
type QueryOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// AllowStale allows any Consul server (non-leader) to service
// a read. This allows for lower latency and higher throughput
AllowStale bool
// RequireConsistent forces the read to be fully consistent.
// This is more expensive but prevents ever performing a stale
// read.
RequireConsistent bool
// WaitIndex is used to enable a blocking query. Waits
// until the timeout or the next index is reached
WaitIndex uint64
// WaitTime is used to bound the duration of a wait.
// Defaults to that of the Config, but can be overridden.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// WriteOptions are used to parameterize a write
type WriteOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// QueryMeta is used to return meta data about a query
type QueryMeta struct {
// LastIndex. This can be used as a WaitIndex to perform
// a blocking query
LastIndex uint64
// Time of last contact from the leader for the
// server servicing the request
LastContact time.Duration
// Is there a known leader
KnownLeader bool
// How long did the request take
RequestTime time.Duration
}
// WriteMeta is used to return meta data about a write
type WriteMeta struct {
// How long did the request take
RequestTime time.Duration
}
// Config is used to configure the creation of a client
type Config struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// HttpClient is the client to use. Default will be
// used if not provided.
HttpClient *http.Client
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
}
// DefaultConfig returns a default configuration for the client
func DefaultConfig() *Config {
return &Config{
Address: "127.0.0.1:8500",
Scheme: "http",
HttpClient: http.DefaultClient,
}
}
// Client provides a client to the Consul API
type Client struct {
config Config
}
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.Address) == 0 {
config.Address = defConfig.Address
}
if len(config.Scheme) == 0 {
config.Scheme = defConfig.Scheme
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
client := &Client{
config: *config,
}
return client, nil
}
// request is used to help build up a request
type request struct {
config *Config
method string
url *url.URL
params url.Values
body io.Reader
obj interface{}
}
// setQueryOptions is used to annotate the request with
// additional query options
func (r *request) setQueryOptions(q *QueryOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.AllowStale {
r.params.Set("stale", "")
}
if q.RequireConsistent {
r.params.Set("consistent", "")
}
if q.WaitIndex != 0 {
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
}
if q.WaitTime != 0 {
r.params.Set("wait", durToMsec(q.WaitTime))
}
if q.Token != "" {
r.params.Set("token", q.Token)
}
}
// durToMsec converts a duration to a millisecond specified string
func durToMsec(dur time.Duration) string {
return fmt.Sprintf("%dms", dur/time.Millisecond)
}
// setWriteOptions is used to annotate the request with
// additional write options
func (r *request) setWriteOptions(q *WriteOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.Token != "" {
r.params.Set("token", q.Token)
}
}
// toHTTP converts the request to an HTTP request
func (r *request) toHTTP() (*http.Request, error) {
// Encode the query parameters
r.url.RawQuery = r.params.Encode()
// Get the URL string
urlRaw := r.url.String()
// Check if we should encode the body
if r.body == nil && r.obj != nil {
if b, err := encodeBody(r.obj); err != nil {
return nil, err
} else {
r.body = b
}
}
// Create the HTTP request
return http.NewRequest(r.method, urlRaw, r.body)
}
// newRequest is used to create a new request
func (c *Client) newRequest(method, path string) *request {
r := &request{
config: &c.config,
method: method,
url: &url.URL{
Scheme: c.config.Scheme,
Host: c.config.Address,
Path: path,
},
params: make(map[string][]string),
}
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
if c.config.WaitTime != 0 {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
if c.config.Token != "" {
r.params.Set("token", r.config.Token)
}
return r
}
// doRequest runs a request with our client
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return 0, nil, err
}
start := time.Now()
resp, err := c.config.HttpClient.Do(req)
diff := time.Since(start)
return diff, resp, err
}
// parseQueryMeta is used to help parse query meta-data
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
header := resp.Header
// Parse the X-Consul-Index
index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
}
q.LastIndex = index
// Parse the X-Consul-LastContact
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
}
q.LastContact = time.Duration(last) * time.Millisecond
// Parse the X-Consul-KnownLeader
switch header.Get("X-Consul-KnownLeader") {
case "true":
q.KnownLeader = true
default:
q.KnownLeader = false
}
return nil
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// requireOK is used to wrap doRequest and check for a 200
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
if e != nil {
return d, resp, e
}
if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
return d, resp, e
}
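// watchKey is an illustrative sketch (the helper and its key argument are
// hypothetical, not part of the original file): it long-polls a KV key from
// this package's KV endpoints by feeding the last seen X-Consul-Index back in
// as WaitIndex, which turns Get into a blocking query as described on
// QueryOptions above. It only returns when a request fails.
func watchKey(client *Client, key string) error {
	kv := client.KV()
	var lastIndex uint64
	for {
		pair, meta, err := kv.Get(key, &QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  30 * time.Second,
		})
		if err != nil {
			return err
		}
		lastIndex = meta.LastIndex
		if pair != nil {
			fmt.Printf("value for %s: %s\n", key, pair.Value)
		}
	}
}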


@ -1,126 +0,0 @@
package consulapi
import (
crand "crypto/rand"
"fmt"
"net/http"
"testing"
"time"
)
func makeClient(t *testing.T) *Client {
conf := DefaultConfig()
client, err := NewClient(conf)
if err != nil {
t.Fatalf("err: %v", err)
}
return client
}
func testKey() string {
buf := make([]byte, 16)
if _, err := crand.Read(buf); err != nil {
panic(fmt.Errorf("Failed to read random bytes: %v", err))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
}
func TestSetQueryOptions(t *testing.T) {
c := makeClient(t)
r := c.newRequest("GET", "/v1/kv/foo")
q := &QueryOptions{
Datacenter: "foo",
AllowStale: true,
RequireConsistent: true,
WaitIndex: 1000,
WaitTime: 100 * time.Second,
Token: "12345",
}
r.setQueryOptions(q)
if r.params.Get("dc") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["stale"]; !ok {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["consistent"]; !ok {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("index") != "1000" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("wait") != "100000ms" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("token") != "12345" {
t.Fatalf("bad: %v", r.params)
}
}
func TestSetWriteOptions(t *testing.T) {
c := makeClient(t)
r := c.newRequest("GET", "/v1/kv/foo")
q := &WriteOptions{
Datacenter: "foo",
Token: "23456",
}
r.setWriteOptions(q)
if r.params.Get("dc") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("token") != "23456" {
t.Fatalf("bad: %v", r.params)
}
}
func TestRequestToHTTP(t *testing.T) {
c := makeClient(t)
r := c.newRequest("DELETE", "/v1/kv/foo")
q := &QueryOptions{
Datacenter: "foo",
}
r.setQueryOptions(q)
req, err := r.toHTTP()
if err != nil {
t.Fatalf("err: %v", err)
}
if req.Method != "DELETE" {
t.Fatalf("bad: %v", req)
}
if req.URL.String() != "http://127.0.0.1:8500/v1/kv/foo?dc=foo" {
t.Fatalf("bad: %v", req)
}
}
func TestParseQueryMeta(t *testing.T) {
resp := &http.Response{
Header: make(map[string][]string),
}
resp.Header.Set("X-Consul-Index", "12345")
resp.Header.Set("X-Consul-LastContact", "80")
resp.Header.Set("X-Consul-KnownLeader", "true")
qm := &QueryMeta{}
if err := parseQueryMeta(resp, qm); err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex != 12345 {
t.Fatalf("Bad: %v", qm)
}
if qm.LastContact != 80*time.Millisecond {
t.Fatalf("Bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("Bad: %v", qm)
}
}


@ -1,181 +0,0 @@
package consulapi
type Node struct {
Node string
Address string
}
type CatalogService struct {
Node string
Address string
ServiceID string
ServiceName string
ServiceTags []string
ServicePort int
}
type CatalogNode struct {
Node *Node
Services map[string]*AgentService
}
type CatalogRegistration struct {
Node string
Address string
Datacenter string
Service *AgentService
Check *AgentCheck
}
type CatalogDeregistration struct {
Node string
Address string
Datacenter string
ServiceID string
CheckID string
}
// Catalog can be used to query the Catalog endpoints
type Catalog struct {
c *Client
}
// Catalog returns a handle to the catalog endpoints
func (c *Client) Catalog() *Catalog {
return &Catalog{c}
}
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/register")
r.setWriteOptions(q)
r.obj = reg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
r.setWriteOptions(q)
r.obj = dereg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Datacenters is used to query for all the known datacenters
func (c *Catalog) Datacenters() ([]string, error) {
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []string
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to query all the known nodes
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Node
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Services is used to query for all known services
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/services")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]string
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query catalog entries for a given service
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
}
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CatalogService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Node is used to query for service information about a single node
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *CatalogNode
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}


@ -1,219 +0,0 @@
package consulapi
import (
"testing"
)
func TestCatalog_Datacenters(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
datacenters, err := catalog.Datacenters()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(datacenters) == 0 {
t.Fatalf("Bad: %v", datacenters)
}
}
func TestCatalog_Nodes(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
nodes, meta, err := catalog.Nodes(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(nodes) == 0 {
t.Fatalf("Bad: %v", nodes)
}
}
func TestCatalog_Services(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
services, meta, err := catalog.Services(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
t.Fatalf("Bad: %v", services)
}
}
func TestCatalog_Service(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
services, meta, err := catalog.Service("consul", "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
t.Fatalf("Bad: %v", services)
}
}
func TestCatalog_Node(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
name, _ := c.Agent().NodeName()
info, meta, err := catalog.Node(name, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("Bad: %v", meta)
}
if len(info.Services) == 0 {
t.Fatalf("Bad: %v", info)
}
}
func TestCatalog_Registration(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
service := &AgentService{
ID: "redis1",
Service: "redis",
Tags: []string{"master", "v1"},
Port: 8000,
}
check := &AgentCheck{
Node: "foobar",
CheckID: "service:redis1",
Name: "Redis health check",
Notes: "Script based health check",
Status: "passing",
ServiceID: "redis1",
}
reg := &CatalogRegistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
Service: service,
Check: check,
}
_, err := catalog.Register(reg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := node.Services["redis1"]; !ok {
t.Fatalf("missing service: redis1")
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if health[0].CheckID != "service:redis1" {
t.Fatalf("missing checkid service:redis1")
}
}
func TestCatalog_Deregistration(t *testing.T) {
c := makeClient(t)
catalog := c.Catalog()
dereg := &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
ServiceID: "redis1",
}
_, err := catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := node.Services["redis1"]; ok {
t.Fatalf("ServiceID:redis1 is not deregistered")
}
dereg = &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
CheckID: "service:redis1",
}
_, err = catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(health) != 0 {
t.Fatalf("CheckID:service:redis1 is not deregistered")
}
dereg = &CatalogDeregistration{
Datacenter: "dc1",
Node: "foobar",
Address: "192.168.10.10",
}
_, err = catalog.Deregister(dereg, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
node, _, err = catalog.Node("foobar", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if node != nil {
t.Fatalf("node is not deregistered: %v", node)
}
}

View File

@ -1,104 +0,0 @@
package consulapi
import (
"bytes"
"strconv"
)
// Event can be used to query the Event endpoints
type Event struct {
c *Client
}
// UserEvent represents an event that was fired by the user
type UserEvent struct {
ID string
Name string
Payload []byte
NodeFilter string
ServiceFilter string
TagFilter string
Version int
LTime uint64
}
// Event returns a handle to the event endpoints
func (c *Client) Event() *Event {
return &Event{c}
}
// Fire is used to fire a new user event. Only the Name, Payload and Filters
// are respected. This returns the ID or an associated error. Cross DC requests
// are supported.
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
r.setWriteOptions(q)
if params.NodeFilter != "" {
r.params.Set("node", params.NodeFilter)
}
if params.ServiceFilter != "" {
r.params.Set("service", params.ServiceFilter)
}
if params.TagFilter != "" {
r.params.Set("tag", params.TagFilter)
}
if params.Payload != nil {
r.body = bytes.NewReader(params.Payload)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out UserEvent
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// List is used to get the most recent events an agent has received.
// This list can be optionally filtered by the name. This endpoint supports
// quasi-blocking queries. The index is not monotonic, nor does it provide
// LastContact or KnownLeader.
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
r := e.c.newRequest("GET", "/v1/event/list")
r.setQueryOptions(q)
if name != "" {
r.params.Set("name", name)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*UserEvent
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// IDToIndex is a bit of a hack. This simulates the index generation to
// convert an event ID into a WaitIndex.
func (e *Event) IDToIndex(uuid string) uint64 {
lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
upper := uuid[19:23] + uuid[24:36]
lowVal, err := strconv.ParseUint(lower, 16, 64)
if err != nil {
panic("Failed to convert " + lower)
}
highVal, err := strconv.ParseUint(upper, 16, 64)
if err != nil {
panic("Failed to convert " + upper)
}
return lowVal ^ highVal
}
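
A short sketch of the event API above: fire a named user event, then list recent events with the same name. It again assumes a pre-built *consulapi.Client; the event name and payload are placeholders.

```go
package main

import (
	"fmt"

	consulapi "github.com/armon/consul-api" // assumed import path
)

// fireAndList is a hypothetical helper around the Event endpoints above.
func fireAndList(c *consulapi.Client) error {
	event := c.Event()

	id, _, err := event.Fire(&consulapi.UserEvent{
		Name:    "deploy",
		Payload: []byte("v5"),
	}, nil)
	if err != nil {
		return err
	}

	events, qm, err := event.List("deploy", nil)
	if err != nil {
		return err
	}
	fmt.Printf("fired %s; agent has %d events (index %d)\n", id, len(events), qm.LastIndex)
	return nil
}
```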

View File

@ -1,37 +0,0 @@
package consulapi
import (
"testing"
)
func TestEvent_FireList(t *testing.T) {
c := makeClient(t)
event := c.Event()
params := &UserEvent{Name: "foo"}
id, meta, err := event.Fire(params, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
events, qm, err := event.List("", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex != event.IDToIndex(id) {
t.Fatalf("Bad: %#v", qm)
}
if events[len(events)-1].ID != id {
t.Fatalf("bad: %#v", events)
}
}

View File

@ -1,136 +0,0 @@
package consulapi
import (
"fmt"
)
// HealthCheck is used to represent a single check
type HealthCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
}
// ServiceEntry is used for the health service endpoint
type ServiceEntry struct {
Node *Node
Service *AgentService
Checks []*HealthCheck
}
// Health can be used to query the Health endpoints
type Health struct {
c *Client
}
// Health returns a handle to the health endpoints
func (c *Client) Health() *Health {
return &Health{c}
}
// Node is used to query for checks belonging to a given node
func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Checks is used to return the checks associated with a service
func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/checks/"+service)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query health information along with service info
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/service/"+service)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
}
if passingOnly {
r.params.Set("passing", "1")
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*ServiceEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
switch state {
case "any":
case "warning":
case "critical":
case "passing":
case "unknown":
default:
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
}
r := h.c.newRequest("GET", "/v1/health/state/"+state)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*HealthCheck
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
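
A hedged sketch of the health API above: list only the passing instances of a service, then count critical checks cluster-wide. It assumes a pre-built *consulapi.Client; the helper name is hypothetical.

```go
package main

import (
	"fmt"

	consulapi "github.com/armon/consul-api" // assumed import path
)

// passingInstances is a hypothetical helper over the Health endpoints above.
func passingInstances(c *consulapi.Client, service string) error {
	health := c.Health()

	// Server-side filtering: only instances whose checks are passing.
	entries, _, err := health.Service(service, "", true, nil)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fmt.Printf("node=%+v checks=%d\n", entry.Node, len(entry.Checks))
	}

	critical, _, err := health.State("critical", nil)
	if err != nil {
		return err
	}
	fmt.Printf("%d critical checks cluster-wide\n", len(critical))
	return nil
}
```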

View File

@ -1,98 +0,0 @@
package consulapi
import (
"testing"
"time"
)
func TestHealth_Node(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
health := c.Health()
info, err := agent.Self()
if err != nil {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"].(string)
checks, meta, err := health.Node(name, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_Checks(t *testing.T) {
c := makeClient(t)
agent := c.Agent()
health := c.Health()
// Make a service with a check
reg := &AgentServiceRegistration{
Name: "foo",
Check: &AgentServiceCheck{
TTL: "15s",
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
defer agent.ServiceDeregister("foo")
// Wait for the register...
time.Sleep(20 * time.Millisecond)
checks, meta, err := health.Checks("foo", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_Service(t *testing.T) {
c := makeClient(t)
health := c.Health()
// consul service should always exist...
checks, meta, err := health.Service("consul", "", true, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}
func TestHealth_State(t *testing.T) {
c := makeClient(t)
health := c.Health()
checks, meta, err := health.State("any", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.LastIndex == 0 {
t.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
t.Fatalf("Bad: %v", checks)
}
}

View File

@ -1,219 +0,0 @@
package consulapi
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"strings"
)
// KVPair is used to represent a single K/V entry
type KVPair struct {
Key string
CreateIndex uint64
ModifyIndex uint64
LockIndex uint64
Flags uint64
Value []byte
Session string
}
// KVPairs is a list of KVPair objects
type KVPairs []*KVPair
// KV is used to manipulate the K/V API
type KV struct {
c *Client
}
// KV is used to return a handle to the K/V APIs
func (c *Client) KV() *KV {
return &KV{c}
}
// Get is used to lookup a single key
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
resp, qm, err := k.getInternal(key, nil, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to lookup all keys under a prefix
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// Keys is used to list all the keys under a prefix. Optionally,
// a separator can be used to limit the responses.
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
params := map[string]string{"keys": ""}
if separator != "" {
params["separator"] = separator
}
resp, qm, err := k.getInternal(prefix, params, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []string
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
r := k.c.newRequest("GET", "/v1/kv/"+key)
r.setQueryOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := k.c.doRequest(r)
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
resp.Body.Close()
return nil, qm, nil
} else if resp.StatusCode != 200 {
resp.Body.Close()
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
return resp, qm, nil
}
// Put is used to write a new value. Only the
// Key, Flags and Value are respected.
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
params := make(map[string]string, 1)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
_, wm, err := k.put(p.Key, params, p.Value, q)
return wm, err
}
// CAS is used for a Check-And-Set operation. The Key,
// ModifyIndex, Flags and Value are respected. Returns true
// on success or false on failures.
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
return k.put(p.Key, params, p.Value, q)
}
// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["acquire"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
// Release is used for a lock release operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["release"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
r := k.c.newRequest("PUT", "/v1/kv/"+key)
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
r.body = bytes.NewReader(body)
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, qm, nil
}
// Delete is used to delete a single key
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
return k.deleteInternal(key, nil, w)
}
// DeleteTree is used to delete all keys under a prefix
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
return k.deleteInternal(prefix, []string{"recurse"}, w)
}
func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) {
r := k.c.newRequest("DELETE", "/v1/kv/"+key)
r.setWriteOptions(q)
for _, param := range params {
r.params.Set(param, "")
}
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
return qm, nil
}
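
A minimal sketch of the K/V API above: put a key, read it back, then attempt a check-and-set. It assumes a pre-built *consulapi.Client; the key name and helper name are placeholders.

```go
package main

import (
	"fmt"

	consulapi "github.com/armon/consul-api" // assumed import path
)

// kvRoundTrip is a hypothetical helper over the K/V endpoints above.
func kvRoundTrip(c *consulapi.Client) error {
	kv := c.KV()

	p := &consulapi.KVPair{Key: "service/config/port", Value: []byte("8000")}
	if _, err := kv.Put(p, nil); err != nil {
		return err
	}

	pair, meta, err := kv.Get("service/config/port", nil)
	if err != nil {
		return err
	}
	fmt.Printf("value=%s index=%d\n", pair.Value, meta.LastIndex)

	// CAS only succeeds while ModifyIndex still matches the stored entry.
	pair.Value = []byte("9000")
	ok, _, err := kv.CAS(pair, nil)
	if err != nil {
		return err
	}
	fmt.Println("cas applied:", ok)
	return nil
}
```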

View File

@ -1,374 +0,0 @@
package consulapi
import (
"bytes"
"path"
"testing"
"time"
)
func TestClientPutGetDelete(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Get a key that does not exist yet
key := testKey()
pair, _, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
// Put the key
value := []byte("test")
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
if pair.Flags != 42 {
t.Fatalf("unexpected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete
if _, err := kv.Delete(key, nil); err != nil {
t.Fatalf("err: %v", err)
}
// Get should fail
pair, _, err = kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
}
func TestClient_List_DeleteRecurse(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Generate some test keys
prefix := testKey()
var keys []string
for i := 0; i < 100; i++ {
keys = append(keys, path.Join(prefix, testKey()))
}
// Set values
value := []byte("test")
for _, key := range keys {
p := &KVPair{Key: key, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// List the values
pairs, meta, err := kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != len(keys) {
t.Fatalf("got %d keys", len(pairs))
}
for _, pair := range pairs {
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete all
if _, err := kv.DeleteTree(prefix, nil); err != nil {
t.Fatalf("err: %v", err)
}
// List the values
pairs, _, err = kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 0 {
t.Fatalf("got %d keys", len(pairs))
}
}
func TestClient_CAS(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Put the key
key := testKey()
value := []byte("test")
p := &KVPair{Key: key, Value: value}
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("CAS failure")
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// CAS update with bad index
newVal := []byte("foo")
p.Value = newVal
p.ModifyIndex = 1
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if work {
t.Fatalf("unexpected CAS")
}
// CAS update with valid index
p.ModifyIndex = meta.LastIndex
if work, _, err := kv.CAS(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("unexpected CAS failure")
}
}
func TestClient_WatchGet(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Get a key that does not exist yet
key := testKey()
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair != nil {
t.Fatalf("unexpected value: %#v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Put the key
value := []byte("test")
go func() {
c := makeClient(t)
kv := c.KV()
time.Sleep(100 * time.Millisecond)
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}()
// Get should work
options := &QueryOptions{WaitIndex: meta.LastIndex}
pair, meta2, err := kv.Get(key, options)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if !bytes.Equal(pair.Value, value) {
t.Fatalf("unexpected value: %#v", pair)
}
if pair.Flags != 42 {
t.Fatalf("unexpected value: %#v", pair)
}
if meta2.LastIndex <= meta.LastIndex {
t.Fatalf("unexpected value: %#v", meta2)
}
}
func TestClient_WatchList(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// List a prefix that has no keys yet
prefix := testKey()
key := path.Join(prefix, testKey())
pairs, meta, err := kv.List(prefix, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 0 {
t.Fatalf("unexpected value: %#v", pairs)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Put the key
value := []byte("test")
go func() {
c := makeClient(t)
kv := c.KV()
time.Sleep(100 * time.Millisecond)
p := &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}()
// Get should work
options := &QueryOptions{WaitIndex: meta.LastIndex}
pairs, meta2, err := kv.List(prefix, options)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(pairs) != 1 {
t.Fatalf("expected value: %#v", pairs)
}
if !bytes.Equal(pairs[0].Value, value) {
t.Fatalf("unexpected value: %#v", pairs)
}
if pairs[0].Flags != 42 {
t.Fatalf("unexpected value: %#v", pairs)
}
if meta2.LastIndex <= meta.LastIndex {
t.Fatalf("unexpected value: %#v", meta2)
}
}
func TestClient_Keys_DeleteRecurse(t *testing.T) {
c := makeClient(t)
kv := c.KV()
// Generate some test keys
prefix := testKey()
var keys []string
for i := 0; i < 100; i++ {
keys = append(keys, path.Join(prefix, testKey()))
}
// Set values
value := []byte("test")
for _, key := range keys {
p := &KVPair{Key: key, Value: value}
if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// List the values
out, meta, err := kv.Keys(prefix, "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != len(keys) {
t.Fatalf("got %d keys", len(out))
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Delete all
if _, err := kv.DeleteTree(prefix, nil); err != nil {
t.Fatalf("err: %v", err)
}
// List the values
out, _, err = kv.Keys(prefix, "", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 0 {
t.Fatalf("got %d keys", len(out))
}
}
func TestClient_AcquireRelease(t *testing.T) {
c := makeClient(t)
session := c.Session()
kv := c.KV()
// Make a session
id, _, err := session.CreateNoChecks(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
// Acquire the key
key := testKey()
value := []byte("test")
p := &KVPair{Key: key, Value: value, Session: id}
if work, _, err := kv.Acquire(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("Lock failure")
}
// Get should work
pair, meta, err := kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != id {
t.Fatalf("Expected lock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
// Release
if work, _, err := kv.Release(p, nil); err != nil {
t.Fatalf("err: %v", err)
} else if !work {
t.Fatalf("Release fail")
}
// Get should work
pair, meta, err = kv.Get(key, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if pair == nil {
t.Fatalf("expected value: %#v", pair)
}
if pair.LockIndex != 1 {
t.Fatalf("Expected lock: %v", pair)
}
if pair.Session != "" {
t.Fatalf("Expected unlock: %v", pair)
}
if meta.LastIndex == 0 {
t.Fatalf("unexpected value: %#v", meta)
}
}

View File

@ -1,167 +0,0 @@
package consulapi
import (
"time"
)
// SessionEntry represents a session in consul
type SessionEntry struct {
CreateIndex uint64
ID string
Name string
Node string
Checks []string
LockDelay time.Duration
}
// Session can be used to query the Session endpoints
type Session struct {
c *Client
}
// Session returns a handle to the session endpoints
func (c *Client) Session() *Session {
return &Session{c}
}
// CreateNoChecks is like Create but is used specifically to create
// a session with no associated health checks.
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
body := make(map[string]interface{})
body["Checks"] = []string{}
if se != nil {
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
}
return s.create(body, q)
}
// Create makes a new session. Providing a session entry can
// customize the session. It can also be nil to use defaults.
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
var obj interface{}
if se != nil {
body := make(map[string]interface{})
obj = body
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if len(se.Checks) > 0 {
body["Checks"] = se.Checks
}
}
return s.create(obj, q)
}
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/create")
r.setWriteOptions(q)
r.obj = obj
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Destroy invalidates a given session
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/destroy/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// Info looks up a single session
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/info/"+id)
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// Node gets the sessions for a node
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// List gets all active sessions
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/session/list")
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
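
A short sketch combining the session API above with the K/V Acquire call: create a session, then use it to take a lock on a key. It assumes a pre-built *consulapi.Client; the session name, key, and helper name are placeholders.

```go
package main

import (
	"fmt"

	consulapi "github.com/armon/consul-api" // assumed import path
)

// lockWithSession is a hypothetical helper over the Session and KV endpoints above.
func lockWithSession(c *consulapi.Client, key string) error {
	session := c.Session()

	id, _, err := session.Create(&consulapi.SessionEntry{Name: "example-lock"}, nil)
	if err != nil {
		return err
	}
	defer session.Destroy(id, nil)

	acquired, _, err := c.KV().Acquire(&consulapi.KVPair{
		Key:     key,
		Value:   []byte("holder"),
		Session: id,
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println("lock acquired:", acquired)
	return nil
}
```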

View File

@ -1,136 +0,0 @@
package consulapi
import (
"testing"
)
func TestSession_CreateDestroy(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, meta, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
meta, err = session.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if meta.RequestTime == 0 {
t.Fatalf("bad: %v", meta)
}
}
func TestSession_Info(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
info, qm, err := session.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if info == nil {
t.Fatalf("should get session")
}
if info.CreateIndex == 0 {
t.Fatalf("bad: %v", info)
}
if info.ID != id {
t.Fatalf("bad: %v", info)
}
if info.Name != "" {
t.Fatalf("bad: %v", info)
}
if info.Node == "" {
t.Fatalf("bad: %v", info)
}
if len(info.Checks) == 0 {
t.Fatalf("bad: %v", info)
}
if info.LockDelay == 0 {
t.Fatalf("bad: %v", info)
}
}
func TestSession_Node(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
info, qm, err := session.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
sessions, qm, err := session.Node(info.Node, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(sessions) != 1 {
t.Fatalf("bad: %v", sessions)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}
func TestSession_List(t *testing.T) {
c := makeClient(t)
session := c.Session()
id, _, err := session.Create(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
defer session.Destroy(id, nil)
sessions, qm, err := session.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(sessions) != 1 {
t.Fatalf("bad: %v", sessions)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}

View File

@ -1,43 +0,0 @@
package consulapi
// Status can be used to query the Status endpoints
type Status struct {
c *Client
}
// Status returns a handle to the status endpoints
func (c *Client) Status() *Status {
return &Status{c}
}
// Leader is used to query for a known leader
func (s *Status) Leader() (string, error) {
r := s.c.newRequest("GET", "/v1/status/leader")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", err
}
defer resp.Body.Close()
var leader string
if err := decodeBody(resp, &leader); err != nil {
return "", err
}
return leader, nil
}
// Peers is used to query for the known raft peers
func (s *Status) Peers() ([]string, error) {
r := s.c.newRequest("GET", "/v1/status/peers")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var peers []string
if err := decodeBody(resp, &peers); err != nil {
return nil, err
}
return peers, nil
}
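
A small sketch of the status API above, assuming a pre-built *consulapi.Client; the helper name is hypothetical.

```go
package main

import (
	"fmt"

	consulapi "github.com/armon/consul-api" // assumed import path
)

// clusterStatus is a hypothetical helper over the Status endpoints above.
func clusterStatus(c *consulapi.Client) error {
	leader, err := c.Status().Leader()
	if err != nil {
		return err
	}
	peers, err := c.Status().Peers()
	if err != nil {
		return err
	}
	fmt.Printf("leader=%s peers=%v\n", leader, peers)
	return nil
}
```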

View File

@ -1,31 +0,0 @@
package consulapi
import (
"testing"
)
func TestStatusLeader(t *testing.T) {
c := makeClient(t)
status := c.Status()
leader, err := status.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
if leader == "" {
t.Fatalf("Expected leader")
}
}
func TestStatusPeers(t *testing.T) {
c := makeClient(t)
status := c.Status()
peers, err := status.Peers()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(peers) == 0 {
t.Fatalf("Expected peers ")
}
}

View File

@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

View File

@ -1,2 +0,0 @@
language: go
go: 1.3.3

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Cenk Altı
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,66 +0,0 @@
# backoff
[![GoDoc](https://godoc.org/github.com/cenkalti/backoff?status.png)](https://godoc.org/github.com/cenkalti/backoff)
[![Build Status](https://travis-ci.org/cenkalti/backoff.png)](https://travis-ci.org/cenkalti/backoff)
This is a Go port of the exponential backoff algorithm from
[google-http-java-client](https://code.google.com/p/google-http-java-client/wiki/ExponentialBackoff).
[Exponential backoff](http://en.wikipedia.org/wiki/Exponential_backoff)
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
in order to gradually find an acceptable rate.
The retry intervals increase exponentially and stop increasing once a certain threshold is met.
## Install
```bash
go get github.com/cenkalti/backoff
```
## Example
A simple retry helper that uses the exponential back-off algorithm:
```go
operation := func() error {
// An operation that might fail
return nil // or return an error to trigger a retry
}
err := backoff.Retry(operation, backoff.NewExponentialBackOff())
if err != nil {
// handle error
}
// operation is successful
```
Ticker example:
```go
operation := func() error {
// An operation that may fail
return nil // or return an error to trigger a retry
}
b := backoff.NewExponentialBackOff()
ticker := backoff.NewTicker(b)
var err error
for _ = range ticker.C {
if err = operation(); err != nil {
log.Println(err, "will retry...")
continue
}
ticker.Stop()
break
}
if err != nil {
// Operation has failed.
}
// Operation is successful.
```

View File

@ -1,56 +0,0 @@
// Package backoff implements backoff algorithms for retrying operations.
//
// Also has a Retry() helper for retrying operations that may fail.
package backoff
import "time"
// Back-off policy when retrying an operation.
type BackOff interface {
// Gets the duration to wait before retrying the operation or
// backoff.Stop to indicate that no retries should be made.
//
// Example usage:
//
// duration := backoff.NextBackOff();
// if (duration == backoff.Stop) {
// // do not retry operation
// } else {
// // sleep for duration and retry operation
// }
//
NextBackOff() time.Duration
// Reset to initial state.
Reset()
}
// Stop indicates that no more retries should be made; it is returned by NextBackOff().
const Stop time.Duration = -1
// ZeroBackOff is a fixed back-off policy whose back-off time is always zero,
// meaning that the operation is retried immediately without waiting.
type ZeroBackOff struct{}
func (b *ZeroBackOff) Reset() {}
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
// StopBackOff is a fixed back-off policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should not be retried.
type StopBackOff struct{}
func (b *StopBackOff) Reset() {}
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
type ConstantBackOff struct {
Interval time.Duration
}
func (b *ConstantBackOff) Reset() {}
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
return &ConstantBackOff{Interval: d}
}

View File

@ -1,28 +0,0 @@
package backoff
import (
"time"
"testing"
)
func TestNextBackOffMillis(t *testing.T) {
subtestNextBackOff(t, 0, new(ZeroBackOff))
subtestNextBackOff(t, Stop, new(StopBackOff))
}
func subtestNextBackOff(t *testing.T, expectedValue time.Duration, backOffPolicy BackOff) {
for i := 0; i < 10; i++ {
next := backOffPolicy.NextBackOff()
if next != expectedValue {
t.Errorf("got: %d expected: %d", next, expectedValue)
}
}
}
func TestConstantBackOff(t *testing.T) {
backoff := NewConstantBackOff(time.Second)
if backoff.NextBackOff() != time.Second {
t.Error("invalid interval")
}
}

View File

@ -1,141 +0,0 @@
package backoff
import (
"math/rand"
"time"
)
/*
ExponentialBackOff is an implementation of BackOff that increases the back off
period for each retry attempt using a randomization function that grows exponentially.
NextBackOff() is calculated using the following formula:
randomized_interval =
retry_interval * (random value in range [1 - randomization_factor, 1 + randomization_factor])
In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval. For example, using 2 seconds as the base retry
interval and 0.5 as the randomization factor, the actual back off period used in the next retry
attempt will be between 1 and 3 seconds.
Note: max_interval caps the retry_interval and not the randomized_interval.
If the time elapsed since an ExponentialBackOff instance is created goes past the
max_elapsed_time then the method NextBackOff() starts returning backoff.Stop.
The elapsed time can be reset by calling Reset().
Example: The default retry_interval is .5 seconds, default randomization_factor is 0.5, default
multiplier is 1.5 and the default max_interval is 1 minute. For 10 tries the sequence will be
(values in seconds) and assuming we go over the max_elapsed_time on the 10th try:
request# retry_interval randomized_interval
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
InitialInterval time.Duration
RandomizationFactor float64
Multiplier float64
MaxInterval time.Duration
// After MaxElapsedTime the ExponentialBackOff stops.
// It never stops if MaxElapsedTime == 0.
MaxElapsedTime time.Duration
Clock Clock
currentInterval time.Duration
startTime time.Time
}
// Clock is an interface that returns current time for BackOff.
type Clock interface {
Now() time.Time
}
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
DefaultRandomizationFactor = 0.5
DefaultMultiplier = 1.5
DefaultMaxInterval = 60 * time.Second
DefaultMaxElapsedTime = 15 * time.Minute
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
return &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
Multiplier: DefaultMultiplier,
MaxInterval: DefaultMaxInterval,
MaxElapsedTime: DefaultMaxElapsedTime,
Clock: SystemClock,
}
}
type systemClock struct{}
func (t systemClock) Now() time.Time {
return time.Now()
}
// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}
// Reset the interval back to the initial retry interval and restarts the timer.
func (b *ExponentialBackOff) Reset() {
b.currentInterval = b.InitialInterval
b.startTime = b.Clock.Now()
}
// NextBackOff calculates the next back off interval using the formula:
// randomized_interval = retry_interval +/- (randomization_factor * retry_interval)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
// Make sure we have not gone over the maximum elapsed time.
if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
return Stop
}
defer b.incrementCurrentInterval()
return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
}
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano().
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
return b.Clock.Now().Sub(b.startTime)
}
// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
// Check for overflow, if overflow is detected set the current interval to the max interval.
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
b.currentInterval = b.MaxInterval
} else {
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
}
}
// Returns a random value from the interval:
// [currentInterval - randomizationFactor*currentInterval, currentInterval + randomizationFactor*currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
var delta = randomizationFactor * float64(currentInterval)
var minInterval = float64(currentInterval) - delta
var maxInterval = float64(currentInterval) + delta
// Get a random value from the range [minInterval, maxInterval].
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
// we want a 33% chance for selecting either 1, 2 or 3.
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
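
To make the randomization described above concrete, a small sketch that prints a few successive back-off intervals using the defaults; output varies per run because of the randomization factor.

```go
package main

import (
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.Reset()
	// With the defaults (0.5s initial interval, multiplier 1.5, factor 0.5),
	// successive values fall roughly in [0.25s, 0.75s], [0.375s, 1.125s], ...
	for i := 0; i < 5; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```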

View File

@ -1,111 +0,0 @@
package backoff
import (
"math"
"testing"
"time"
)
func TestBackOff(t *testing.T) {
var (
testInitialInterval = 500 * time.Millisecond
testRandomizationFactor = 0.1
testMultiplier = 2.0
testMaxInterval = 5 * time.Second
testMaxElapsedTime = 15 * time.Minute
)
exp := NewExponentialBackOff()
exp.InitialInterval = testInitialInterval
exp.RandomizationFactor = testRandomizationFactor
exp.Multiplier = testMultiplier
exp.MaxInterval = testMaxInterval
exp.MaxElapsedTime = testMaxElapsedTime
exp.Reset()
var expectedResults = []time.Duration{500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000}
for i, d := range expectedResults {
expectedResults[i] = d * time.Millisecond
}
for _, expected := range expectedResults {
assertEquals(t, expected, exp.currentInterval)
// Assert that the next back off falls in the expected range.
var minInterval = expected - time.Duration(testRandomizationFactor*float64(expected))
var maxInterval = expected + time.Duration(testRandomizationFactor*float64(expected))
var actualInterval = exp.NextBackOff()
if !(minInterval <= actualInterval && actualInterval <= maxInterval) {
t.Error("error")
}
}
}
func TestGetRandomizedInterval(t *testing.T) {
// 33% chance of being 1.
assertEquals(t, 1, getRandomValueFromInterval(0.5, 0, 2))
assertEquals(t, 1, getRandomValueFromInterval(0.5, 0.33, 2))
// 33% chance of being 2.
assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.34, 2))
assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.66, 2))
// 33% chance of being 3.
assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.67, 2))
assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.99, 2))
}
type TestClock struct {
i time.Duration
start time.Time
}
func (c *TestClock) Now() time.Time {
t := c.start.Add(c.i)
c.i += time.Second
return t
}
func TestGetElapsedTime(t *testing.T) {
var exp = NewExponentialBackOff()
exp.Clock = &TestClock{}
exp.Reset()
var elapsedTime = exp.GetElapsedTime()
if elapsedTime != time.Second {
t.Errorf("elapsedTime=%d", elapsedTime)
}
}
func TestMaxElapsedTime(t *testing.T) {
var exp = NewExponentialBackOff()
exp.Clock = &TestClock{start: time.Time{}.Add(10000 * time.Second)}
if exp.NextBackOff() != Stop {
t.Error("error2")
}
// Reset the start time to the zero value, ensuring that the elapsed time will be greater
// than the max elapsed time.
exp.startTime = time.Time{}
assertEquals(t, Stop, exp.NextBackOff())
}
func TestBackOffOverflow(t *testing.T) {
var (
testInitialInterval time.Duration = math.MaxInt64 / 2
testMaxInterval time.Duration = math.MaxInt64
testMultiplier float64 = 2.1
)
exp := NewExponentialBackOff()
exp.InitialInterval = testInitialInterval
exp.Multiplier = testMultiplier
exp.MaxInterval = testMaxInterval
exp.Reset()
exp.NextBackOff()
// Assert that when an overflow is possible, the current interval is set to the max interval.
assertEquals(t, testMaxInterval, exp.currentInterval)
}
func assertEquals(t *testing.T, expected, value time.Duration) {
if expected != value {
t.Errorf("got: %d, expected: %d", value, expected)
}
}

View File

@ -1,44 +0,0 @@
package backoff
import "time"
// Retry the function f until it does not return an error or the BackOff stops.
// f is guaranteed to be run at least once.
// It is the caller's responsibility to reset b after Retry returns.
//
// Usage:
// operation := func() error {
// // An operation that may fail
// }
//
// err := backoff.Retry(operation, backoff.NewExponentialBackOff())
// if err != nil {
// // Operation has failed.
// }
//
// // Operation is successful.
//
func Retry(f func() error, b BackOff) error { return RetryNotify(f, b, nil) }
// RetryNotify calls notify function with the error and wait duration for each failed attempt before sleep.
func RetryNotify(f func() error, b BackOff, notify func(err error, wait time.Duration)) error {
var err error
var next time.Duration
b.Reset()
for {
if err = f(); err == nil {
return nil
}
if next = b.NextBackOff(); next == Stop {
return err
}
if notify != nil {
notify(err, next)
}
time.Sleep(next)
}
}

View File

@ -1,34 +0,0 @@
package backoff
import (
"errors"
"log"
"testing"
)
func TestRetry(t *testing.T) {
const successOn = 3
var i = 0
// This function succeeds on the "successOn"th call.
f := func() error {
i++
log.Printf("function is called %d. time\n", i)
if i == successOn {
log.Println("OK")
return nil
}
log.Println("error")
return errors.New("error")
}
err := Retry(f, NewExponentialBackOff())
if err != nil {
t.Errorf("unexpected error: %s", err.Error())
}
if i != successOn {
t.Errorf("invalid number of retries: %d", i)
}
}

View File

@ -1,102 +0,0 @@
package backoff
import (
"runtime"
"sync"
"time"
)
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Usage:
// operation := func() error {
// // An operation that may fail
// }
//
// b := backoff.NewExponentialBackOff()
// ticker := backoff.NewTicker(b)
//
// var err error
// for _ = range ticker.C {
// if err = operation(); err != nil {
// log.Println(err, "will retry...")
// continue
// }
//
// ticker.Stop()
// break
// }
//
// if err != nil {
// // Operation has failed.
// }
//
// // Operation is successful.
//
type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
stop chan struct{}
stopOnce sync.Once
}
// NewTicker returns a new Ticker containing a channel that will send the time at times
// specified by the BackOff argument. Ticker is guaranteed to tick at least once.
// The channel is closed when Stop method is called or BackOff stops.
func NewTicker(b BackOff) *Ticker {
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: b,
stop: make(chan struct{}),
}
go t.run()
runtime.SetFinalizer(t, (*Ticker).Stop)
return t
}
// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
t.stopOnce.Do(func() { close(t.stop) })
}
func (t *Ticker) run() {
c := t.c
defer close(c)
t.b.Reset()
// Ticker is guaranteed to tick at least once.
afterC := t.send(time.Now())
for {
if afterC == nil {
return
}
select {
case tick := <-afterC:
afterC = t.send(tick)
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
}
}
}
func (t *Ticker) send(tick time.Time) <-chan time.Time {
select {
case t.c <- tick:
case <-t.stop:
return nil
}
next := t.b.NextBackOff()
if next == Stop {
t.Stop()
return nil
}
return time.After(next)
}

View File

@ -1,45 +0,0 @@
package backoff
import (
"errors"
"log"
"testing"
)
func TestTicker(t *testing.T) {
const successOn = 3
var i = 0
// This function succeeds on the "successOn"th call.
f := func() error {
i++
log.Printf("function is called %d. time\n", i)
if i == successOn {
log.Println("OK")
return nil
}
log.Println("error")
return errors.New("error")
}
b := NewExponentialBackOff()
ticker := NewTicker(b)
var err error
for _ = range ticker.C {
if err = f(); err != nil {
t.Log(err)
continue
}
break
}
if err != nil {
t.Errorf("unexpected error: %s", err.Error())
}
if i != successOn {
t.Errorf("invalid number of retries: %d", i)
}
}

View File

@ -1,23 +0,0 @@
package etcd
// Add a new directory with a random etcd-generated key under the given path.
func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
raw, err := c.post(key, "", ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Add a new file with a random etcd-generated key under the given path.
func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.post(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}

View File

@ -1,73 +0,0 @@
package etcd
import "testing"
func TestAddChild(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
c.Delete("nonexistentDir", true)
}()
c.CreateDir("fooDir", 5)
_, err := c.AddChild("fooDir", "v0", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.AddChild("fooDir", "v1", 5)
if err != nil {
t.Fatal(err)
}
resp, err := c.Get("fooDir", true, false)
// The child with v0 should precede the child with v1 because it was added
// earlier, so it should have a lower key.
if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
t.Fatalf("AddChild 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
" The response was: %#v", resp)
}
// Creating a child under a nonexistent directory should succeed.
// The directory should be created.
resp, err = c.AddChild("nonexistentDir", "foo", 5)
if err != nil {
t.Fatal(err)
}
}
func TestAddChildDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
c.Delete("nonexistentDir", true)
}()
c.CreateDir("fooDir", 5)
_, err := c.AddChildDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.AddChildDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
resp, err := c.Get("fooDir", true, false)
// Both children were added with AddChildDir, so each should be an empty
// directory with no child nodes of its own.
if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
t.Fatalf("AddChildDir 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
" The response was: %#v", resp)
}
// Creating a child under a nonexistent directory should succeed.
// The directory should be created.
resp, err = c.AddChildDir("nonexistentDir", 5)
if err != nil {
t.Fatal(err)
}
}

View File

@ -1,435 +0,0 @@
package etcd
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path"
"time"
)
// See SetConsistency for how to use these constants.
const (
// Using strings rather than iota because the consistency level
// could be persisted to disk, so it'd be better to use
// human-readable values.
STRONG_CONSISTENCY = "STRONG"
WEAK_CONSISTENCY = "WEAK"
)
const (
defaultBufferSize = 10
)
type Config struct {
CertFile string `json:"certFile"`
KeyFile string `json:"keyFile"`
CaCertFile []string `json:"caCertFiles"`
DialTimeout time.Duration `json:"timeout"`
Consistency string `json:"consistency"`
}
type Client struct {
config Config `json:"config"`
cluster *Cluster `json:"cluster"`
httpClient *http.Client
persistence io.Writer
cURLch chan string
// CheckRetry can be used to control the policy for failed requests
// and modify the cluster if needed.
// The client calls it before sending requests again, and
// stops retrying if CheckRetry returns some error. The cases that
// this function needs to handle include no response and unexpected
// http status code of response.
// If CheckRetry is nil, client will call the default one
// `DefaultCheckRetry`.
// Argument cluster is the etcd.Cluster object that these requests have been made on.
// Argument numReqs is the number of http.Requests that have been made so far.
// Argument lastResp is the http.Responses from the last request.
// Argument err is the reason of the failure.
CheckRetry func(cluster *Cluster, numReqs int,
lastResp http.Response, err error) error
}
// NewClient creates a basic client that is configured to be used
// with the given machine list.
func NewClient(machines []string) *Client {
config := Config{
// default timeout is one second
DialTimeout: time.Second,
// default consistency level is STRONG
Consistency: STRONG_CONSISTENCY,
}
client := &Client{
cluster: NewCluster(machines),
config: config,
}
client.initHTTPClient()
client.saveConfig()
return client
}
// NewTLSClient creates a basic client with TLS configuration
func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
// overwrite the default machine to use https
if len(machines) == 0 {
machines = []string{"https://127.0.0.1:4001"}
}
config := Config{
// default timeout is one second
DialTimeout: time.Second,
// default consistency level is STRONG
Consistency: STRONG_CONSISTENCY,
CertFile: cert,
KeyFile: key,
CaCertFile: make([]string, 0),
}
client := &Client{
cluster: NewCluster(machines),
config: config,
}
err := client.initHTTPSClient(cert, key)
if err != nil {
return nil, err
}
err = client.AddRootCA(caCert)
client.saveConfig()
return client, nil
}
// NewClientFromFile creates a client from a given file path.
// The given file is expected to use the JSON format.
func NewClientFromFile(fpath string) (*Client, error) {
fi, err := os.Open(fpath)
if err != nil {
return nil, err
}
defer func() {
if err := fi.Close(); err != nil {
panic(err)
}
}()
return NewClientFromReader(fi)
}
// NewClientFromReader creates a Client configured from a given reader.
// The configuration is expected to use the JSON format.
func NewClientFromReader(reader io.Reader) (*Client, error) {
c := new(Client)
b, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, c)
if err != nil {
return nil, err
}
if c.config.CertFile == "" {
c.initHTTPClient()
} else {
err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
}
if err != nil {
return nil, err
}
for _, caCert := range c.config.CaCertFile {
if err := c.AddRootCA(caCert); err != nil {
return nil, err
}
}
return c, nil
}
// Override the Client's HTTP Transport object
func (c *Client) SetTransport(tr *http.Transport) {
c.httpClient.Transport = tr
}
// initHTTPClient initializes an HTTP client for the etcd client
func (c *Client) initHTTPClient() {
tr := &http.Transport{
Dial: c.dial,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
c.httpClient = &http.Client{Transport: tr}
}
// initHTTPSClient initializes an HTTPS client for the etcd client
func (c *Client) initHTTPSClient(cert, key string) error {
if cert == "" || key == "" {
return errors.New("Require both cert and key path")
}
tlsCert, err := tls.LoadX509KeyPair(cert, key)
if err != nil {
return err
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{tlsCert},
InsecureSkipVerify: true,
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
Dial: c.dial,
}
c.httpClient = &http.Client{Transport: tr}
return nil
}
// SetPersistence sets a writer to which the config will be
// written every time it's changed.
func (c *Client) SetPersistence(writer io.Writer) {
c.persistence = writer
}
// SetConsistency changes the consistency level of the client.
//
// When consistency is set to STRONG_CONSISTENCY, all requests,
// including GET, are sent to the leader. This means that, assuming
// the absence of leader failures, GET requests are guaranteed to see
// the changes made by previous requests.
//
// When consistency is set to WEAK_CONSISTENCY, other requests
// are still sent to the leader, but GET requests are sent to a
// random server from the server pool. This reduces the read
// load on the leader, but it's not guaranteed that the GET requests
// will see changes made by previous requests (they might have not
// yet been committed on non-leader servers).
func (c *Client) SetConsistency(consistency string) error {
if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
}
c.config.Consistency = consistency
return nil
}
// Sets the DialTimeout value
func (c *Client) SetDialTimeout(d time.Duration) {
c.config.DialTimeout = d
}
// AddRootCA adds a root CA cert for the etcd client
func (c *Client) AddRootCA(caCert string) error {
if c.httpClient == nil {
return errors.New("Client has not been initialized yet!")
}
certBytes, err := ioutil.ReadFile(caCert)
if err != nil {
return err
}
tr, ok := c.httpClient.Transport.(*http.Transport)
if !ok {
panic("AddRootCA(): Transport type assert should not fail")
}
if tr.TLSClientConfig.RootCAs == nil {
caCertPool := x509.NewCertPool()
ok = caCertPool.AppendCertsFromPEM(certBytes)
if ok {
tr.TLSClientConfig.RootCAs = caCertPool
}
tr.TLSClientConfig.InsecureSkipVerify = false
} else {
ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
}
if !ok {
err = errors.New("Unable to load caCert")
}
c.config.CaCertFile = append(c.config.CaCertFile, caCert)
c.saveConfig()
return err
}
// SetCluster updates cluster information using the given machine list.
func (c *Client) SetCluster(machines []string) bool {
success := c.internalSyncCluster(machines)
return success
}
func (c *Client) GetCluster() []string {
return c.cluster.Machines
}
// SyncCluster updates the cluster information using the internal machine list.
func (c *Client) SyncCluster() bool {
return c.internalSyncCluster(c.cluster.Machines)
}
// internalSyncCluster syncs cluster information using the given machine list.
func (c *Client) internalSyncCluster(machines []string) bool {
for _, machine := range machines {
httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
resp, err := c.httpClient.Get(httpPath)
if err != nil {
// try another machine in the cluster
continue
} else {
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
// try another machine in the cluster
continue
}
// update Machines List
c.cluster.updateFromStr(string(b))
// update leader
// the first one in the machine list is the leader
c.cluster.switchLeader(0)
logger.Debug("sync.machines ", c.cluster.Machines)
c.saveConfig()
return true
}
}
return false
}
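
A small sketch of bootstrapping the machine list from a single seed node (the address is a placeholder):

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    // Seed with one known machine; SyncCluster asks it for the full
    // member list and records the first entry as the leader.
    c := etcd.NewClient([]string{"http://127.0.0.1:4001"})
    if !c.SyncCluster() {
        log.Fatal("could not sync cluster from the seed machine")
    }
    fmt.Println("machines:", c.GetCluster())
}
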
// createHttpPath creates a complete HTTP URL.
// serverName should contain both the host name and a port number, if any.
func (c *Client) createHttpPath(serverName string, _path string) string {
u, err := url.Parse(serverName)
if err != nil {
panic(err)
}
u.Path = path.Join(u.Path, _path)
if u.Scheme == "" {
u.Scheme = "http"
}
return u.String()
}
// dial attempts to open a TCP connection to the provided address, explicitly
// enabling keep-alives with a one-second interval.
func (c *Client) dial(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
if err != nil {
return nil, err
}
tcpConn, ok := conn.(*net.TCPConn)
if !ok {
return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
}
// Keep TCP alive to check whether or not the remote machine is down
if err = tcpConn.SetKeepAlive(true); err != nil {
return nil, err
}
if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
return nil, err
}
return tcpConn, nil
}
func (c *Client) OpenCURL() {
c.cURLch = make(chan string, defaultBufferSize)
}
func (c *Client) CloseCURL() {
c.cURLch = nil
}
func (c *Client) sendCURL(command string) {
go func() {
select {
case c.cURLch <- command:
default:
}
}()
}
func (c *Client) RecvCURL() string {
return <-c.cURLch
}
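
The cURL channel is mainly a debugging aid; a sketch of echoing the equivalent curl command for each request (this mirrors the curl_chan test later in this diff, and assumes a reachable etcd node):

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)
    // Once the channel is open, every request also produces an equivalent
    // curl command line that can be replayed by hand.
    c.OpenCURL()
    if _, err := c.Set("foo", "bar", 5); err != nil {
        log.Fatal(err)
    }
    fmt.Println(c.RecvCURL())
}
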
// saveConfig saves the current config using c.persistence.
func (c *Client) saveConfig() error {
if c.persistence != nil {
b, err := json.Marshal(c)
if err != nil {
return err
}
_, err = c.persistence.Write(b)
if err != nil {
return err
}
}
return nil
}
// MarshalJSON implements the Marshaller interface
// as defined by the standard JSON package.
func (c *Client) MarshalJSON() ([]byte, error) {
b, err := json.Marshal(struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{
Config: c.config,
Cluster: c.cluster,
})
if err != nil {
return nil, err
}
return b, nil
}
// UnmarshalJSON implements the Unmarshaller interface
// as defined by the standard JSON package.
func (c *Client) UnmarshalJSON(b []byte) error {
temp := struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{}
err := json.Unmarshal(b, &temp)
if err != nil {
return err
}
c.cluster = temp.Cluster
c.config = temp.Config
return nil
}

View File

@ -1,96 +0,0 @@
package etcd
import (
"encoding/json"
"fmt"
"net"
"net/url"
"os"
"testing"
)
// To pass this test, we need to create a cluster of 3 machines
// The server should be listening on 127.0.0.1:4001, 4002, 4003
func TestSync(t *testing.T) {
fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
// Explicit trailing slash to ensure this doesn't reproduce:
// https://github.com/coreos/go-etcd/issues/82
c := NewClient([]string{"http://127.0.0.1:4001/"})
success := c.SyncCluster()
if !success {
t.Fatal("cannot sync machines")
}
for _, m := range c.GetCluster() {
u, err := url.Parse(m)
if err != nil {
t.Fatal(err)
}
if u.Scheme != "http" {
t.Fatal("scheme must be http")
}
host, _, err := net.SplitHostPort(u.Host)
if err != nil {
t.Fatal(err)
}
if host != "127.0.0.1" {
t.Fatal("Host must be 127.0.0.1")
}
}
badMachines := []string{"abc", "edef"}
success = c.SetCluster(badMachines)
if success {
t.Fatal("should not sync on bad machines")
}
goodMachines := []string{"127.0.0.1:4002"}
success = c.SetCluster(goodMachines)
if !success {
t.Fatal("cannot sync machines")
} else {
fmt.Println(c.cluster.Machines)
}
}
func TestPersistence(t *testing.T) {
c := NewClient(nil)
c.SyncCluster()
fo, err := os.Create("config.json")
if err != nil {
t.Fatal(err)
}
defer func() {
if err := fo.Close(); err != nil {
panic(err)
}
}()
c.SetPersistence(fo)
err = c.saveConfig()
if err != nil {
t.Fatal(err)
}
c2, err := NewClientFromFile("config.json")
if err != nil {
t.Fatal(err)
}
// Verify that the two clients have the same config
b1, _ := json.Marshal(c)
b2, _ := json.Marshal(c2)
if string(b1) != string(b2) {
t.Fatalf("The two configs should be equal!")
}
}

View File

@ -1,51 +0,0 @@
package etcd
import (
"net/url"
"strings"
)
type Cluster struct {
Leader string `json:"leader"`
Machines []string `json:"machines"`
}
func NewCluster(machines []string) *Cluster {
// if an empty slice was sent in then just assume HTTP 4001 on localhost
if len(machines) == 0 {
machines = []string{"http://127.0.0.1:4001"}
}
// default leader and machines
return &Cluster{
Leader: machines[0],
Machines: machines,
}
}
// switchLeader switches the current leader to machines[num]
func (cl *Cluster) switchLeader(num int) {
logger.Debugf("switch.leader[from %v to %v]",
cl.Leader, cl.Machines[num])
cl.Leader = cl.Machines[num]
}
func (cl *Cluster) updateFromStr(machines string) {
cl.Machines = strings.Split(machines, ", ")
}
func (cl *Cluster) updateLeader(leader string) {
logger.Debugf("update.leader[%s,%s]", cl.Leader, leader)
cl.Leader = leader
}
func (cl *Cluster) updateLeaderFromURL(u *url.URL) {
var leader string
if u.Scheme == "" {
leader = "http://" + u.Host
} else {
leader = u.Scheme + "://" + u.Host
}
cl.updateLeader(leader)
}

View File

@ -1,34 +0,0 @@
package etcd
import "fmt"
func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
if prevValue == "" && prevIndex == 0 {
return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
}
options := Options{}
if prevValue != "" {
options["prevValue"] = prevValue
}
if prevIndex != 0 {
options["prevIndex"] = prevIndex
}
raw, err := c.delete(key, options)
if err != nil {
return nil, err
}
return raw, err
}

View File

@ -1,46 +0,0 @@
package etcd
import (
"testing"
)
func TestCompareAndDelete(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
// This should succeed with a correct prevValue
resp, err := c.CompareAndDelete("foo", "bar", 0)
if err != nil {
t.Fatal(err)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
}
resp, _ = c.Set("foo", "bar", 5)
// This should fail because it gives an incorrect prevValue
_, err = c.CompareAndDelete("foo", "xxx", 0)
if err == nil {
t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
}
// This should succeed because it gives a correct prevIndex
resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
if err != nil {
t.Fatal(err)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
}
c.Set("foo", "bar", 5)
// This should fail because it gives an incorrect prevIndex
resp, err = c.CompareAndDelete("foo", "", 29817514)
if err == nil {
t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
}
}

View File

@ -1,36 +0,0 @@
package etcd
import "fmt"
func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
prevValue string, prevIndex uint64) (*Response, error) {
raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
prevValue string, prevIndex uint64) (*RawResponse, error) {
if prevValue == "" && prevIndex == 0 {
return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
}
options := Options{}
if prevValue != "" {
options["prevValue"] = prevValue
}
if prevIndex != 0 {
options["prevIndex"] = prevIndex
}
raw, err := c.put(key, value, ttl, options)
if err != nil {
return nil, err
}
return raw, err
}

View File

@ -1,57 +0,0 @@
package etcd
import (
"testing"
)
func TestCompareAndSwap(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
// This should succeed
resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
}
// This should fail because it gives an incorrect prevValue
resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
if err == nil {
t.Fatalf("CompareAndSwap 2 should have failed. The response is: %#v", resp)
}
resp, err = c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed
resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
}
if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
}
// This should fail because it gives an incorrect prevIndex
resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
if err == nil {
t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp)
}
}

View File

@ -1,55 +0,0 @@
package etcd
import (
"fmt"
"io/ioutil"
"log"
"strings"
)
var logger *etcdLogger
func SetLogger(l *log.Logger) {
logger = &etcdLogger{l}
}
func GetLogger() *log.Logger {
return logger.log
}
type etcdLogger struct {
log *log.Logger
}
func (p *etcdLogger) Debug(args ...interface{}) {
msg := "DEBUG: " + fmt.Sprint(args...)
p.log.Println(msg)
}
func (p *etcdLogger) Debugf(f string, args ...interface{}) {
msg := "DEBUG: " + fmt.Sprintf(f, args...)
// Append newline if necessary
if !strings.HasSuffix(msg, "\n") {
msg = msg + "\n"
}
p.log.Print(msg)
}
func (p *etcdLogger) Warning(args ...interface{}) {
msg := "WARNING: " + fmt.Sprint(args...)
p.log.Println(msg)
}
func (p *etcdLogger) Warningf(f string, args ...interface{}) {
msg := "WARNING: " + fmt.Sprintf(f, args...)
// Append newline if necessary
if !strings.HasSuffix(msg, "\n") {
msg = msg + "\n"
}
p.log.Print(msg)
}
func init() {
// Default logger uses the go default log.
SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
}

View File

@ -1,28 +0,0 @@
package etcd
import (
"testing"
)
type Foo struct{}
type Bar struct {
one string
two int
}
// Tests that logs don't panic with arbitrary interfaces
func TestDebug(t *testing.T) {
f := &Foo{}
b := &Bar{"asfd", 3}
for _, test := range []interface{}{
1234,
"asdf",
f,
b,
} {
logger.Debug(test)
logger.Debugf("something, %s", test)
logger.Warning(test)
logger.Warningf("something, %s", test)
}
}

View File

@ -1,40 +0,0 @@
package etcd
// Delete deletes the given key.
//
// When recursive is set to false, if the key points to a
// directory the method will fail.
//
// When recursive is set to true, if the key points to a file,
// the file will be deleted; if the key points to a directory,
// then everything under the directory (including all child directories)
// will be deleted.
func (c *Client) Delete(key string, recursive bool) (*Response, error) {
raw, err := c.RawDelete(key, recursive, false)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// DeleteDir deletes an empty directory or a key value pair
func (c *Client) DeleteDir(key string) (*Response, error) {
raw, err := c.RawDelete(key, false, true)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
ops := Options{
"recursive": recursive,
"dir": dir,
}
return c.delete(key, ops)
}
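
A short sketch of the recursive/dir distinction (key names are placeholders; assumes a reachable etcd node):

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)
    c.Set("services/web", "10.0.0.1", 0)
    c.Set("services/db", "10.0.0.2", 0)

    // DeleteDir would fail here because "services" is not empty;
    // Delete with recursive=true removes the directory and its children.
    resp, err := c.Delete("services", true)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("action:", resp.Action)
}
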

View File

@ -1,81 +0,0 @@
package etcd
import (
"testing"
)
func TestDelete(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
resp, err := c.Delete("foo", false)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("Delete failed with %s", resp.Node.Value)
}
if !(resp.PrevNode.Value == "bar") {
t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
}
resp, err = c.Delete("foo", false)
if err == nil {
t.Fatalf("Delete should have failed because the key foo did not exist. "+
"The response was: %v", resp)
}
}
func TestDeleteAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("fooDir", true)
}()
c.SetDir("foo", 5)
// test delete an empty dir
resp, err := c.DeleteDir("foo")
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("DeleteAll 1 failed: %#v", resp)
}
if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
}
c.CreateDir("fooDir", 5)
c.Set("fooDir/foo", "bar", 5)
_, err = c.DeleteDir("fooDir")
if err == nil {
t.Fatal("should not able to delete a non-empty dir with deletedir")
}
resp, err = c.Delete("fooDir", true)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Value == "") {
t.Fatalf("DeleteAll 2 failed: %#v", resp)
}
if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
}
resp, err = c.Delete("foo", true)
if err == nil {
t.Fatalf("DeleteAll should have failed because the key foo did not exist. "+
"The response was: %v", resp)
}
}

View File

@ -1,48 +0,0 @@
package etcd
import (
"encoding/json"
"fmt"
)
const (
ErrCodeEtcdNotReachable = 501
)
var (
errorMap = map[int]string{
ErrCodeEtcdNotReachable: "All the given peers are not reachable",
}
)
type EtcdError struct {
ErrorCode int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause,omitempty"`
Index uint64 `json:"index"`
}
func (e EtcdError) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
}
func newError(errorCode int, cause string, index uint64) *EtcdError {
return &EtcdError{
ErrorCode: errorCode,
Message: errorMap[errorCode],
Cause: cause,
Index: index,
}
}
func handleError(b []byte) error {
etcdErr := new(EtcdError)
err := json.Unmarshal(b, etcdErr)
if err != nil {
logger.Warningf("cannot unmarshal etcd error: %v", err)
return err
}
return etcdErr
}
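
Errors decoded from etcd's JSON error body come back as *EtcdError, so callers can branch on etcd's own error code; a sketch (key name is a placeholder):

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)
    _, err := c.Get("no/such/key", false, false)
    if err == nil {
        return // the key unexpectedly exists
    }
    if etcdErr, ok := err.(*etcd.EtcdError); ok {
        // etcd's own error codes (100 = key not found, and so on) are
        // preserved here along with the cause and raft index.
        fmt.Println("etcd error:", etcdErr.ErrorCode, etcdErr.Message, etcdErr.Cause)
    } else {
        log.Fatal(err) // transport-level failure (no machine reachable, etc.)
    }
}
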

View File

@ -1,27 +0,0 @@
package etcd
// Get gets the file or directory associated with the given key.
// If the key points to a directory, files and directories under
// it will be returned in sorted or unsorted order, depending on
// the sort flag.
// If recursive is set to false, contents under child directories
// will not be returned.
// If recursive is set to true, all the contents will be returned.
func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
raw, err := c.RawGet(key, sort, recursive)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
ops := Options{
"recursive": recursive,
"sorted": sort,
}
return c.get(key, ops)
}
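
A sketch of a sorted, recursive read (directory and key names are placeholders; assumes a reachable etcd node):

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)
    c.Set("app/db/host", "10.0.0.5", 0)
    c.Set("app/db/port", "5432", 0)

    // sort=true returns children in key order; recursive=true also
    // descends into child directories such as app/db.
    resp, err := c.Get("app", true, true)
    if err != nil {
        log.Fatal(err)
    }
    for _, dir := range resp.Node.Nodes {
        for _, n := range dir.Nodes {
            fmt.Println(n.Key, "=", n.Value)
        }
    }
}
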

View File

@ -1,131 +0,0 @@
package etcd
import (
"reflect"
"testing"
)
// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
func cleanNode(n *Node) {
n.Expiration = nil
n.ModifiedIndex = 0
n.CreatedIndex = 0
}
// cleanResult scrubs a result object two levels deep of Expiration,
// ModifiedIndex and CreatedIndex.
func cleanResult(result *Response) {
// TODO(philips): make this recursive.
cleanNode(result.Node)
for i := range result.Node.Nodes {
cleanNode(result.Node.Nodes[i])
for j := range result.Node.Nodes[i].Nodes {
cleanNode(result.Node.Nodes[i].Nodes[j])
}
}
}
func TestGet(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
c.Set("foo", "bar", 5)
result, err := c.Get("foo", false, false)
if err != nil {
t.Fatal(err)
}
if result.Node.Key != "/foo" || result.Node.Value != "bar" {
t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
}
result, err = c.Get("goo", false, false)
if err == nil {
t.Fatalf("should not be able to get non-exist key")
}
}
func TestGetAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
c.CreateDir("fooDir", 5)
c.Set("fooDir/k0", "v0", 5)
c.Set("fooDir/k1", "v1", 5)
// Return kv-pairs in sorted order
result, err := c.Get("fooDir", true, false)
if err != nil {
t.Fatal(err)
}
expected := Nodes{
&Node{
Key: "/fooDir/k0",
Value: "v0",
TTL: 5,
},
&Node{
Key: "/fooDir/k1",
Value: "v1",
TTL: 5,
},
}
cleanResult(result)
if !reflect.DeepEqual(result.Node.Nodes, expected) {
t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
}
// Test the `recursive` option
c.CreateDir("fooDir/childDir", 5)
c.Set("fooDir/childDir/k2", "v2", 5)
// Return kv-pairs in sorted order
result, err = c.Get("fooDir", true, true)
cleanResult(result)
if err != nil {
t.Fatal(err)
}
expected = Nodes{
&Node{
Key: "/fooDir/childDir",
Dir: true,
Nodes: Nodes{
&Node{
Key: "/fooDir/childDir/k2",
Value: "v2",
TTL: 5,
},
},
TTL: 5,
},
&Node{
Key: "/fooDir/k0",
Value: "v0",
TTL: 5,
},
&Node{
Key: "/fooDir/k1",
Value: "v1",
TTL: 5,
},
}
cleanResult(result)
if !reflect.DeepEqual(result.Node.Nodes, expected) {
t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
}
}

View File

@ -1,72 +0,0 @@
package etcd
import (
"fmt"
"net/url"
"reflect"
)
type Options map[string]interface{}
// An internally-used data structure that represents a mapping
// between valid options and their kinds
type validOptions map[string]reflect.Kind
// Valid options for GET, PUT, POST, DELETE
// Using CAPITALIZED_UNDERSCORE to emphasize that these
// values are meant to be used as constants.
var (
VALID_GET_OPTIONS = validOptions{
"recursive": reflect.Bool,
"consistent": reflect.Bool,
"sorted": reflect.Bool,
"wait": reflect.Bool,
"waitIndex": reflect.Uint64,
}
VALID_PUT_OPTIONS = validOptions{
"prevValue": reflect.String,
"prevIndex": reflect.Uint64,
"prevExist": reflect.Bool,
"dir": reflect.Bool,
}
VALID_POST_OPTIONS = validOptions{}
VALID_DELETE_OPTIONS = validOptions{
"recursive": reflect.Bool,
"dir": reflect.Bool,
"prevValue": reflect.String,
"prevIndex": reflect.Uint64,
}
)
// toParameters converts options to a string of URL query parameters
func (ops Options) toParameters(validOps validOptions) (string, error) {
p := "?"
values := url.Values{}
if ops == nil {
return "", nil
}
for k, v := range ops {
// Check if the given option is valid (that it exists)
kind := validOps[k]
if kind == reflect.Invalid {
return "", fmt.Errorf("Invalid option: %v", k)
}
// Check if the given option is of the valid type
t := reflect.TypeOf(v)
if kind != t.Kind() {
return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
k, kind, t.Kind())
}
values.Set(k, fmt.Sprintf("%v", v))
}
p += values.Encode()
return p, nil
}

View File

@ -1,396 +0,0 @@
package etcd
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"path"
"strings"
"sync"
"time"
)
// Errors introduced by handling requests
var (
ErrRequestCancelled = errors.New("sending request is cancelled")
)
type RawRequest struct {
Method string
RelativePath string
Values url.Values
Cancel <-chan bool
}
// NewRawRequest returns a new RawRequest
func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
return &RawRequest{
Method: method,
RelativePath: relativePath,
Values: values,
Cancel: cancel,
}
}
// getCancelable issues a cancelable GET request
func (c *Client) getCancelable(key string, options Options,
cancel <-chan bool) (*RawResponse, error) {
logger.Debugf("get %s [%s]", key, c.cluster.Leader)
p := keyToPath(key)
// If consistency level is set to STRONG, append
// the `consistent` query string.
if c.config.Consistency == STRONG_CONSISTENCY {
options["consistent"] = true
}
str, err := options.toParameters(VALID_GET_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("GET", p, nil, cancel)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// get issues a GET request
func (c *Client) get(key string, options Options) (*RawResponse, error) {
return c.getCancelable(key, options, nil)
}
// put issues a PUT request
func (c *Client) put(key string, value string, ttl uint64,
options Options) (*RawResponse, error) {
logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.Leader)
p := keyToPath(key)
str, err := options.toParameters(VALID_PUT_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// post issues a POST request
func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.Leader)
p := keyToPath(key)
req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// delete issues a DELETE request
func (c *Client) delete(key string, options Options) (*RawResponse, error) {
logger.Debugf("delete %s [%s]", key, c.cluster.Leader)
p := keyToPath(key)
str, err := options.toParameters(VALID_DELETE_OPTIONS)
if err != nil {
return nil, err
}
p += str
req := NewRawRequest("DELETE", p, nil, nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// SendRequest sends an HTTP request and returns a RawResponse as defined by etcd
func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
var req *http.Request
var resp *http.Response
var httpPath string
var err error
var respBody []byte
var numReqs = 1
checkRetry := c.CheckRetry
if checkRetry == nil {
checkRetry = DefaultCheckRetry
}
cancelled := make(chan bool, 1)
reqLock := new(sync.Mutex)
if rr.Cancel != nil {
cancelRoutine := make(chan bool)
defer close(cancelRoutine)
go func() {
select {
case <-rr.Cancel:
cancelled <- true
logger.Debug("send.request is cancelled")
case <-cancelRoutine:
return
}
// Repeat cancelling the request until this goroutine is stopped,
// because we have no way to know whether the cancel succeeded.
for {
reqLock.Lock()
c.httpClient.Transport.(*http.Transport).CancelRequest(req)
reqLock.Unlock()
select {
case <-time.After(100 * time.Millisecond):
case <-cancelRoutine:
return
}
}
}()
}
// If we connect to a follower and consistency is required, retry until
// we connect to a leader
sleep := 25 * time.Millisecond
maxSleep := time.Second
for attempt := 0; ; attempt++ {
if attempt > 0 {
select {
case <-cancelled:
return nil, ErrRequestCancelled
case <-time.After(sleep):
sleep = sleep * 2
if sleep > maxSleep {
sleep = maxSleep
}
}
}
logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath)
if rr.Method == "GET" && c.config.Consistency == WEAK_CONSISTENCY {
// If it's a GET and consistency level is set to WEAK,
// then use a random machine.
httpPath = c.getHttpPath(true, rr.RelativePath)
} else {
// Else use the leader.
httpPath = c.getHttpPath(false, rr.RelativePath)
}
// Return a cURL command if curlChan is set
if c.cURLch != nil {
command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
for key, value := range rr.Values {
command += fmt.Sprintf(" -d %s=%s", key, value[0])
}
c.sendCURL(command)
}
logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
req, err := func() (*http.Request, error) {
reqLock.Lock()
defer reqLock.Unlock()
if rr.Values == nil {
if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
return nil, err
}
} else {
body := strings.NewReader(rr.Values.Encode())
if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
return nil, err
}
req.Header.Set("Content-Type",
"application/x-www-form-urlencoded; param=value")
}
return req, nil
}()
if err != nil {
return nil, err
}
resp, err = c.httpClient.Do(req)
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
// If the request was cancelled, return ErrRequestCancelled directly
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
numReqs++
// network error, change a machine!
if err != nil {
logger.Debug("network error: ", err.Error())
lastResp := http.Response{}
if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
return nil, checkErr
}
c.cluster.switchLeader(attempt % len(c.cluster.Machines))
continue
}
// if there is no error, we should have received a response
logger.Debug("recv.response.from ", httpPath)
if validHttpStatusCode[resp.StatusCode] {
// try to read the response body and break the loop
respBody, err = ioutil.ReadAll(resp.Body)
if err == nil {
logger.Debug("recv.success ", httpPath)
break
}
// a ReadAll error may be caused by the cancel request
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
if err == io.ErrUnexpectedEOF {
// underlying connection was closed prematurely, probably by timeout
// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
// this allows the client to detect that and take evasive action. Need
// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
respBody = []byte{}
break
}
}
// if resp is TemporaryRedirect, set the new leader and retry
if resp.StatusCode == http.StatusTemporaryRedirect {
u, err := resp.Location()
if err != nil {
logger.Warning(err)
} else {
// Update cluster leader based on redirect location
// because it should point to the leader address
c.cluster.updateLeaderFromURL(u)
logger.Debug("recv.response.relocate ", u.String())
}
resp.Body.Close()
continue
}
if checkErr := checkRetry(c.cluster, numReqs, *resp,
errors.New("Unexpected HTTP status code")); checkErr != nil {
return nil, checkErr
}
resp.Body.Close()
}
r := &RawResponse{
StatusCode: resp.StatusCode,
Body: respBody,
Header: resp.Header,
}
return r, nil
}
// DefaultCheckRetry defines the retry behaviour for bad HTTP requests.
// If we have retried 2 * the number of machines, stop retrying.
// If the status code is InternalServerError, sleep for 200ms.
func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
err error) error {
if numReqs >= 2*len(cluster.Machines) {
return newError(ErrCodeEtcdNotReachable,
"Tried to connect to each peer twice and failed", 0)
}
code := lastResp.StatusCode
if code == http.StatusInternalServerError {
time.Sleep(time.Millisecond * 200)
}
logger.Warning("bad response status code", code)
return nil
}
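
The retry policy is pluggable through the client's CheckRetry hook (assumed here to be an exported field with the same signature as DefaultCheckRetry, as its use in SendRequest suggests); a sketch of a stricter policy:

package main

import (
    "log"
    "net/http"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)
    // Give up after each machine has been tried once, instead of the
    // default of twice, but reuse DefaultCheckRetry's error value.
    c.CheckRetry = func(cluster *etcd.Cluster, numReqs int, lastResp http.Response, err error) error {
        if numReqs > len(cluster.Machines) {
            return etcd.DefaultCheckRetry(cluster, 2*len(cluster.Machines), lastResp, err)
        }
        return nil
    }
    if _, err := c.Get("foo", false, false); err != nil {
        log.Fatal(err)
    }
}
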
func (c *Client) getHttpPath(random bool, s ...string) string {
var machine string
if random {
machine = c.cluster.Machines[rand.Intn(len(c.cluster.Machines))]
} else {
machine = c.cluster.Leader
}
fullPath := machine + "/" + version
for _, seg := range s {
fullPath = fullPath + "/" + seg
}
return fullPath
}
// buildValues builds a url.Values map according to the given value and ttl
func buildValues(value string, ttl uint64) url.Values {
v := url.Values{}
if value != "" {
v.Set("value", value)
}
if ttl > 0 {
v.Set("ttl", fmt.Sprintf("%v", ttl))
}
return v
}
// keyToPath converts a key string to an HTTP path, excluding the version prefix.
// for example: key[foo] -> path[keys/foo]
// key[/] -> path[keys/]
func keyToPath(key string) string {
p := path.Join("keys", key)
// corner case: if key is "/" or "//" etc.,
// path.Join will clear the trailing "/",
// so we need to add it back
if p == "keys" {
p = "keys/"
}
return p
}

View File

@ -1,89 +0,0 @@
package etcd
import (
"encoding/json"
"net/http"
"strconv"
"time"
)
const (
rawResponse = iota
normalResponse
)
type responseType int
type RawResponse struct {
StatusCode int
Body []byte
Header http.Header
}
var (
validHttpStatusCode = map[int]bool{
http.StatusCreated: true,
http.StatusOK: true,
http.StatusBadRequest: true,
http.StatusNotFound: true,
http.StatusPreconditionFailed: true,
http.StatusForbidden: true,
}
)
// Unmarshal parses RawResponse and stores the result in Response
func (rr *RawResponse) Unmarshal() (*Response, error) {
if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
return nil, handleError(rr.Body)
}
resp := new(Response)
err := json.Unmarshal(rr.Body, resp)
if err != nil {
return nil, err
}
// attach index and term to response
resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
return resp, nil
}
type Response struct {
Action string `json:"action"`
Node *Node `json:"node"`
PrevNode *Node `json:"prevNode,omitempty"`
EtcdIndex uint64 `json:"etcdIndex"`
RaftIndex uint64 `json:"raftIndex"`
RaftTerm uint64 `json:"raftTerm"`
}
type Node struct {
Key string `json:"key,omitempty"`
Value string `json:"value,omitempty"`
Dir bool `json:"dir,omitempty"`
Expiration *time.Time `json:"expiration,omitempty"`
TTL int64 `json:"ttl,omitempty"`
Nodes Nodes `json:"nodes,omitempty"`
ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
CreatedIndex uint64 `json:"createdIndex,omitempty"`
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int {
return len(ns)
}
func (ns Nodes) Less(i, j int) bool {
return ns[i].Key < ns[j].Key
}
func (ns Nodes) Swap(i, j int) {
ns[i], ns[j] = ns[j], ns[i]
}

View File

@ -1,42 +0,0 @@
package etcd
import (
"fmt"
"testing"
)
func TestSetCurlChan(t *testing.T) {
c := NewClient(nil)
c.OpenCURL()
defer func() {
c.Delete("foo", true)
}()
_, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
c.cluster.Leader)
actual := c.RecvCURL()
if expected != actual {
t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
actual, expected)
}
c.SetConsistency(STRONG_CONSISTENCY)
_, err = c.Get("foo", false, false)
if err != nil {
t.Fatal(err)
}
expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?consistent=true&recursive=false&sorted=false",
c.cluster.Leader)
actual = c.RecvCURL()
if expected != actual {
t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
actual, expected)
}
}

View File

@ -1,137 +0,0 @@
package etcd
// Set sets the given key to the given value.
// It will create a new key value pair or replace the old one.
// It will not replace an existing directory.
func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawSet(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// SetDir sets the given key to a directory.
// It will create a new directory or replace the old key-value pair with a directory.
// It will not replace an existing directory.
func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawSetDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// CreateDir creates a directory. It succeeds only if
// the given key does not yet exist.
func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawCreateDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// UpdateDir updates the given directory. It succeeds only if the
// given key already exists.
func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
raw, err := c.RawUpdateDir(key, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Create creates a file with the given value under the given key. It succeeds
// only if the given key does not yet exist.
func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawCreate(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// CreateInOrder creates a file with a key that's guaranteed to be higher than other
// keys in the given directory. It is useful for creating queues.
func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawCreateInOrder(dir, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
// Update updates the given key to the given value. It succeeds only if the
// given key already exists.
func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
raw, err := c.RawUpdate(key, value, ttl)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": true,
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": false,
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
return c.put(key, value, ttl, nil)
}
func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
ops := Options{
"dir": true,
}
return c.put(key, "", ttl, ops)
}
func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": true,
}
return c.put(key, value, ttl, ops)
}
func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
ops := Options{
"prevExist": false,
}
return c.put(key, value, ttl, ops)
}
func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
return c.post(dir, value, ttl)
}
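
A sketch of the queue pattern the CreateInOrder comment alludes to (directory and job names are placeholders):

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)
    // Each CreateInOrder call is assigned a key that sorts after the
    // previous ones, so a sorted Get over the directory acts as a FIFO queue.
    for _, job := range []string{"build", "test", "deploy"} {
        if _, err := c.CreateInOrder("jobs", job, 0); err != nil {
            log.Fatal(err)
        }
    }
    resp, err := c.Get("jobs", true, false)
    if err != nil {
        log.Fatal(err)
    }
    for _, n := range resp.Node.Nodes {
        fmt.Println(n.Key, "->", n.Value)
    }
}
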

View File

@ -1,241 +0,0 @@
package etcd
import (
"testing"
)
func TestSet(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
}()
resp, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
t.Fatalf("Set 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("Set 1 PrevNode failed: %#v", resp)
}
resp, err = c.Set("foo", "bar2", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
t.Fatalf("Set 2 failed: %#v", resp)
}
if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
t.Fatalf("Set 2 PrevNode failed: %#v", resp)
}
}
func TestUpdate(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("nonexistent", true)
}()
resp, err := c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed.
resp, err = c.Update("foo", "wakawaka", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
t.Fatalf("Update 1 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
t.Fatalf("Update 1 prevValue failed: %#v", resp)
}
// This should fail because the key does not exist.
resp, err = c.Update("nonexistent", "whatever", 5)
if err == nil {
t.Fatalf("The key %v did not exist, so the update should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreate(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("newKey", true)
}()
newKey := "/newKey"
newValue := "/newValue"
// This should succeed
resp, err := c.Create(newKey, newValue, 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Key == newKey &&
resp.Node.Value == newValue && resp.Node.TTL == 5) {
t.Fatalf("Create 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("Create 1 PrevNode failed: %#v", resp)
}
// This should fail, because the key is already there
resp, err = c.Create(newKey, newValue, 5)
if err == nil {
t.Fatalf("The key %v did exist, so the creation should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreateInOrder(t *testing.T) {
c := NewClient(nil)
dir := "/queue"
defer func() {
c.DeleteDir(dir)
}()
var firstKey, secondKey string
resp, err := c.CreateInOrder(dir, "1", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
t.Fatalf("Create 1 failed: %#v", resp)
}
firstKey = resp.Node.Key
resp, err = c.CreateInOrder(dir, "2", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
t.Fatalf("Create 2 failed: %#v", resp)
}
secondKey = resp.Node.Key
if firstKey >= secondKey {
t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
firstKey, secondKey)
}
}
func TestSetDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("foo", true)
c.Delete("fooDir", true)
}()
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("SetDir 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
}
// This should fail because /fooDir already points to a directory
resp, err = c.CreateDir("/fooDir", 5)
if err == nil {
t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
"The response was: %#v", resp)
}
_, err = c.Set("foo", "bar", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed
// It should replace the key
resp, err = c.SetDir("foo", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("SetDir 2 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
t.Fatalf("SetDir 2 failed: %#v", resp)
}
}
func TestUpdateDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
// This should succeed.
resp, err = c.UpdateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("UpdateDir 1 failed: %#v", resp)
}
if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
}
// This should fail because the key does not exist.
resp, err = c.UpdateDir("nonexistentDir", 5)
if err == nil {
t.Fatalf("The key %v did not exist, so the update should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}
func TestCreateDir(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("fooDir", true)
}()
// This should succeed
resp, err := c.CreateDir("fooDir", 5)
if err != nil {
t.Fatal(err)
}
if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
resp.Node.Value == "" && resp.Node.TTL == 5) {
t.Fatalf("CreateDir 1 failed: %#v", resp)
}
if resp.PrevNode != nil {
t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
}
// This should fail, because the key is already there
resp, err = c.CreateDir("fooDir", 5)
if err == nil {
t.Fatalf("The key %v did exist, so the creation should have failed."+
"The response was: %#v", resp.Node.Key, resp)
}
}

View File

@ -1,3 +0,0 @@
package etcd
const version = "v2"

View File

@ -1,103 +0,0 @@
package etcd
import (
"errors"
)
// Errors introduced by the Watch command.
var (
ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
)
// If recursive is set to true the watch returns the first change under the given
// prefix since the given index.
//
// If recursive is set to false the watch returns the first change to the given key
// since the given index.
//
// To watch for the latest change, set waitIndex = 0.
//
// If a receiver channel is given, it will be a long-term watch. Watch will block on the
// channel. After someone receives from the channel, it will go on to watch that
// prefix. If a stop channel is given, the client can close the long-term watch using
// the stop channel.
func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
receiver chan *Response, stop chan bool) (*Response, error) {
logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
if receiver == nil {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
return raw.Unmarshal()
}
defer close(receiver)
for {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
resp, err := raw.Unmarshal()
if err != nil {
return nil, err
}
waitIndex = resp.Node.ModifiedIndex + 1
receiver <- resp
}
}
func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
if receiver == nil {
return c.watchOnce(prefix, waitIndex, recursive, stop)
}
for {
raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
if err != nil {
return nil, err
}
resp, err := raw.Unmarshal()
if err != nil {
return nil, err
}
waitIndex = resp.Node.ModifiedIndex + 1
receiver <- raw
}
}
// watchOnce is a helper that returns when there is a change under the given prefix
func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
options := Options{
"wait": true,
}
if waitIndex > 0 {
options["waitIndex"] = waitIndex
}
if recursive {
options["recursive"] = true
}
resp, err := c.getCancelable(key, options, stop)
if err == ErrRequestCancelled {
return nil, ErrWatchStoppedByUser
}
return resp, err
}
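
A sketch of both watch styles described above (the prefix is a placeholder; the long-term watch is stopped after a fixed interval only to keep the example finite):

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/coreos/go-etcd/etcd"
)

func main() {
    c := etcd.NewClient(nil)

    // One-shot watch: nil receiver, blocks until the next change under "config".
    resp, err := c.Watch("config", 0, true, nil, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("changed:", resp.Node.Key, "=", resp.Node.Value)

    // Long-term watch: responses arrive on the channel until the stop
    // channel fires, after which Watch returns ErrWatchStoppedByUser.
    receiver := make(chan *etcd.Response, 10)
    stop := make(chan bool)
    go func() {
        for r := range receiver {
            fmt.Println("update:", r.Node.Key, "=", r.Node.Value)
        }
    }()
    go func() {
        time.Sleep(30 * time.Second)
        stop <- true
    }()
    if _, err := c.Watch("config", 0, true, receiver, stop); err != etcd.ErrWatchStoppedByUser {
        log.Fatal(err)
    }
}
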

View File

@ -1,119 +0,0 @@
package etcd
import (
"fmt"
"runtime"
"testing"
"time"
)
func TestWatch(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("watch_foo", true)
}()
go setHelper("watch_foo", "bar", c)
resp, err := c.Watch("watch_foo", 0, false, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
t.Fatalf("Watch 1 failed: %#v", resp)
}
go setHelper("watch_foo", "bar", c)
resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
t.Fatalf("Watch 2 failed: %#v", resp)
}
routineNum := runtime.NumGoroutine()
ch := make(chan *Response, 10)
stop := make(chan bool, 1)
go setLoop("watch_foo", "bar", c)
go receiver(ch, stop)
_, err = c.Watch("watch_foo", 0, false, ch, stop)
if err != ErrWatchStoppedByUser {
t.Fatalf("Watch returned a non-user stop error")
}
if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
}
}
func TestWatchAll(t *testing.T) {
c := NewClient(nil)
defer func() {
c.Delete("watch_foo", true)
}()
go setHelper("watch_foo/foo", "bar", c)
resp, err := c.Watch("watch_foo", 0, true, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
t.Fatalf("WatchAll 1 failed: %#v", resp)
}
go setHelper("watch_foo/foo", "bar", c)
resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
if err != nil {
t.Fatal(err)
}
if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
t.Fatalf("WatchAll 2 failed: %#v", resp)
}
ch := make(chan *Response, 10)
stop := make(chan bool, 1)
routineNum := runtime.NumGoroutine()
go setLoop("watch_foo/foo", "bar", c)
go receiver(ch, stop)
_, err = c.Watch("watch_foo", 0, true, ch, stop)
if err != ErrWatchStoppedByUser {
t.Fatalf("Watch returned a non-user stop error")
}
if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
}
}
func setHelper(key, value string, c *Client) {
time.Sleep(time.Second)
c.Set(key, value, 100)
}
func setLoop(key, value string, c *Client) {
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
newValue := fmt.Sprintf("%s_%v", value, i)
c.Set(key, newValue, 100)
time.Sleep(time.Second / 10)
}
}
func receiver(c chan *Response, stop chan bool) {
for i := 0; i < 10; i++ {
<-c
}
stop <- true
}

Some files were not shown because too many files have changed in this diff.