Squashed 'tools/' changes from 81d80f3..35679ee

35679ee Merge pull request #110 from weaveworks/parallel-push-errors
3ae41b6 Remove unneeded if block
51ff31a Exit on first error
0faad9f Check for errors when pushing images in parallel
74dc626 Merge pull request #108 from weaveworks/disable-apt-daily
b4f1d91 Merge pull request #107 from weaveworks/docker-17-update
7436aa1 Override apt daily job to not run immediately on boot
7980f15 Merge pull request #106 from weaveworks/document-docker-install-role
f741e53 Bump to Docker 17.06 from CE repo
61796a1 Update Docker CE Debian repo details
0d86f5e Allow for Docker package to be named docker-ce
065c68d Document selection of Docker installation role.
3809053 Just --porcelain; it defaults to v1
11400ea Merge pull request #105 from weaveworks/remove-weaveplugin-remnants
b8b4d64 remove weaveplugin remnants
35099c9 Merge pull request #104 from weaveworks/pull-docker-py
cdd48fc Pull docker-py to speed tests/builds up.
e1c6c24 Merge pull request #103 from weaveworks/test-build-tags
d5d71e0 Add -tags option so callers can pass in build tags
8949b2b Merge pull request #98 from weaveworks/git-status-tag
ac30687 Merge pull request #100 from weaveworks/python_linting
4b125b5 Pin yapf & flake8 versions
7efb485 Lint python linting function
444755b Swap diff direction to reflect changes required
c5b2434 Install flake8 & yapf
5600eac Lint python in build-tools repo
0b02ca9 Add python linting
c011c0d Merge pull request #79 from kinvolk/schu/python-shebang
6577d07 Merge pull request #99 from weaveworks/shfmt-version
00ce0dc Use git status instead of diff to add 'WIP' tag
411fd13 Use shfmt v1.3.0 instead of latest from master.
0d6d4da Run shfmt 1.3 on the code.
5cdba32 Add sudo
c322ca8 circle.yml: Install shfmt binary.
e59c225 Install shfmt 1.3 binary.
30706e6 Install pyhcl in the build container.
960d222 Merge pull request #97 from kinvolk/alban/update-shfmt-3
1d535c7 shellcheck: fix escaping issue
5542498 Merge pull request #96 from kinvolk/alban/update-shfmt-2
32f7cc5 shfmt: fix coding style
09f72af lint: print the diff in case of error
571c7d7 Merge pull request #95 from kinvolk/alban/update-shfmt
bead6ed Update for latest shfmt
b08dc4d Update for latest shfmt (#94)
2ed8aaa Add no-race argument to test script (#92)
80dd78e Merge pull request #91 from weaveworks/upgrade-go-1.8.1
08dcd0d Please ./lint as shfmt changed its rules between 1.0.0 and 1.3.0.
a8bc9ab Upgrade default Go version to 1.8.1.
41c5622 Merge pull request #90 from weaveworks/build-golang-service-conf
e8ebdd5 broaden imagetag regex to fix haskell build image
ba3fbfa Merge pull request #89 from weaveworks/build-golang-service-conf
e506f1b Fix up test script for updated shfmt
9216db8 Add stuff for service-conf build to build-goland image
66a9a93 Merge pull request #88 from weaveworks/haskell-image
cb3e3a2 shfmt
74a5239 Haskell build image
4ccd42b Trying circle quay login
b2c295f Merge branch 'common-build'
0ac746f Trim quay prefix in circle script
c405b31 Merge pull request #87 from weaveworks/common-build
9672d7c Push build images to quay as they have sane robot accounts
a2bf112 Review feedback
fef9b7d Add protobuf tools
10a77ea Update readme
254f266 Don't need the image name in
ffb59fc Adding a weaveworks/build-golang image with tags
b817368 Update min Weave Net docker version
cf87ca3 Merge pull request #86 from weaveworks/lock-kubeadm-version
3ae6919 Add example of custom SSH private key to tf_ssh's usage.
cf8bd8a Add example of custom SSH private key to tf_ansi's usage.
c7d3370 Lock kubeadm's Kubernetes version.
faaaa6f Merge pull request #84 from weaveworks/centos-rhel
ef552e7 Select weave-kube YAML URL based on K8S version.
b4c1198 Upgrade default kubernetes_version to 1.6.1.
b82805e Use a fixed version of kubeadm.
f33888b Factorise and make kubeconfig option optional.
f7b8b89 Install EPEL repo for CentOS.
615917a Fix error in decrypting AWS access key and secret.
86f97b4 Add CentOS 7 AMI and username for AWS via Terraform.
eafd810 Add tf_ansi example with Ansible variables.
2b05787 Skip setup of Docker over TCP for CentOS/RHEL.
84c420b Add docker-ce role for CentOS/RHEL.
00a820c Add setup_weave-net_debug.yml playbook for user issues' debugging.
3eae480 Upgrade default kubernetes_version to 1.5.4.
753921c Allow injection of Docker installation role.
e1ff90d Fix kubectl taint command for 1.5.
b989e97 Fix typo in kubectl taint for single node K8S cluster.
541f58d Remove 'install_recommends: no' for ethtool.
c3f9711 Make Ansible role docker-from-get.docker.com work on RHEL/CentOS.
038c0ae Add frequently used OS images, for convenience.
d30649f Add --insecure-registry to docker.conf
1dd9218 shfmt -i 4 -w push-images
6de96ac Add option to not push docker hub images
310f53d Add push-images script from cortex
8641381 Add port 6443 to kubeadm join commands for K8S 1.6+.
50bf0bc Force type of K8S token to string.
08ab1c0 Remove trailing whitespaces.
ae9efb8 Enable testing against K8S release candidates.
9e32194 Secure GCP servers for Scope: open port 80.
a22536a Secure GCP servers for Scope.
89c3a29 Merge pull request #78 from weaveworks/lint-merge-rebase-issue-in-docs
73ad56d Add linter function to avoid bad merge/rebase artefact
31d069d Change Python shebang to `#!/usr/bin/env python`
52d695c Merge pull request #77 from kinvolk/schu/fix-relative-weave-path
77aed01 Merge pull request #73 from weaveworks/mike/sched/fix-unicode-issue
7c080f4 integration/sanity_check: disable SC1090
d6d360a integration/gce.sh: update gcloud command
e8def2c provisioning/setup: fix shellcheck SC2140
cc02224 integration/config: fix weave path
9c0d6a5 Fix config_management/README.md
334708c Merge pull request #75 from kinvolk/alban/external-build-1
da2505d gce.sh: template: print creation date
e676854 integration tests: fix user account
8530836 host nameing: add repo name
b556c0a gce.sh: fix deletion of gce instances
2ecd1c2 integration: fix GCE --zones/--zone parameter
3e863df sched: Fix unicode encoding issues
51785b5 Use rm -f and set current dir using BASH_SOURCE.
f5c6d68 Merge pull request #71 from kinvolk/schu/fix-linter-warnings
0269628 Document requirement for `lint_sh`
9a3f09e Fix linter warnings
efcf9d2 Merge pull request #53 from weaveworks/2647-testing-mvp
d31ea57 Weave Kube playbook now works with multiple nodes.
27868dd Add GCP firewall rule for FastDP crypto.
edc8bb3 Differentiated name of dev and test playbooks, to avoid confusion.
efa3df7 Moved utility Ansible Yaml to library directory.
fcd2769 Add shorthands to run Ansible playbooks against Terraform-provisioned virtual machines.
f7946fb Add shorthands to SSH into Terraform-provisioned virtual machines.
aad5c6f Mention Terraform and Ansible in README.md.
dddabf0 Add Terraform output required for templates' creation.
dcc7d02 Add Ansible configuration playbooks for development environments.
f86481c Add Ansible configuration playbooks for Docker, K8S and Weave-Net.
efedd25 Git-ignore Ansible retry files.
765c4ca Add helper functions to setup Terraform programmatically.
801dd1d Add Terraform cloud provisioning scripts.
b8017e1 Install hclfmt on CircleCI.
4815e19 Git-ignore Terraform state files.
0aaebc7 Add script to generate cartesian product of dependencies of cross-version testing.
007d90a Add script to list OS images from GCP, AWS and DO.
ca65cc0 Add script to list relevant versions of Go, Docker and Kubernetes.
aa66f44 Scripts now source dependencies using absolute path (previously breaking make depending on current directory).
7865e86 Add -p option to parallelise lint.
36c1835 Merge pull request #69 from weaveworks/mflag
9857568 Use mflag and mflagext package from weaveworks/common.
9799112 Quote bash variable.
10a36b3 Merge pull request #67 from weaveworks/shfmt-ignore
a59884f Add support for .lintignore.
03cc598 Don't lint generated protobuf code.
2b55c2d Merge pull request #66 from weaveworks/reduce-test-timeout
d4e163c Make timeout a flag
49a8609 Reduce test timeout
8fa15cb Merge pull request #63 from weaveworks/test-defaults
b783528 Tweak test script so it can be run on a mca
a3b18bf Merge pull request #65 from weaveworks/fix-integration-tests
ecb5602 Fix integration tests
f9dcbf6 ... without tab (clearly not my day)
a6215c3 Add break I forgot
0e6832d Remove incorrectly added tab
eb26c68 Merge pull request #64 from weaveworks/remove-test-package-linting
f088e83 Review feedback
2c6e83e Remove test package linting
2b3a1bb Merge pull request #62 from weaveworks/revert-61-test-defaults
8c3883a Revert "Make no-go-get the default, and don't assume -tags netgo"
e75c226 Fix bug in GC of firewall rules.
e49754e Merge pull request #51 from weaveworks/gc-firewall-rules
191f487 Add flag to enale/disable firewall rules' GC.
567905c Add GC of firewall rules for weave-net-tests to scheduler.
03119e1 Fix typo in GC of firewall rules.
bbe3844 Fix regular expression for firewall rules.
c5c23ce Pre-change refactoring: splitted gc_project function into smaller methods for better readability.
ed5529f GC firewall rules
ed8e757 Merge pull request #61 from weaveworks/test-defaults
57856e6 Merge pull request #56 from weaveworks/remove-wcloud
dd5f3e6 Add -p flag to test, run test in parallel
62f6f94 Make no-go-get the default, and don't assume -tags netgo
8946588 Merge pull request #60 from weaveworks/2647-gc-weave-net-tests
4085df9 Scheduler now also garbage-collects VMs from weave-net-tests.
4b7d5c6 Merge pull request #59 from weaveworks/57-fix-lint-properly
b7f0e69 Merge pull request #58 from weaveworks/fix-lint
794702c Pin version of shfmt
ab1b11d Fix lint
d1a5e46 Remove wcloud cli tool

git-subtree-dir: tools
git-subtree-split: 35679ee5ff17c4edf864b7c43dc70a40337fcd80
Marcus Cobden 2017-07-28 14:20:45 +01:00
parent 0c4ede738a
commit 46b8099b5c
89 changed files with 3647 additions and 5957 deletions

.gitignore vendored

@@ -2,6 +2,9 @@ cover/cover
socks/proxy
socks/image.tar
runner/runner
cmd/wcloud/wcloud
*.pyc
*~
terraform.tfstate
terraform.tfstate.backup
*.retry
build/**/.uptodate

README.md

@@ -2,12 +2,17 @@
Included in this repo are tools shared by weave.git and scope.git. They include
- ```build```: a set of docker base-images for building weave
projects. These should be used instead of giving each project its
own build image.
- ```provisioning```: a set of Terraform scripts to provision virtual machines in GCP, AWS or Digital Ocean.
- ```config_management```: a set of Ansible playbooks to configure virtual machines for development, testing, etc.
- ```cover```: a tool which merges overlapping coverage reports generated by go
test
- ```files-with-type```: a tool to search directories for files of a given
MIME type
- ```lint```: a script to lint Go project; runs various tools like golint, go
vet, errcheck etc
- ```lint```: a script to lint go, sh and hcl files; runs various tools like
golint, go vet, errcheck, shellcheck etc
- ```rebuild-image```: a script to rebuild docker images when their input files
change; useful when you're using docker images to build your software, but you
don't want to build the image every time.
@@ -24,6 +29,11 @@ Included in this repo are tools shared by weave.git and scope.git. They include
- ```scheduler```: an appengine application that can be used to distribute
tests across different shards in CircleCI.
## Requirements
- ```lint``` requires shfmt to lint sh files; get shfmt with
```go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt```
## Using build-tools.git
To allow you to tie your code to a specific version of build-tools.git, such

build/Makefile Normal file

@@ -0,0 +1,46 @@
.PHONY: all clean images
.DEFAULT_GOAL := all
# Boilerplate for building Docker containers.
# All this must go at top of file I'm afraid.
IMAGE_PREFIX := quay.io/weaveworks/build-
IMAGE_TAG := $(shell ../image-tag)
UPTODATE := .uptodate
# Every directory with a Dockerfile in it builds an image called
# $(IMAGE_PREFIX)<dirname>. Dependencies (i.e. things that go in the image)
# still need to be explicitly declared.
%/$(UPTODATE): %/Dockerfile %/*
$(SUDO) docker build -t $(IMAGE_PREFIX)$(shell basename $(@D)) $(@D)/
$(SUDO) docker tag $(IMAGE_PREFIX)$(shell basename $(@D)) $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG)
touch $@
# Get a list of directories containing Dockerfiles
DOCKERFILES := $(shell find . -name tools -prune -o -name vendor -prune -o -type f -name 'Dockerfile' -print)
UPTODATE_FILES := $(patsubst %/Dockerfile,%/$(UPTODATE),$(DOCKERFILES))
DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES))
IMAGE_NAMES := $(foreach dir,$(DOCKER_IMAGE_DIRS),$(patsubst %,$(IMAGE_PREFIX)%,$(shell basename $(dir))))
images:
$(info $(IMAGE_NAMES))
@echo > /dev/null
# Define imagetag-golang, etc, for each image, which parses the dockerfile and
# prints an image tag. For example:
# FROM golang:1.8.1-stretch
# in the "foo/Dockerfile" becomes:
# $ make imagetag-foo
# 1.8.1-stretch
define imagetag_dep
.PHONY: imagetag-$(1)
$(patsubst $(IMAGE_PREFIX)%,imagetag-%,$(1)): $(patsubst $(IMAGE_PREFIX)%,%,$(1))/Dockerfile
@cat $$< | grep "^FROM " | head -n1 | sed 's/FROM \(.*\):\(.*\)/\2/'
endef
$(foreach image, $(IMAGE_NAMES), $(eval $(call imagetag_dep, $(image))))
all: $(UPTODATE_FILES)
clean:
$(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true
rm -rf $(UPTODATE_FILES)
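
For example, with the golang and haskell images added elsewhere in this commit, listing the buildable images would print something like (a sketch; the exact list depends on which Dockerfiles exist):

```
$ make images
quay.io/weaveworks/build-golang quay.io/weaveworks/build-haskell
```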

build/golang/Dockerfile Normal file

@@ -0,0 +1,49 @@
FROM golang:1.8.0-stretch
RUN apt-get update && \
apt-get install -y \
curl \
file \
git \
jq \
libprotobuf-dev \
make \
protobuf-compiler \
python-pip \
python-requests \
python-yaml \
unzip && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN pip install attrs pyhcl
RUN curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
chmod +x shfmt && \
mv shfmt /usr/bin
RUN go clean -i net && \
go install -tags netgo std && \
go install -race -tags netgo std
RUN go get -tags netgo \
github.com/FiloSottile/gvt \
github.com/client9/misspell/cmd/misspell \
github.com/fatih/hclfmt \
github.com/fzipp/gocyclo \
github.com/gogo/protobuf/gogoproto \
github.com/gogo/protobuf/protoc-gen-gogoslick \
github.com/golang/dep/... \
github.com/golang/lint/golint \
github.com/golang/protobuf/protoc-gen-go \
github.com/kisielk/errcheck \
github.com/mjibson/esc \
github.com/prometheus/prometheus/cmd/promtool && \
rm -rf /go/pkg /go/src
RUN mkdir protoc && \
cd protoc && \
curl -O -L https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip && \
unzip protoc-3.1.0-linux-x86_64.zip && \
cp bin/protoc /usr/bin/ && \
chmod o+x /usr/bin/protoc && \
cd .. && \
rm -rf protoc
RUN mkdir -p /var/run/secrets/kubernetes.io/serviceaccount && \
touch /var/run/secrets/kubernetes.io/serviceaccount/token
COPY build.sh /
ENTRYPOINT ["/build.sh"]

build/golang/build.sh Executable file

@@ -0,0 +1,22 @@
#!/bin/sh
set -eu
if [ -n "${SRC_NAME:-}" ]; then
SRC_PATH=${SRC_PATH:-$GOPATH/src/$SRC_NAME}
elif [ -z "${SRC_PATH:-}" ]; then
echo "Must set either \$SRC_NAME or \$SRC_PATH."
exit 1
fi
# If we run make directly, any files created on the bind mount
# will have awkward ownership. So we switch to a user with the
# same user and group IDs as source directory. We have to set a
# few things up so that sudo works without complaining later on.
uid=$(stat --format="%u" "$SRC_PATH")
gid=$(stat --format="%g" "$SRC_PATH")
echo "weave:x:$uid:$gid::$SRC_PATH:/bin/sh" >>/etc/passwd
echo "weave:*:::::::" >>/etc/shadow
echo "weave ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
su weave -c "PATH=$PATH make -C $SRC_PATH BUILD_IN_CONTAINER=false $*"
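
For illustration, a hypothetical invocation of the resulting build image (the repository path, mount point and image tag here are assumptions, not taken from this commit):

```
$ docker run --rm \
    -e SRC_NAME=github.com/weaveworks/weave \
    -v "$PWD:/go/src/github.com/weaveworks/weave" \
    quay.io/weaveworks/build-golang
```

build.sh then derives SRC_PATH from SRC_NAME, creates a user matching the bind mount's uid/gid, and runs `make` in the mounted source tree.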

build/haskell/Dockerfile Normal file

@@ -0,0 +1,4 @@
FROM fpco/stack-build:lts-8.9
COPY build.sh /
COPY copy-libraries /usr/local/bin/
ENTRYPOINT ["/build.sh"]

build/haskell/build.sh Executable file

@@ -0,0 +1,12 @@
#!/bin/sh
#
# Build a static Haskell binary using stack.
set -eu
if [ -z "${SRC_PATH:-}" ]; then
echo "Must set \$SRC_PATH."
exit 1
fi
make -C "$SRC_PATH" BUILD_IN_CONTAINER=false "$@"
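
As with the golang image, a hypothetical invocation (mount path and image tag are assumptions):

```
$ docker run --rm -e SRC_PATH=/src -v "$PWD:/src" quay.io/weaveworks/build-haskell
```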

build/haskell/copy-libraries Executable file

@@ -0,0 +1,41 @@
#!/bin/bash
#
# Copy dynamically linked libraries for a binary, so we can assemble a Docker
# image.
#
# Run with:
# copy-libraries /path/to/binary /output/dir
#
# Dependencies:
# - awk
# - cp
# - grep
# - ldd
# - mkdir
set -o errexit
set -o nounset
set -o pipefail
# Path to a Linux binary that we're going to run in the container.
binary_path="${1}"
# Path to directory to write the output to.
output_dir="${2}"
exe_name=$(basename "${binary_path}")
# Identify linked libraries.
libraries=($(ldd "${binary_path}" | awk '{print $(NF-1)}' | grep -v '=>'))
# Add /bin/sh, which we need for Docker imports.
libraries+=('/bin/sh')
mkdir -p "${output_dir}"
# Copy executable and all needed libraries into temporary directory.
cp "${binary_path}" "${output_dir}/${exe_name}"
for lib in "${libraries[@]}"; do
mkdir -p "${output_dir}/$(dirname "$lib")"
# Need -L to make sure we get actual libraries & binaries, not symlinks to
# them.
cp -L "${lib}" "${output_dir}/${lib}"
done
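
One way the populated output directory might then be turned into a minimal image (the `docker import` step is an assumption, not part of this script):

```
$ ./copy-libraries /path/to/mybinary /tmp/rootfs
$ tar -C /tmp/rootfs -c . | docker import - mybinary:latest
```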

circle.yml

@@ -13,13 +13,19 @@ dependencies:
- go install -tags netgo std
- mkdir -p $(dirname $SRCDIR)
- cp -r $(pwd)/ $SRCDIR
- |
curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
chmod +x shfmt && \
sudo mv shfmt /usr/bin
- |
cd $SRCDIR;
go get \
github.com/fzipp/gocyclo \
github.com/golang/lint/golint \
github.com/kisielk/errcheck \
./vendor/github.com/mvdan/sh/cmd/shfmt
github.com/fatih/hclfmt
- pip install yapf==0.16.2 flake8==3.3.0
test:
override:
@@ -27,5 +33,23 @@ test:
- cd $SRCDIR/cover; make
- cd $SRCDIR/socks; make
- cd $SRCDIR/runner; make
- cd $SRCDIR/cmd/wcloud; make
- cd $SRCDIR/build; make
deployment:
snapshot:
branch: master
commands:
- docker login -e "$DOCKER_REGISTRY_EMAIL" -u "$DOCKER_REGISTRY_USER" -p "$DOCKER_REGISTRY_PASS" "$DOCKER_REGISTRY_URL"
- |
cd $SRCDIR/build;
for image in $(make images); do
# Tag the built images with the revision of this repo.
docker push "${image}:${GIT_TAG}"
# Tag the built images with something derived from the base images in
# their respective Dockerfiles. So "FROM golang:1.8.0-stretch" as a
# base image would lead to a tag of "1.8.0-stretch"
IMG_TAG=$(make "imagetag-${image#quay.io/weaveworks/build-}")
docker tag "${image}:latest" "${image}:${IMG_TAG}"
docker push "${image}:${IMG_TAG}"
done
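
For the golang image, one iteration of the loop above works out to roughly the following (GIT_TAG is assumed to be set earlier in the CircleCI environment):

```
docker push "quay.io/weaveworks/build-golang:${GIT_TAG}"
IMG_TAG=$(make imagetag-golang)   # "1.8.0-stretch", parsed from the FROM line
docker tag quay.io/weaveworks/build-golang:latest quay.io/weaveworks/build-golang:1.8.0-stretch
docker push quay.io/weaveworks/build-golang:1.8.0-stretch
```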

cmd/wcloud/Makefile

@@ -1,11 +0,0 @@
.PHONY: all clean
all: wcloud
wcloud: *.go
go get ./$(@D)
go build -o $@ ./$(@D)
clean:
rm -rf wcloud
go clean ./...

cmd/wcloud/main.go

@@ -1,238 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
"time"
"github.com/olekukonko/tablewriter"
"gopkg.in/yaml.v2"
)
// ArrayFlags allows you to collect repeated flags
type ArrayFlags []string
func (a *ArrayFlags) String() string {
return strings.Join(*a, ",")
}
// Set implements flags.Value
func (a *ArrayFlags) Set(value string) error {
*a = append(*a, value)
return nil
}
func env(key, def string) string {
if val, ok := os.LookupEnv(key); ok {
return val
}
return def
}
var (
token = env("SERVICE_TOKEN", "")
baseURL = env("BASE_URL", "https://cloud.weave.works")
)
func usage() {
fmt.Println(`Usage:
deploy <image>:<version> Deploy image to your configured env
list List recent deployments
config (<filename>) Get (or set) the configured env
logs <deploy> Show logs for the given deployment`)
}
func main() {
if len(os.Args) <= 1 {
usage()
os.Exit(1)
}
c := NewClient(token, baseURL)
switch os.Args[1] {
case "deploy":
deploy(c, os.Args[2:])
case "list":
list(c, os.Args[2:])
case "config":
config(c, os.Args[2:])
case "logs":
logs(c, os.Args[2:])
case "events":
events(c, os.Args[2:])
case "help":
usage()
default:
usage()
}
}
func deploy(c Client, args []string) {
var (
flags = flag.NewFlagSet("", flag.ContinueOnError)
username = flags.String("u", "", "Username to report to deploy service (default will be the current user)")
services ArrayFlags
)
flags.Var(&services, "service", "Service to update (can be repeated)")
if err := flags.Parse(args); err != nil {
usage()
return
}
args = flags.Args()
if len(args) != 1 {
usage()
return
}
parts := strings.SplitN(args[0], ":", 2)
if len(parts) < 2 {
usage()
return
}
if *username == "" {
user, err := user.Current()
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
*username = user.Username
}
deployment := Deployment{
ImageName: parts[0],
Version: parts[1],
TriggeringUser: *username,
IntendedServices: services,
}
if err := c.Deploy(deployment); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
}
func list(c Client, args []string) {
var (
flags = flag.NewFlagSet("", flag.ContinueOnError)
since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results")
)
if err := flags.Parse(args); err != nil {
usage()
return
}
through := time.Now()
from := through.Add(-*since)
deployments, err := c.GetDeployments(from.Unix(), through.Unix())
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Created", "ID", "Image", "Version", "State"})
table.SetBorder(false)
table.SetColumnSeparator(" ")
for _, deployment := range deployments {
table.Append([]string{
deployment.CreatedAt.Format(time.RFC822),
deployment.ID,
deployment.ImageName,
deployment.Version,
deployment.State,
})
}
table.Render()
}
func events(c Client, args []string) {
var (
flags = flag.NewFlagSet("", flag.ContinueOnError)
since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results")
)
if err := flags.Parse(args); err != nil {
usage()
return
}
through := time.Now()
from := through.Add(-*since)
events, err := c.GetEvents(from.Unix(), through.Unix())
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Println("events: ", string(events))
}
func loadConfig(filename string) (*Config, error) {
extension := filepath.Ext(filename)
var config Config
buf, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
if extension == ".yaml" || extension == ".yml" {
if err := yaml.Unmarshal(buf, &config); err != nil {
return nil, err
}
} else {
if err := json.NewDecoder(bytes.NewReader(buf)).Decode(&config); err != nil {
return nil, err
}
}
return &config, nil
}
func config(c Client, args []string) {
if len(args) > 1 {
usage()
return
}
if len(args) == 1 {
config, err := loadConfig(args[0])
if err != nil {
fmt.Println("Error reading config:", err)
os.Exit(1)
}
if err := c.SetConfig(config); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
} else {
config, err := c.GetConfig()
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
buf, err := yaml.Marshal(config)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Println(string(buf))
}
}
func logs(c Client, args []string) {
if len(args) != 1 {
usage()
return
}
output, err := c.GetLogs(args[0])
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Println(string(output))
}

cmd/wcloud/client.go

@@ -1,150 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
)
// Client for the deployment service
type Client struct {
token string
baseURL string
}
// NewClient makes a new Client
func NewClient(token, baseURL string) Client {
return Client{
token: token,
baseURL: baseURL,
}
}
func (c Client) newRequest(method, path string, body io.Reader) (*http.Request, error) {
req, err := http.NewRequest(method, c.baseURL+path, body)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", fmt.Sprintf("Scope-Probe token=%s", c.token))
return req, nil
}
// Deploy notifies the deployment service about a new deployment
func (c Client) Deploy(deployment Deployment) error {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(deployment); err != nil {
return err
}
req, err := c.newRequest("POST", "/api/deploy/deploy", &buf)
if err != nil {
return err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
if res.StatusCode != 204 {
return fmt.Errorf("error making request: %s", res.Status)
}
return nil
}
// GetDeployments returns a list of deployments
func (c Client) GetDeployments(from, through int64) ([]Deployment, error) {
req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy?from=%d&through=%d", from, through), nil)
if err != nil {
return nil, err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("error making request: %s", res.Status)
}
var response struct {
Deployments []Deployment `json:"deployments"`
}
if err := json.NewDecoder(res.Body).Decode(&response); err != nil {
return nil, err
}
return response.Deployments, nil
}
// GetEvents returns the raw events.
func (c Client) GetEvents(from, through int64) ([]byte, error) {
req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/event?from=%d&through=%d", from, through), nil)
if err != nil {
return nil, err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("error making request: %s", res.Status)
}
return ioutil.ReadAll(res.Body)
}
// GetConfig returns the current Config
func (c Client) GetConfig() (*Config, error) {
req, err := c.newRequest("GET", "/api/config/deploy", nil)
if err != nil {
return nil, err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode == 404 {
return nil, fmt.Errorf("no configuration uploaded yet")
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("error making request: %s", res.Status)
}
var config Config
if err := json.NewDecoder(res.Body).Decode(&config); err != nil {
return nil, err
}
return &config, nil
}
// SetConfig sets the current Config
func (c Client) SetConfig(config *Config) error {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(config); err != nil {
return err
}
req, err := c.newRequest("POST", "/api/config/deploy", &buf)
if err != nil {
return err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
if res.StatusCode != 204 {
return fmt.Errorf("error making request: %s", res.Status)
}
return nil
}
// GetLogs returns the logs for a given deployment.
func (c Client) GetLogs(deployID string) ([]byte, error) {
req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy/%s/log", deployID), nil)
if err != nil {
return nil, err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("error making request: %s", res.Status)
}
return ioutil.ReadAll(res.Body)
}

cmd/wcloud/types.go

@@ -1,43 +0,0 @@
package main
import (
"time"
)
// Deployment describes a deployment
type Deployment struct {
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
ImageName string `json:"image_name"`
Version string `json:"version"`
Priority int `json:"priority"`
State string `json:"status"`
TriggeringUser string `json:"triggering_user"`
IntendedServices []string `json:"intended_services"`
}
// Config for the deployment system for a user.
type Config struct {
RepoURL string `json:"repo_url" yaml:"repo_url"`
RepoBranch string `json:"repo_branch" yaml:"repo_branch"`
RepoPath string `json:"repo_path" yaml:"repo_path"`
RepoKey string `json:"repo_key" yaml:"repo_key"`
KubeconfigPath string `json:"kubeconfig_path" yaml:"kubeconfig_path"`
AutoApply bool `json:"auto_apply" yaml:"auto_apply"`
Notifications []NotificationConfig `json:"notifications" yaml:"notifications"`
// Globs of files not to change, relative to the root of the repo
ConfigFileBlackList []string `json:"config_file_black_list" yaml:"config_file_black_list"`
CommitMessageTemplate string `json:"commit_message_template" yaml:"commit_message_template"` // See https://golang.org/pkg/text/template/
}
// NotificationConfig describes how to send notifications
type NotificationConfig struct {
SlackWebhookURL string `json:"slack_webhook_url" yaml:"slack_webhook_url"`
SlackUsername string `json:"slack_username" yaml:"slack_username"`
MessageTemplate string `json:"message_template" yaml:"message_template"`
ApplyMessageTemplate string `json:"apply_message_template" yaml:"apply_message_template"`
}

config_management/README.md Normal file

@@ -0,0 +1,141 @@
# Weaveworks configuration management
## Introduction
This project allows you to configure a machine with:
* Docker and Weave Net for development: `setup_weave-net_dev.yml`
* Docker and Weave Net for testing: `setup_weave-net_test.yml`
* Docker, Kubernetes and Weave Kube (CNI plugin): `setup_weave-kube.yml`
You can then use these environments for development, testing and debugging.
## Set up
You will need [Python](https://www.python.org/downloads/) and [Ansible 2.+](http://docs.ansible.com/ansible/intro_installation.html) installed on your machine and added to your `PATH` in order to be able to configure environments automatically.
* On any platform, if you have Python installed: `pip install ansible`
* On macOS: `brew install ansible`
* On Linux (via Aptitude): `sudo apt install ansible`
* On Linux (via YUM): `sudo yum install ansible`
* For other platforms or more details, see [here](http://docs.ansible.com/ansible/intro_installation.html)
Frequent errors during installation are:
* `fatal error: Python.h: No such file or directory`: install `python-dev`
* `fatal error: ffi.h: No such file or directory`: install `libffi-dev`
* `fatal error: openssl/opensslv.h: No such file or directory`: install `libssl-dev`
Full steps for a blank Ubuntu/Debian Linux machine:
sudo apt-get install -qq -y python-pip python-dev libffi-dev libssl-dev
sudo pip install -U cffi
sudo pip install ansible
## Tags
These can be used to selectively run (`--tags "tag1,tag2"`) or skip (`--skip-tags "tag1,tag2"`) tasks.
* `output`: print potentially useful output from hosts (e.g. output of `kubectl get pods --all-namespaces`)
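
For example, to run a playbook while skipping those informational tasks:

```
ansible-playbook -u <username> -i "localhost", -c local setup_weave-kube.yml --skip-tags output
```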
## Usage
### Local machine
```
ansible-playbook -u <username> -i "localhost", -c local setup_weave-kube.yml
```
### Vagrant
Provision your local VM using Vagrant:
```
cd $(mktemp -d -t XXX)
vagrant init ubuntu/xenial64 # or, e.g. centos/7
vagrant up
```
then set the following environment variables by extracting the output of `vagrant ssh-config`:
```
eval $(vagrant ssh-config | sed \
-ne 's/\ *HostName /vagrant_ssh_host=/p' \
-ne 's/\ *User /vagrant_ssh_user=/p' \
-ne 's/\ *Port /vagrant_ssh_port=/p' \
-ne 's/\ *IdentityFile /vagrant_ssh_id_file=/p')
```
and finally run:
```
ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \
--ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
-i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml
```
or, for specific versions of Kubernetes and Docker:
```
ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \
--ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
-i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml \
--extra-vars "docker_version=1.12.3 kubernetes_version=1.4.4"
```
NOTE: Kubernetes APT repo includes only the latest version, so currently
retrieving an older version will fail.
### Terraform
Provision your machine using the Terraform scripts from `../provisioning`, then run:
```
terraform output ansible_inventory > /tmp/ansible_inventory
```
and
```
ansible-playbook \
--private-key="$(terraform output private_key_path)" \
-u "$(terraform output username)" \
-i /tmp/ansible_inventory \
--ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
../../config_management/setup_weave-kube.yml
```
To specify versions of Kubernetes and Docker see Vagrant examples above.
N.B.: `--ssh-extra-args` is used to provide:
* `StrictHostKeyChecking=no`: as VMs come and go, the same IP can be used by a different machine, so checking the host's SSH key may fail. Note that this introduces a risk of a man-in-the-middle attack.
* `UserKnownHostsFile=/dev/null`: if you previously connected a VM with the same IP but a different public key, and added it to `~/.ssh/known_hosts`, SSH may still fail to connect, hence we use `/dev/null` instead of `~/.ssh/known_hosts`.
### Docker installation role
Various ways to install Docker are provided:
- `docker-from-docker-ce-repo`
- `docker-from-docker-repo`
- `docker-from-get.docker.com`
- `docker-from-tarball`
each producing a slightly different outcome, which can be useful for testing various setup scenarios.
The `docker-install` role selects one of the above ways to install Docker based on the `docker_install_role` variable.
The default value for this variable is configured in `group_vars/all`.
You can however override it with whichever role you would want to run by passing the name of the role as a key-value pair in `extra-vars`, e.g.:
```
ansible-playbook <playbook>.yml \
--extra-vars "docker_install_role=docker-from-docker-ce-repo"
```
## Resources
* [https://www.vagrantup.com/docs/provisioning/ansible.html](https://www.vagrantup.com/docs/provisioning/ansible.html)
* [http://docs.ansible.com/ansible/guide_vagrant.html](http://docs.ansible.com/ansible/guide_vagrant.html)

config_management/group_vars/all

@@ -0,0 +1,11 @@
---
go_version: 1.8.1
terraform_version: 0.8.5
docker_version: 17.06
docker_install_role: 'docker-from-docker-ce-repo'
kubernetes_version: 1.6.1
kubernetes_cni_version: 0.5.1
kubernetes_token: '123456.0123456789123456'
etcd_container_version: 2.2.5
kube_discovery_container_version: 1.0
pause_container_version: 3.0

config_management/library/setup_ansible_dependencies.yml

@@ -0,0 +1,33 @@
---
################################################################################
# Install Ansible's dependencies: python and lsb_release, required respectively
# to run Ansible modules and gather Ansible facts.
#
# See also:
# - http://docs.ansible.com/ansible/intro_installation.html#managed-node-requirements
# - http://docs.ansible.com/ansible/setup_module.html
################################################################################
- name: check if python is installed (as required by ansible modules)
raw: test -e /usr/bin/python
register: is_python_installed
failed_when: is_python_installed.rc not in [0, 1]
changed_when: false # never mutates state.
- name: install python if missing (as required by ansible modules)
when: is_python_installed|failed # skip otherwise
raw: (test -e /usr/bin/apt-get && apt-get update && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum update && yum install -y python)
changed_when: is_python_installed.rc == 1
- name: check if lsb_release is installed (as required for ansible facts)
raw: test -e /usr/bin/lsb_release
register: is_lsb_release_installed
failed_when: is_lsb_release_installed.rc not in [0, 1]
changed_when: false # never mutates state.
- name: install lsb_release if missing (as required for ansible facts)
when: is_lsb_release_installed|failed # skip otherwise
raw: (test -e /usr/bin/apt-get && apt-get install -y lsb_release) || (test -e /usr/bin/yum && yum install -y redhat-lsb-core)
changed_when: is_lsb_release_installed.rc == 1
- setup: # gather 'facts', i.e. compensates for 'gather_facts: false' in calling playbook.

config_management/roles/dev-tools/files/apt-daily.timer.conf

@@ -0,0 +1,2 @@
[Timer]
Persistent=false

config_management/roles/dev-tools/tasks/main.yml

@@ -0,0 +1,48 @@
---
# Set up Development Environment.
- name: install development tools
package:
name: "{{ item }}"
state: present
with_items:
# weave net dependencies
- make
- vagrant
# ansible dependencies
- python-pip
- python-dev
- libffi-dev
- libssl-dev
# terraform dependencies
- unzip
# other potentially useful tools:
- aufs-tools
- ethtool
- iputils-arping
- libpcap-dev
- git
- mercurial
- bc
- jq
- name: install ansible
pip:
name: ansible
state: present
- name: install terraform
unarchive:
src: 'https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_linux_{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.zip'
remote_src: yes
dest: /usr/bin
mode: 0555
creates: /usr/bin/terraform
# Ubuntu runs an apt update process that will run on first boot from image.
# This is of questionable value when the machines are only going to live for a few minutes.
# If you leave them on they will run the process daily.
# Also we have seen the update process create a 'defunct' process which then throws off Weave Net smoke-test checks.
# So, we override the 'persistent' setting so it will still run at the scheduled time but will not try to catch up on first boot.
- name: copy apt daily override
copy: src=apt-daily.timer.conf dest=/etc/systemd/system/apt-daily.timer.d/

config_management/roles/docker-configuration/files/docker.conf

@@ -0,0 +1,3 @@
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay --insecure-registry "weave-ci-registry:5000"

config_management/roles/docker-configuration/tasks/main.yml

@@ -0,0 +1,36 @@
---
# Configure Docker
# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
- name: ensure docker group is present (or create it)
group:
name: docker
state: present
- name: add user to docker group (avoids sudo-ing)
user:
name: "{{ ansible_user }}"
group: docker
state: present
- name: ensure docker's systemd directory exists
file:
path: /etc/systemd/system/docker.service.d
state: directory
recurse: yes
when: ansible_os_family != "RedHat"
- name: enable docker remote api over tcp
copy:
src: "{{ role_path }}/files/docker.conf"
dest: /etc/systemd/system/docker.service.d/docker.conf
register: docker_conf
when: ansible_os_family != "RedHat"
- name: restart docker service
systemd:
name: docker
state: restarted
daemon_reload: yes # ensure docker.conf is picked up.
enabled: yes
when: docker_conf.changed or ansible_os_family == "RedHat"

config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml

@@ -0,0 +1,35 @@
---
# Debian / Ubuntu specific:
- name: install dependencies for docker repository
package:
name: "{{ item }}"
state: present
with_items:
- apt-transport-https
- ca-certificates
- name: add apt key for the docker repository
apt_key:
keyserver: hkp://ha.pool.sks-keyservers.net:80
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
state: present
register: apt_key_docker_repo
- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }})
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename|lower }} stable
state: present
register: apt_docker_repo
- name: update apt's cache
apt:
update_cache: yes
when: apt_key_docker_repo.changed or apt_docker_repo.changed
- name: install docker-engine
package:
name: "{{ item }}"
state: present
with_items:
- docker-ce={{ docker_version }}*

config_management/roles/docker-from-docker-ce-repo/tasks/main.yml

@@ -0,0 +1,10 @@
---
# Set up Docker
# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
# Distribution-specific tasks:
- include: debian.yml
when: ansible_os_family == "Debian"
- include: redhat.yml
when: ansible_os_family == "RedHat"

config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml

@@ -0,0 +1,29 @@
# Docker installation from Docker's CentOS Community Edition
# See also: https://docs.docker.com/engine/installation/linux/centos/
- name: remove all potentially pre-existing packages
yum:
name: '{{ item }}'
state: absent
with_items:
- docker
- docker-common
- container-selinux
- docker-selinux
- docker-engine
- name: install yum-utils
yum:
name: yum-utils
state: present
- name: add docker ce repo
command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Note that Docker CE versions do not follow regular Docker versions, but look
# like, for example: "17.03.0.el7"
- name: install docker
yum:
name: 'docker-ce-{{ docker_version }}'
update_cache: yes
state: present

config_management/roles/docker-from-docker-repo/tasks/debian.yml

@@ -0,0 +1,35 @@
---
# Debian / Ubuntu specific:
- name: install dependencies for docker repository
package:
name: "{{ item }}"
state: present
with_items:
- apt-transport-https
- ca-certificates
- name: add apt key for the docker repository
apt_key:
keyserver: hkp://ha.pool.sks-keyservers.net:80
id: 58118E89F3A912897C070ADBF76221572C52609D
state: present
register: apt_key_docker_repo
- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }})
apt_repository:
repo: deb https://apt.dockerproject.org/repo {{ ansible_distribution | lower }}-{{ ansible_distribution_release }} main
state: present
register: apt_docker_repo
- name: update apt's cache
apt:
update_cache: yes
when: apt_key_docker_repo.changed or apt_docker_repo.changed
- name: install docker-engine
package:
name: "{{ item }}"
state: present
with_items:
- docker-engine={{ docker_version }}*

config_management/roles/docker-from-docker-repo/tasks/main.yml

@@ -0,0 +1,10 @@
---
# Set up Docker
# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
# Distribution-specific tasks:
- include: debian.yml
when: ansible_os_family == "Debian"
- include: redhat.yml
when: ansible_os_family == "RedHat"

config_management/roles/docker-from-docker-repo/tasks/redhat.yml

@@ -0,0 +1,25 @@
---
# RedHat / CentOS specific:
- name: add docker's yum repository (centos/{{ ansible_lsb.major_release }})
yum_repository:
name: docker
description: Docker YUM repo
file: external_repos
baseurl: https://yum.dockerproject.org/repo/main/centos/{{ ansible_lsb.major_release }}
enabled: yes
gpgkey: https://yum.dockerproject.org/gpg
gpgcheck: yes
state: present
- name: update yum's cache
yum:
name: "*"
update_cache: yes
- name: install docker-engine
package:
name: "{{ item }}"
state: present
with_items:
- docker-engine-{{ docker_version }}

config_management/roles/docker-from-get.docker.com/tasks/debian.yml

@@ -0,0 +1,8 @@
---
# Debian / Ubuntu specific:
- name: apt-import gpg key for the docker repository
shell: curl -sSL https://get.docker.com/gpg | sudo apt-key add -
- name: install docker
shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine={{ docker_version }}*/ -e s/docker-ce/docker-ce={{ docker_version }}*/ | sh'

config_management/roles/docker-from-get.docker.com/tasks/main.yml

@@ -0,0 +1,10 @@
---
# Set up Docker
# See also: legacy gce.sh script
# Distribution-specific tasks:
- include: debian.yml
when: ansible_os_family == "Debian"
- include: redhat.yml
when: ansible_os_family == "RedHat"

config_management/roles/docker-from-get.docker.com/tasks/redhat.yml

@@ -0,0 +1,11 @@
---
# RedHat / CentOS specific:
- name: rpm-import gpg key for the docker repository
shell: curl -sSLo /tmp/docker.gpg https://get.docker.com/gpg && sudo rpm --import /tmp/docker.gpg
- name: install docker
shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine-{{ docker_version }}*/ | sh'
- name: wait for docker installation to complete
shell: yum install -y yum-utils && yum-complete-transaction

config_management/roles/docker-from-tarball/tasks/main.yml

@@ -0,0 +1,61 @@
---
# Set up Docker
# See also:
# - https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
# - https://github.com/docker/docker/releases
- include_role:
name: docker-prerequisites
- name: install daemon
package:
name: daemon
state: present
- name: 'create directory {{ docker_dir }}/{{ docker_version }}'
file:
path: '{{ docker_dir }}/{{ docker_version }}'
state: directory
mode: 0755
- name: download and extract docker
unarchive:
src: 'https://get.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz'
remote_src: yes
dest: '{{ docker_dir }}/{{ docker_version }}'
extra_opts: '--strip-components=1'
mode: 0555
creates: '{{ docker_dir }}/{{ docker_version }}/docker'
- name: create symlink to current version
file:
src: '{{ docker_dir }}/{{ docker_version }}'
dest: '{{ docker_dir }}/current'
state: link
mode: 0555
- name: list all files to symlink
find:
paths: '{{ docker_dir }}/current'
file_type: file
register: binaries
changed_when: false
- name: create symlinks to all binaries
file:
src: '{{ item }}'
dest: /usr/bin/{{ item | basename }}
state: link
with_items: "{{ binaries.files | map(attribute='path') | list }}"
- name: killall docker
command: killall docker
register: killall
failed_when: false
changed_when: killall.rc == 0
- name: start dockerd
command: daemon -- /usr/bin/dockerd
- include_role:
name: docker-configuration

config_management/roles/docker-from-tarball/vars/main.yml

@@ -0,0 +1,5 @@
---
docker_dir: '/opt/docker'
docker_url: '{{ ("rc" in docker_version) | ternary(
  "https://test.docker.com/builds/Linux/x86_64/docker-" ~ docker_version ~ ".tgz",
  "https://get.docker.com/builds/Linux/x86_64/docker-" ~ docker_version ~ ".tgz") }}'

config_management/roles/docker-install/tasks/main.yml

@@ -0,0 +1,30 @@
---
# Set up Docker
- include_role:
name: docker-prerequisites
# Dynamically include docker installation role using 'when' as Ansible does not
# allow for include_role's name to be set to a variable. Indeed:
# - include_role:
# name: '{{ docker_install_role }}'
# fails with:
# ERROR! 'docker_install_role' is undefined
- include_role:
name: docker-from-docker-repo
when: docker_install_role == 'docker-from-docker-repo'
- include_role:
name: docker-from-docker-ce-repo
when: docker_install_role == 'docker-from-docker-ce-repo'
- include_role:
name: docker-from-get.docker.com
when: docker_install_role == 'docker-from-get.docker.com'
- include_role:
name: docker-from-tarball
when: docker_install_role == 'docker-from-tarball'
- include_role:
name: docker-configuration

config_management/roles/docker-prerequisites/tasks/debian.yml

@@ -0,0 +1,11 @@
---
# Install Docker's dependencies
# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
- name: install linux-image-extra-*/virtual
package:
name: "{{ item }}"
state: present
with_items:
- linux-image-extra-{{ ansible_kernel }}
- linux-image-extra-virtual

config_management/roles/docker-prerequisites/tasks/main.yml

@@ -0,0 +1,5 @@
---
# Distribution-specific tasks:
- include: debian.yml
when: ansible_os_family == "Debian"

config_management/roles/golang-from-tarball/tasks/main.yml

@@ -0,0 +1,36 @@
---
# Set up Go.
- name: install go
unarchive:
src: 'https://storage.googleapis.com/golang/go{{ go_version }}.linux-{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.tar.gz'
remote_src: yes
dest: /usr/local
mode: 0777
creates: /usr/local/go/bin/go
- name: set go env. vars. and add go to path
blockinfile:
dest: '$HOME/.bashrc'
block: |
export PATH=$PATH:/usr/local/go/bin
export GOPATH=$HOME
state: present
create: yes
mode: 0644
become: '{{ item }}'
with_items:
- true # Run as root
- false # Run as SSH user
- name: source ~/.bashrc from ~/.bash_profile
lineinfile:
dest: '$HOME/.bash_profile'
line: '[ -r $HOME/.bashrc ] && source $HOME/.bashrc'
state: present
create: yes
mode: 0644
become: '{{ item }}'
with_items:
- true # Run as root
- false # Run as SSH user

config_management/roles/kubelet-stop/tasks/main.yml

@@ -0,0 +1,14 @@
---
- name: check if kubelet service exists
stat:
path: /etc/init.d/kubelet
register: kubelet
# avoids having weave-net and weave-kube conflict in some test cases (e.g. 130_expose_test.sh)
- name: stop kubelet service
systemd:
name: kubelet
state: stopped
enabled: no
when: kubelet.stat.exists

config_management/roles/kubernetes-docker-images/tasks/main.yml

@@ -0,0 +1,14 @@
---
- name: docker pull images used by k8s tests
docker_image:
name: '{{ item }}'
state: present
with_items:
- gcr.io/google_containers/etcd-amd64:{{ etcd_container_version }}
- gcr.io/google_containers/kube-apiserver-amd64:v{{ kubernetes_version }}
- gcr.io/google_containers/kube-controller-manager-amd64:v{{ kubernetes_version }}
- gcr.io/google_containers/kube-proxy-amd64:v{{ kubernetes_version }}
- gcr.io/google_containers/kube-scheduler-amd64:v{{ kubernetes_version }}
- gcr.io/google_containers/kube-discovery-amd64:{{ kube_discovery_container_version }}
- gcr.io/google_containers/pause-amd64:{{ pause_container_version }}

config_management/roles/kubernetes-install/tasks/debian.yml

@@ -0,0 +1,37 @@
---
# Debian / Ubuntu specific:
- name: add apt key for the kubernetes repository
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
register: apt_key_k8s_repo
- name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }})
apt_repository:
repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }} main
state: present
register: apt_k8s_repo
when: '"alpha" not in kubernetes_version and "beta" not in kubernetes_version'
- name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }}-unstable)
apt_repository:
repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }}-unstable main
state: present
register: apt_k8s_repo
when: '"alpha" in kubernetes_version or "beta" in kubernetes_version'
- name: update apt's cache
apt:
update_cache: yes
when: apt_key_k8s_repo.changed or apt_k8s_repo.changed
- name: install kubelet and kubectl
package:
name: "{{ item }}"
state: present
with_items:
- kubelet={{ kubernetes_version }}*
- kubectl={{ kubernetes_version }}*
- kubeadm={{ kubernetes_version }}*
- kubernetes-cni={{ kubernetes_cni_version }}*

config_management/roles/kubernetes-install/tasks/main.yml

@@ -0,0 +1,16 @@
---
# Install Kubernetes
# Distribution-specific tasks:
- include: debian.yml
when: ansible_os_family == "Debian"
- include: redhat.yml
when: ansible_os_family == "RedHat"
- name: install ebtables
package:
name: "{{ item }}"
state: present
with_items:
- ebtables

config_management/roles/kubernetes-install/tasks/redhat.yml

@@ -0,0 +1,30 @@
---
# RedHat / CentOS specific:
- name: add kubernetes' yum repository (kubernetes-el{{ ansible_lsb.major_release }}-x86-64)
yum_repository:
name: kubernetes
description: Kubernetes YUM repo
file: external_repos
baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el{{ ansible_lsb.major_release }}-x86_64
enabled: yes
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
gpgcheck: yes
state: present
register: yum_k8s_repo
- name: update yum's cache
yum:
name: "*"
update_cache: yes
when: yum_k8s_repo.changed
- name: install kubelet and kubectl
package:
name: "{{ item }}"
state: present
with_items:
- kubelet-{{ kubernetes_version }}*
- kubectl-{{ kubernetes_version }}*
- kubeadm-{{ kubernetes_version }}*
- kubernetes-cni-{{ kubernetes_cni_version }}*

config_management/roles/kubernetes-start/tasks/main.yml

@@ -0,0 +1,42 @@
---
# Start Kubernetes
- name: kubeadm reset
command: kubeadm reset
- name: restart kubelet service
systemd:
name: kubelet
state: restarted
enabled: yes
- name: optionally set kubeconfig option
set_fact:
kubeconfig: '{{ (kubernetes_version >= "1.5.4") | ternary("--kubeconfig /etc/kubernetes/admin.conf", "") }}'
kubernetes_version_option: '{{ (kubernetes_version >= "1.6") | ternary("kubernetes-version", "use-kubernetes-version") }}'
- name: kubeadm init on the master
command: 'kubeadm init --{{ kubernetes_version_option }}=v{{ kubernetes_version }} --token={{ kubernetes_token }}'
when: ' {{ play_hosts[0] == inventory_hostname }}'
- name: allow pods to be run on the master (if only node)
command: 'kubectl {{ kubeconfig }} taint nodes --all {{ (kubernetes_version < "1.6") | ternary("dedicated-", "node-role.kubernetes.io/master:NoSchedule-") }}'
when: '{{ play_hosts | length }} == 1'
- name: kubeadm join on workers
command: 'kubeadm join --token={{ kubernetes_token }} {{ hostvars[play_hosts[0]].private_ip }}{{ (kubernetes_version > "1.6") | ternary(":6443", "") }}'
when: ' {{ play_hosts[0] != inventory_hostname }}'
- name: list kubernetes' pods
command: kubectl {{ kubeconfig }} get pods --all-namespaces
when: ' {{ play_hosts[0] == inventory_hostname }}'
changed_when: false
register: kubectl_get_pods
tags:
- output
- name: print output of `kubectl get pods --all-namespaces`
debug: msg="{{ kubectl_get_pods.stdout_lines }}"
when: ' {{ play_hosts[0] == inventory_hostname }}'
tags:
- output
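
For a two-node cluster, the sequence above boils down to roughly the following (the master address is a placeholder; the version and token are the defaults from group_vars/all):

```
# On the first host:
kubeadm init --kubernetes-version=v1.6.1 --token=123456.0123456789123456
# On every other host (the :6443 suffix applies to K8S 1.6+):
kubeadm join --token=123456.0123456789123456 <master-ip>:6443
```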

@@ -0,0 +1,26 @@
---
# Set machine up to be able to run ansible playbooks.
- name: check if python is installed (as required by ansible modules)
raw: test -e /usr/bin/python
register: is_python_installed
failed_when: is_python_installed.rc not in [0, 1]
changed_when: false # never mutates state.
- name: install python if missing (as required by ansible modules)
when: is_python_installed|failed # skip otherwise
raw: (test -e /usr/bin/apt-get && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum install -y python)
changed_when: is_python_installed.rc == 1
- name: check if lsb_release is installed (as required for ansible facts)
raw: test -e /usr/bin/lsb_release
register: is_lsb_release_installed
failed_when: is_lsb_release_installed.rc not in [0, 1]
changed_when: false # never mutates state.
- name: install lsb_release if missing (as required for ansible facts)
when: is_lsb_release_installed|failed # skip otherwise
raw: (test -e /usr/bin/apt-get && apt-get install -y lsb_release) || (test -e /usr/bin/yum && yum install -y lsb_release)
changed_when: is_lsb_release_installed.rc == 1
- setup: # gather 'facts', i.e. compensates for the above 'gather_facts: false'.

config_management/roles/sock-shop/tasks/main.yml

@@ -0,0 +1,34 @@
---
# Set up sock-shop on top of Kubernetes.
# Dependencies on other roles:
# - kubernetes
- name: create sock-shop namespace in k8s
command: kubectl --kubeconfig /etc/kubernetes/admin.conf create namespace sock-shop
- name: create sock-shop in k8s
command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -n sock-shop -f "https://github.com/microservices-demo/microservices-demo/blob/master/deploy/kubernetes/complete-demo.yaml?raw=true"
- name: describe front-end service
command: kubectl --kubeconfig /etc/kubernetes/admin.conf describe svc front-end -n sock-shop
changed_when: false
register: kubectl_describe_svc_frontend
tags:
- output
- name: print output of `kubectl describe svc front-end -n sock-shop`
debug: msg="{{ kubectl_describe_svc_frontend.stdout_lines }}"
tags:
- output
- name: list sock-shop k8s' pods
command: kubectl --kubeconfig /etc/kubernetes/admin.conf get pods -n sock-shop
changed_when: false
register: kubectl_get_pods
tags:
- output
- name: print output of `kubectl get pods -n sock-shop`
debug: msg="{{ kubectl_get_pods.stdout_lines }}"
tags:
- output

config_management/roles/weave-kube/tasks/main.yml

@@ -0,0 +1,24 @@
---
# Set up Weave Kube on top of Kubernetes.
- name: set url for weave-kube daemonset
set_fact:
weave_kube_url: '{{ (kubernetes_version < "1.6") | ternary("https://git.io/weave-kube", "https://git.io/weave-kube-1.6") }}'
- name: configure weave net's cni plugin
command: 'kubectl {{ kubeconfig }} apply -f {{ weave_kube_url }}'
when: '{{ play_hosts[0] == inventory_hostname }}'
- name: list kubernetes' pods
command: 'kubectl {{ kubeconfig }} get pods --all-namespaces'
when: '{{ play_hosts[0] == inventory_hostname }}'
changed_when: false
register: kubectl_get_pods
tags:
- output
- name: print output of `kubectl get pods --all-namespaces`
debug: msg="{{ kubectl_get_pods.stdout_lines }}"
when: '{{ play_hosts[0] == inventory_hostname }}'
tags:
- output

config_management/roles/weave-net-sources/tasks/main.yml

@@ -0,0 +1,24 @@
---
# Set up Development Environment for Weave Net.
- name: check if weave net has been checked out
become: false # Run as SSH-user
stat:
path: $HOME/src/github.com/weaveworks/weave
register: weave
failed_when: false
changed_when: false
- name: git clone weave net
become: false # Run as SSH-user
git:
repo: https://github.com/weaveworks/weave.git
dest: $HOME/src/github.com/weaveworks/weave
when: not weave.stat.exists
- name: create a convenience symlink to $HOME/src/github.com/weaveworks/weave
become: false # Run as SSH-user
file:
src: $HOME/src/github.com/weaveworks/weave
dest: $HOME/weave
state: link

config_management/roles/weave-net-utilities/tasks/main.yml

@@ -0,0 +1,56 @@
---
- name: install epel-release
package:
name: "{{ item }}"
state: present
with_items:
- epel-release
when: ansible_os_family == "RedHat"
- name: install jq
package:
name: "{{ item }}"
state: present
with_items:
- jq
- name: install ethtool (used by the weave script)
package:
name: "{{ item }}"
state: present
with_items:
- ethtool
- name: install nsenter (used by the weave script)
command: docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
- name: install pip (for docker-py)
package:
name: "{{ item }}"
state: present
with_items:
- python-pip
- name: install docker-py (for docker_image)
pip:
name: docker-py
state: present
- name: docker pull images used by tests
docker_image:
name: '{{ item }}'
state: present
with_items:
- alpine
- aanand/docker-dnsutils
- weaveworks/hello-world
- name: docker pull docker-py which is used by tests
docker_image:
name: joffrey/docker-py
tag: '{{ item }}'
state: present
with_items:
- '1.8.1'
- '1.9.0-rc2'

config_management/roles/weave-net/tasks/main.yml

@@ -0,0 +1,26 @@
---
# Set up Weave Net.
- name: install weave net
get_url:
url: https://git.io/weave
dest: /usr/local/bin/weave
mode: 0555
- name: stop weave net
command: /usr/local/bin/weave stop
- name: start weave net
command: /usr/local/bin/weave launch
- name: get weave net's status
command: /usr/local/bin/weave status
changed_when: false
register: weave_status
tags:
- output
- name: print output of `weave status`
debug: msg="{{ weave_status.stdout_lines }}"
tags:
- output
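
By hand, the equivalent of this role is approximately (a sketch, not taken from this repo):

```
sudo curl -fsSL https://git.io/weave -o /usr/local/bin/weave
sudo chmod 0555 /usr/local/bin/weave
weave stop || true   # stopping may fail if weave is not yet running
weave launch
weave status
```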

config_management/setup_weave-kube.yml

@@ -0,0 +1,27 @@
---
################################################################################
# Install Docker and Kubernetes, and configure Kubernetes to
# use Weave Net's CNI plugin (a.k.a. Weave Kube).
#
# See also:
# - http://kubernetes.io/docs/getting-started-guides/kubeadm/
# - https://github.com/weaveworks/weave-kube
################################################################################
- name: install docker, kubernetes and weave-kube
hosts: all
gather_facts: false # required in case Python is not available on the host
become: true
become_user: root
pre_tasks:
- include: library/setup_ansible_dependencies.yml
roles:
- docker-install
- weave-net-utilities
- kubernetes-install
- kubernetes-docker-images
- kubelet-stop
- kubernetes-start
- weave-kube

View File

@ -0,0 +1,18 @@
---
################################################################################
# Install Docker from Docker's official repository and Weave Net.
################################################################################
- name: install docker and weave net for development
hosts: all
gather_facts: false # required in case Python is not available on the host
become: true
become_user: root
pre_tasks:
- include: library/setup_ansible_dependencies.yml
roles:
- docker-install
- weave-net-utilities
- weave-net

View File

@ -0,0 +1,20 @@
---
################################################################################
# Install Docker from Docker's official repository and Weave Net.
################################################################################
- name: install docker and weave net for development
hosts: all
gather_facts: false # required in case Python is not available on the host
become: true
become_user: root
pre_tasks:
- include: library/setup_ansible_dependencies.yml
roles:
- dev-tools
- golang-from-tarball
- docker-install
# Do not run this role when building with Vagrant, as sources have been already checked out:
- { role: weave-net-sources, when: "ansible_user != 'vagrant'" }

View File

@ -0,0 +1,20 @@
---
################################################################################
# Install Docker from Docker's official repository and Weave Net.
################################################################################
- name: install docker and weave net for testing
hosts: all
gather_facts: false # required in case Python is not available on the host
become: true
become_user: root
pre_tasks:
- include: library/setup_ansible_dependencies.yml
roles:
- docker-install
- weave-net-utilities
- kubernetes-install
- kubernetes-docker-images
- kubelet-stop

dependencies/cross_versions.py vendored Executable file
View File

@ -0,0 +1,91 @@
#!/usr/bin/env python
# Generate the cross product of the latest versions of Weave Net's dependencies:
# - Docker
# - Kubernetes
# (Go is listed by list_versions.py but is not part of the cross product.)
#
# Dependencies:
# - python
# - git
# - list_versions.py
#
# Testing:
# $ python -m doctest -v cross_versions.py
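#
# Example (illustrative only; actual output depends on the current GitHub tags):
#   $ ./cross_versions.py --latest
#   <docker_version>\t<kubernetes_version>   # one tab-separated pair per line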
from os import linesep
from sys import argv, exit, stdout, stderr
from getopt import getopt, GetoptError
from list_versions import DEPS, get_versions_from, filter_versions
from itertools import product
# See also: /usr/include/sysexits.h
_ERROR_RUNTIME = 1
_ERROR_ILLEGAL_ARGS = 64
def _usage(error_message=None):
if error_message:
stderr.write('ERROR: ' + error_message + linesep)
stdout.write(
linesep.join([
'Usage:', ' cross_versions.py [OPTION]...', 'Examples:',
' cross_versions.py', ' cross_versions.py -r',
' cross_versions.py --rc', ' cross_versions.py -l',
' cross_versions.py --latest', 'Options:',
'-l/--latest Include only the latest version of each major and'
' minor versions sub-tree.',
'-r/--rc Include release candidate versions.',
'-h/--help Prints this!', ''
]))
def _validate_input(argv):
try:
config = {'rc': False, 'latest': False}
opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
for opt, value in opts:
if opt in ('-h', '--help'):
_usage()
exit()
if opt in ('-l', '--latest'):
config['latest'] = True
if opt in ('-r', '--rc'):
config['rc'] = True
if len(args) != 0:
raise ValueError('Unsupported argument(s): %s.' % args)
return config
except GetoptError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
except ValueError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
def _versions(dependency, config):
return map(str,
filter_versions(
get_versions_from(DEPS[dependency]['url'],
DEPS[dependency]['re']),
DEPS[dependency]['min'], **config))
def cross_versions(config):
docker_versions = _versions('docker', config)
k8s_versions = _versions('kubernetes', config)
return product(docker_versions, k8s_versions)
def main(argv):
try:
config = _validate_input(argv)
print(linesep.join('\t'.join(pair)
for pair in cross_versions(config)))
except Exception as e:
print(str(e))
exit(_ERROR_RUNTIME)
if __name__ == '__main__':
main(argv[1:])

dependencies/list_os_images.sh vendored Executable file
View File

@ -0,0 +1,85 @@
#!/bin/bash
function usage() {
cat <<EOF
Description:
Script to list OS images, sorted and in a Terraform-friendly format.
Dependencies:
- gcloud, Google Cloud Platform's CLI
- aws, Amazon Web Services' CLI
- curl and jq, for Digital Ocean
Usage:
\$ ./$(basename "$0") PROVIDER OS [REGION]
PROVIDER={gcp|aws|do}
OS={ubuntu|debian|centos}
Example:
\$ ./$(basename "$0") gcp ubuntu
ubuntu-os-cloud/ubuntu-1204-lts
ubuntu-os-cloud/ubuntu-1404-lts
ubuntu-os-cloud/ubuntu-1604-lts
ubuntu-os-cloud/ubuntu-1610
EOF
}
function find_aws_owner_id() {
local os_owner_ids=(
"ubuntu:099720109477"
"debian:379101102735"
"centos:679593333241"
)
for os_owner_id in "${os_owner_ids[@]}"; do
os=${os_owner_id%%:*}
owner_id=${os_owner_id#*:}
if [ "$os" == "$1" ]; then
echo "$owner_id"
return 0
fi
done
echo >&2 "No AWS owner ID for $1."
exit 1
}
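# e.g. find_aws_owner_id "debian" prints "379101102735", while an unknown OS
# prints an error on stderr and exits with status 1.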
if [ -z "$1" ]; then
echo >&2 "No specified provider."
usage
exit 1
fi
if [ -z "$2" ]; then
if [ "$1" == "help" ]; then
usage
exit 0
else
echo >&2 "No specified operating system."
usage
exit 1
fi
fi
case "$1" in
'gcp')
gcloud compute images list --standard-images --regexp=".*?$2.*" \
--format="csv[no-heading][separator=/](selfLink.map().scope(projects).segment(0),family)" \
| sort -d
;;
'aws')
aws --region "${3:-us-east-1}" ec2 describe-images \
--owners "$(find_aws_owner_id "$2")" \
--filters "Name=name,Values=$2*" \
--query 'Images[*].{name:Name,id:ImageId}'
# Other examples:
# - CentOS: aws --region us-east-1 ec2 describe-images --owners aws-marketplace --filters Name=product-code,Values=aw0evgkw8e5c1q413zgy5pjce
# - Debian: aws --region us-east-1 ec2 describe-images --owners 379101102735 --filters "Name=architecture,Values=x86_64" "Name=name,Values=debian-jessie-*" "Name=root-device-type,Values=ebs" "Name=virtualization-type,Values=hvm"
;;
'do')
curl -s -X GET \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \
"https://api.digitalocean.com/v2/images?page=1&per_page=999999" \
| jq --raw-output ".images | .[] | .slug" | grep "$2" | sort -d
;;
*)
echo >&2 "Unknown provider [$1]."
usage
exit 1
;;
esac

dependencies/list_versions.py vendored Executable file
View File

@ -0,0 +1,343 @@
#!/usr/bin/env python
# List all available versions of Weave Net's dependencies:
# - Go
# - Docker
# - Kubernetes
#
# Depending on the parameters passed, it can gather the equivalent of the below
# bash one-liners:
# git ls-remote --tags https://github.com/golang/go \
# | grep -oP '(?<=refs/tags/go)[\.\d]+$' \
# | sort --version-sort
# git ls-remote --tags https://github.com/golang/go \
# | grep -oP '(?<=refs/tags/go)[\.\d]+rc\d+$' \
# | sort --version-sort \
# | tail -n 1
# git ls-remote --tags https://github.com/docker/docker \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \
# | sort --version-sort
# git ls-remote --tags https://github.com/docker/docker \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-rc\d*$' \
# | sort --version-sort \
# | tail -n 1
# git ls-remote --tags https://github.com/kubernetes/kubernetes \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \
# | sort --version-sort
# git ls-remote --tags https://github.com/kubernetes/kubernetes \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-beta\.\d+$' \
# | sort --version-sort | tail -n 1
#
# Dependencies:
# - python
# - git
#
# Testing:
# $ python -m doctest -v list_versions.py
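#
# Example (illustrative only; actual output depends on the current GitHub tags):
#   $ ./list_versions.py --latest kubernetes
#   prints the latest version of each major/minor sub-tree, one per line.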
from os import linesep, path
from sys import argv, exit, stdout, stderr
from getopt import getopt, GetoptError
from subprocess import Popen, PIPE
from pkg_resources import parse_version
from itertools import groupby
from six.moves import filter
import shlex
import re
# See also: /usr/include/sysexits.h
_ERROR_RUNTIME = 1
_ERROR_ILLEGAL_ARGS = 64
_TAG_REGEX = '^[0-9a-f]{40}\s+refs/tags/%s$'
_VERSION = 'version'
DEPS = {
'go': {
'url': 'https://github.com/golang/go',
're': 'go(?P<%s>[\d\.]+(?:rc\d)*)' % _VERSION,
'min': None
},
'docker': {
'url': 'https://github.com/docker/docker',
're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-rc\d)*)' % _VERSION,
# Weave Net only works with Docker from 1.10.0 onwards, so we ignore
# all previous versions:
'min': '1.10.0',
},
'kubernetes': {
'url': 'https://github.com/kubernetes/kubernetes',
're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-beta\.\d)*)' % _VERSION,
# Weave Kube requires Kubernetes 1.4.2+, so we ignore all previous
# versions:
'min': '1.4.2',
}
}
class Version(object):
''' Helper class to parse and manipulate (sort, filter, group) software
versions. '''
def __init__(self, version):
self.version = version
self.digits = [
int(x) if x else 0
for x in re.match('(\d*)\.?(\d*)\.?(\d*).*?', version).groups()
]
self.major, self.minor, self.patch = self.digits
self.__parsed = parse_version(version)
self.is_rc = self.__parsed.is_prerelease
def __lt__(self, other):
return self.__parsed.__lt__(other.__parsed)
def __gt__(self, other):
return self.__parsed.__gt__(other.__parsed)
def __le__(self, other):
return self.__parsed.__le__(other.__parsed)
def __ge__(self, other):
return self.__parsed.__ge__(other.__parsed)
def __eq__(self, other):
return self.__parsed.__eq__(other.__parsed)
def __ne__(self, other):
return self.__parsed.__ne__(other.__parsed)
def __str__(self):
return self.version
def __repr__(self):
return self.version
def _read_go_version_from_dockerfile():
# Read Go version from weave/build/Dockerfile
dockerfile_path = path.join(
path.dirname(path.dirname(path.dirname(path.realpath(__file__)))),
'build', 'Dockerfile')
with open(dockerfile_path, 'r') as f:
for line in f:
m = re.match('^FROM golang:(\S*)$', line)
if m:
return m.group(1)
raise RuntimeError(
"Failed to read Go version from weave/build/Dockerfile."
" You may be running this script from somewhere else than weave/tools."
)
def _try_set_min_go_version():
''' Set the current version of Go used to build Weave Net's containers as
the minimum version. '''
try:
DEPS['go']['min'] = _read_go_version_from_dockerfile()
except IOError as e:
stderr.write('WARNING: No minimum Go version set. Root cause: %s%s' %
(e, linesep))
def _sanitize(out):
return out.decode('ascii').strip().split(linesep)
def _parse_tag(tag, version_pattern, debug=False):
''' Parse Git tag output's line using the provided `version_pattern`, e.g.:
>>> _parse_tag(
'915b77eb4efd68916427caf8c7f0b53218c5ea4a refs/tags/v1.4.6',
'v(?P<version>\d+\.\d+\.\d+(?:\-beta\.\d)*)')
'1.4.6'
'''
pattern = _TAG_REGEX % version_pattern
m = re.match(pattern, tag)
if m:
return m.group(_VERSION)
elif debug:
stderr.write(
'ERROR: Failed to parse version out of tag [%s] using [%s].%s' %
(tag, pattern, linesep))
def get_versions_from(git_repo_url, version_pattern):
''' Get release and release candidates' versions from the provided Git
repository. '''
git = Popen(
shlex.split('git ls-remote --tags %s' % git_repo_url), stdout=PIPE)
out, err = git.communicate()
status_code = git.returncode
if status_code != 0:
raise RuntimeError('Failed to retrieve git tags from %s. '
'Status code: %s. Output: %s. Error: %s' %
(git_repo_url, status_code, out, err))
return list(
filter(None, (_parse_tag(line, version_pattern)
for line in _sanitize(out))))
def _tree(versions, level=0):
''' Group versions by major, minor and patch version digits. '''
if not versions or level >= len(versions[0].digits):
return # Empty versions or no more digits to group by.
versions_tree = []
for _, versions_group in groupby(versions, lambda v: v.digits[level]):
subtree = _tree(list(versions_group), level + 1)
if subtree:
versions_tree.append(subtree)
# Return the current subtree if non-empty, or the list of "leaf" versions:
return versions_tree if versions_tree else versions
def _is_iterable(obj):
'''
Check if the provided object is an iterable collection, i.e. not a string,
e.g. a list, a generator:
>>> _is_iterable('string')
False
>>> _is_iterable([1, 2, 3])
True
>>> _is_iterable((x for x in [1, 2, 3]))
True
'''
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def _leaf_versions(tree, rc):
'''
Recursively traverse the versions tree in a depth-first fashion,
and collect the last node of each branch, i.e. leaf versions.
'''
versions = []
if _is_iterable(tree):
for subtree in tree:
versions.extend(_leaf_versions(subtree, rc))
if not versions:
if rc:
last_rc = next(filter(lambda v: v.is_rc, reversed(tree)), None)
last_prod = next(
filter(lambda v: not v.is_rc, reversed(tree)), None)
if last_rc and last_prod and (last_prod < last_rc):
versions.extend([last_prod, last_rc])
elif not last_prod:
versions.append(last_rc)
else:
# Either there is no RC, or we ignore the RC as older than
# the latest production version:
versions.append(last_prod)
else:
versions.append(tree[-1])
return versions
def filter_versions(versions, min_version=None, rc=False, latest=False):
''' Filter provided versions
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=False, rc=False)
[1.0.0, 1.0.1, 1.1.1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=True, rc=False)
[1.0.1, 1.1.1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=False, rc=True)
[1.0.0-beta.1, 1.0.0, 1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version='1.1.0', latest=False, rc=True)
[1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=True, rc=True)
[1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version='1.1.0', latest=True, rc=True)
[1.1.1, 1.1.2-rc1, 2.0.0]
'''
versions = sorted([Version(v) for v in versions])
if min_version:
min_version = Version(min_version)
versions = [v for v in versions if v >= min_version]
if not rc:
versions = [v for v in versions if not v.is_rc]
if latest:
versions_tree = _tree(versions)
return _leaf_versions(versions_tree, rc)
else:
return versions
def _usage(error_message=None):
if error_message:
stderr.write('ERROR: ' + error_message + linesep)
stdout.write(
linesep.join([
'Usage:', ' list_versions.py [OPTION]... [DEPENDENCY]',
'Examples:', ' list_versions.py go',
' list_versions.py -r docker',
' list_versions.py --rc docker',
' list_versions.py -l kubernetes',
' list_versions.py --latest kubernetes', 'Options:',
'-l/--latest Include only the latest version of each major and'
' minor versions sub-tree.',
'-r/--rc Include release candidate versions.',
'-h/--help Prints this!', ''
]))
def _validate_input(argv):
try:
config = {'rc': False, 'latest': False}
opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
for opt, value in opts:
if opt in ('-h', '--help'):
_usage()
exit()
if opt in ('-l', '--latest'):
config['latest'] = True
if opt in ('-r', '--rc'):
config['rc'] = True
if len(args) != 1:
raise ValueError('Please provide a dependency to get versions of.'
' Expected 1 argument but got %s: %s.' %
(len(args), args))
dependency = args[0].lower()
if dependency not in DEPS.keys():
raise ValueError(
'Please provide a valid dependency.'
' Expected one dependency among {%s} but got: %s.' %
(', '.join(DEPS.keys()), dependency))
return dependency, config
except GetoptError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
except ValueError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
def main(argv):
try:
dependency, config = _validate_input(argv)
if dependency == 'go':
_try_set_min_go_version()
versions = get_versions_from(DEPS[dependency]['url'],
DEPS[dependency]['re'])
versions = filter_versions(versions, DEPS[dependency]['min'], **config)
print(linesep.join(map(str, versions)))
except Exception as e:
print(str(e))
exit(_ERROR_RUNTIME)
if __name__ == '__main__':
main(argv[1:])

View File

@ -4,6 +4,6 @@ set -o errexit
set -o nounset
set -o pipefail
WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; then echo "-WIP"; else echo ""; fi)
WORKING_SUFFIX=$(if git status --porcelain | grep -qE '^(?:[^?][^ ]|[^ ][^?])\s'; then echo "-WIP"; else echo ""; fi)
BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD)
echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX"

View File

@ -81,7 +81,8 @@ run_on() {
host=$1
shift 1
[ -z "$DEBUG" ] || greyly echo "Running on $host:" "$@" >&2
remote "$host" "$SSH" "$host" "$@"
# shellcheck disable=SC2086
remote "$host" $SSH "$host" "$@"
}
docker_on() {
@ -114,10 +115,8 @@ rm_containers() {
start_suite() {
for host in $HOSTS; do
[ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave"
PLUGIN_ID=$(docker_on "$host" ps -aq --filter=name=weaveplugin)
PLUGIN_FILTER="cat"
[ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID"
rm_containers "$host" "$(docker_on "$host" ps -aq 2>/dev/null | "$PLUGIN_FILTER")"
# shellcheck disable=SC2046
rm_containers "$host" $(docker_on "$host" ps -aq 2>/dev/null)
run_on "$host" "docker network ls | grep -q ' weave ' && docker network rm weave" || true
weave_on "$host" reset 2>/dev/null
done
@ -128,4 +127,4 @@ end_suite() {
whitely assert_end
}
WEAVE=$DIR/../weave
WEAVE=$DIR/../../integration/weave

View File

@ -9,7 +9,9 @@ set -e
: "${KEY_FILE:=/tmp/gce_private_key.json}"
: "${SSH_KEY_FILE:=$HOME/.ssh/gce_ssh_key}"
: "${IMAGE:=ubuntu-14-04}"
: "${IMAGE_FAMILY:=ubuntu-1404-lts}"
: "${IMAGE_PROJECT:=ubuntu-os-cloud}"
: "${USER_ACCOUNT:=ubuntu}"
: "${ZONE:=us-central1-a}"
: "${PROJECT:=}"
: "${TEMPLATE_NAME:=}"
@ -22,7 +24,9 @@ fi
SUFFIX=""
if [ -n "$CIRCLECI" ]; then
SUFFIX="-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
SUFFIX="-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
else
SUFFIX="-${USER}"
fi
# Setup authentication
@ -40,8 +44,13 @@ function vm_names() {
# Delete all vms in this account
function destroy() {
local names
# shellcheck disable=SC2046
if [ $(gcloud compute firewall-rules list "test-allow-docker$SUFFIX" 2>/dev/null | wc -l) -gt 0 ]; then
gcloud compute firewall-rules delete "test-allow-docker$SUFFIX"
fi
names="$(vm_names)"
if [ "$(gcloud compute instances list --zone "$ZONE" -q "$names" | wc -l)" -le 1 ]; then
# shellcheck disable=SC2086
if [ "$(gcloud compute instances list --zones "$ZONE" -q $names | wc -l)" -le 1 ]; then
return 0
fi
for i in {0..10}; do
@ -82,12 +91,16 @@ function try_connect() {
function install_docker_on() {
name=$1
echo "Installing Docker on $name for user ${USER_ACCOUNT}"
# shellcheck disable=SC2087
ssh -t "$name" sudo bash -x -s <<EOF
set -x
set -e
curl -sSL https://get.docker.com/gpg | sudo apt-key add -
curl -sSL https://get.docker.com/ | sh
apt-get update -qq;
apt-get install -q -y --force-yes --no-install-recommends ethtool;
usermod -a -G docker vagrant;
usermod -a -G docker "${USER_ACCOUNT}";
echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay"' >> /etc/default/docker;
service docker restart
EOF
@ -107,7 +120,10 @@ function setup() {
destroy
names=($(vm_names))
gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE"
gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE" --tags "test$SUFFIX" --network=test
my_ip="$(curl -s http://ipinfo.io/ip)"
gcloud compute firewall-rules create "test-allow-docker$SUFFIX" --network=test --allow tcp:2375,tcp:12375,tcp:4040,tcp:80 --target-tags "test$SUFFIX" --source-ranges "$my_ip"
gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config
@ -136,7 +152,7 @@ function setup() {
}
function make_template() {
gcloud compute instances create "$TEMPLATE_NAME" --image "$IMAGE" --zone "$ZONE"
gcloud compute instances create "$TEMPLATE_NAME" --image-family "$IMAGE_FAMILY" --image-project "$IMAGE_PROJECT" --zone "$ZONE"
gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
name="$TEMPLATE_NAME.$ZONE.$PROJECT"
try_connect "$name"
@ -155,7 +171,7 @@ function hosts() {
hosts=($hostname "${hosts[@]}")
args=("--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}")
done
echo export SSH=\"ssh -l vagrant\"
echo export SSH=\"ssh -l "${USER_ACCOUNT}"\"
echo "export HOSTS=\"${hosts[*]}\""
echo "export ADD_HOST_ARGS=\"${args[*]}\""
rm "$json"
@ -178,6 +194,9 @@ case "$1" in
# see if template exists
if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then
make_template
else
echo "Reusing existing template:"
gcloud compute images describe "$TEMPLATE_NAME" | grep "^creationTimestamp"
fi
;;
esac

View File

@ -1,6 +1,6 @@
#! /bin/bash
# shellcheck disable=SC1091
. ./config.sh
# shellcheck disable=SC1090,SC1091
. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/config.sh"
set -e

lint
View File

@ -6,7 +6,7 @@
#
# For shell files, it runs shfmt. If you don't have that installed, you can get
# it with:
# go get -u github.com/mvdan/sh/cmd/shfmt
# go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt
#
# With no arguments, it lints the current files staged
# for git commit. Or you can pass it explicit filenames
@ -17,9 +17,11 @@
set -e
LINT_IGNORE_FILE=${LINT_IGNORE_FILE:-".lintignore"}
IGNORE_LINT_COMMENT=
IGNORE_TEST_PACKAGES=
IGNORE_SPELLINGS=
PARALLEL=
while true; do
case "$1" in
-nocomment)
@ -27,13 +29,17 @@ while true; do
shift 1
;;
-notestpackage)
IGNORE_TEST_PACKAGES=1
# NOOP, still accepted for backwards compatibility
shift 1
;;
-ignorespelling)
IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS"
shift 2
;;
-p)
PARALLEL=1
shift 1
;;
*)
break
;;
@ -65,30 +71,6 @@ spell_check() {
return $lint_result
}
test_mismatch() {
local filename="$1"
local package=$(grep '^package ' "$filename" | awk '{print $2}')
local lint_result=0
if [[ $package == "main" ]]; then
return # in package main, all bets are off
fi
if [[ $filename == *"_internal_test.go" ]]; then
if [[ $package == *"_test" ]]; then
lint_result=1
echo "${filename}: should not be part of a _test package"
fi
else
if [[ ! $package == *"_test" ]]; then
lint_result=1
echo "${filename}: should be part of a _test package"
fi
fi
return $lint_result
}
lint_go() {
local filename="$1"
local lint_result=0
@ -131,7 +113,7 @@ lint_sh() {
local filename="$1"
local lint_result=0
if ! diff <(shfmt -i 4 "${filename}") "${filename}" >/dev/null; then
if ! diff -u "${filename}" <(shfmt -i 4 "${filename}"); then
lint_result=1
echo "${filename}: run shfmt -i 4 -w ${filename}"
fi
@ -149,7 +131,7 @@ lint_tf() {
local filename="$1"
local lint_result=0
if ! diff <(hclfmt "${filename}") "${filename}" >/dev/null; then
if ! diff -u <(hclfmt "${filename}") "${filename}"; then
lint_result=1
echo "${filename}: run hclfmt -w ${filename}"
fi
@ -157,6 +139,35 @@ lint_tf() {
return $lint_result
}
lint_md() {
local filename="$1"
local lint_result=0
for i in '=======' '>>>>>>>'; do
if grep -q "${i}" "${filename}"; then
lint_result=1
echo "${filename}: bad merge/rebase!"
fi
done
return $lint_result
}
lint_py() {
local filename="$1"
local lint_result=0
if yapf --diff "${filename}" | grep -qE '^[+-]'; then
lint_result=1
echo "${filename}: run yapf --in-place ${filename}"
else
# Only run flake8 if yapf passes, since they pick up a lot of similar issues
flake8 "${filename}" || lint_result=1
fi
return $lint_result
}
lint() {
filename="$1"
ext="${filename##*\.}"
@ -167,13 +178,14 @@ lint() {
return
fi
# Don't lint static.go
# Don't lint specific files
case "$(basename "${filename}")" in
static.go) return ;;
coverage.html) return ;;
*.pb.go) return ;;
esac
if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" = "text/x-shellscript" ]]; then
if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" == "text/x-shellscript" ]]; then
ext="sh"
fi
@ -181,14 +193,10 @@ lint() {
go) lint_go "${filename}" || lint_result=1 ;;
sh) lint_sh "${filename}" || lint_result=1 ;;
tf) lint_tf "${filename}" || lint_result=1 ;;
md) lint_md "${filename}" || lint_result=1 ;;
py) lint_py "${filename}" || lint_result=1 ;;
esac
if [ -z "$IGNORE_TEST_PACKAGES" ]; then
if [[ "$filename" == *"_test.go" ]]; then
test_mismatch "${filename}" || lint_result=1
fi
fi
spell_check "${filename}" || lint_result=1
return $lint_result
@ -202,12 +210,46 @@ lint_files() {
exit $lint_result
}
list_files() {
if [ $# -gt 0 ]; then
git ls-files --exclude-standard | grep -vE '(^|/)vendor/'
matches_any() {
local filename="$1"
local patterns="$2"
while read -r pattern; do
# shellcheck disable=SC2053
# Use the [[ operator without quotes on $pattern
# in order to "glob" the provided filename:
[[ "$filename" == $pattern ]] && return 0
done <<<"$patterns"
return 1
}
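# e.g., given a patterns list containing "vendor/*":
#   matches_any "vendor/foo.go" "$patterns"   # returns 0 (will be filtered out)
#   matches_any "main.go" "$patterns"         # returns 1 (will be linted)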
filter_out() {
local patterns_file="$1"
if [ -n "$patterns_file" ] && [ -r "$patterns_file" ]; then
local patterns
patterns=$(sed '/^#.*$/d ; /^\s*$/d' "$patterns_file") # Remove blank lines and comments before we start iterating.
[ -n "$DEBUG" ] && echo >&2 "> Filters:" && echo >&2 "$patterns"
local filtered_out=()
while read -r filename; do
matches_any "$filename" "$patterns" && filtered_out+=("$filename") || echo "$filename"
done
[ -n "$DEBUG" ] && echo >&2 "> Files filtered out (i.e. NOT linted):" && printf >&2 '%s\n' "${filtered_out[@]}"
else
git diff --cached --name-only
cat # No patterns provided: simply propagate stdin to stdout.
fi
}
list_files "$@" | lint_files
list_files() {
if [ $# -gt 0 ]; then
find "$@" | grep -vE '(^|/)vendor/'
else
git ls-files --exclude-standard | grep -vE '(^|/)vendor/'
fi
}
if [ $# = 1 ] && [ -f "$1" ]; then
lint "$1"
elif [ -n "$PARALLEL" ]; then
list_files "$@" | filter_out "$LINT_IGNORE_FILE" | xargs -n1 -P16 "$0"
else
list_files "$@" | filter_out "$LINT_IGNORE_FILE" | lint_files
fi

provisioning/README.md Executable file
View File

@ -0,0 +1,55 @@
# Weaveworks provisioning
## Introduction
This project allows you to get hold of machines either locally or on one of the cloud providers below:
* Amazon Web Services
* Digital Ocean
* Google Cloud Platform
You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
## Set up
* You will need [Vagrant](https://www.vagrantup.com) installed on your machine and added to your `PATH` in order to be able to provision local (virtual) machines automatically.
* On macOS: `brew install vagrant`
* On Linux (via Aptitude): `sudo apt install vagrant`
* For other platforms or more details, see [here](https://www.vagrantup.com/docs/installation/)
* You will need [Terraform](https://www.terraform.io) installed on your machine and added to your `PATH` in order to be able to provision cloud-hosted machines automatically.
* On macOS: `brew install terraform`
* On Linux (via Aptitude): `sudo apt install terraform`
* If you need a specific version:
curl -fsS https://releases.hashicorp.com/terraform/x.y.z/terraform_x.y.z_linux_amd64.zip | gunzip > terraform && chmod +x terraform && sudo mv terraform /usr/bin
* For other platforms or more details, see [here](https://www.terraform.io/intro/getting-started/install.html)
* Depending on the cloud provider, you may have to create an account, manually onboard, create and register SSH keys, etc.
Please refer to the `README.md` in each sub-folder for more details.
## Usage in scripts
Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either:
* `gcp_on` / `gcp_off`
* `do_on` / `do_off`
* `aws_on` / `aws_off`
## Usage in shell
The functions defined in `setup.sh` are also exported as aliases, so they can be called directly from your interactive shell in exactly the same way: source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either:
* `gcp_on` / `gcp_off`
* `do_on` / `do_off`
* `aws_on` / `aws_off`
Other aliases are also defined, in order to make your life easier:
* `tf_ssh`: to ease SSH-ing into the virtual machines, reading the username and IP address to use from Terraform, as well as setting default SSH options.
* `tf_ansi`: to ease applying an Ansible playbook to a set of virtual machines, dynamically creating the inventory, as well as setting default SSH options.
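For instance, an end-to-end GCP session could look like the below sketch (the Ansible playbook name is a placeholder, and `tf_ansi`'s exact arguments are described in its usage message):
```
source ./setup.sh
export SECRET_KEY="<decryption password>"  # required by gcp_on to decrypt credentials
gcp_on                       # activate GCP credentials and Terraform variables
(cd gcp && terraform apply)  # provision the machines
tf_ssh 1                     # SSH into the 1st machine
tf_ansi <playbook> 1         # apply an Ansible playbook from ../config_management
gcp_off                      # deactivate the environment variables
```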

View File

@ -0,0 +1,90 @@
# Amazon Web Services
## Introduction
This project allows you to get hold of machines on Amazon Web Services.
You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
## Setup
* Log in [weaveworks.signin.aws.amazon.com/console](https://weaveworks.signin.aws.amazon.com/console/) with your account.
* Go to `Services` > `IAM` > `Users` > Click on your username > `Security credentials` > `Create access key`.
Your access key and secret key will appear on the screen. Set these as environment variables:
```
export AWS_ACCESS_KEY_ID=<your access key>
export AWS_SECRET_ACCESS_KEY=<your secret key>
```
* Go to `Services` > `EC2` > Select the availability zone you want to use (see top right corner, e.g. `us-east-1`) > `Import Key Pair`.
Enter your SSH public key and the name for it, and click `Import`.
Set the path to your private key as an environment variable:
```
export TF_VAR_aws_public_key_name=<your Amazon Web Services SSH key name>
export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa"
```
* Set your current IP address as an environment variable:
```
export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
```
or pass it as a Terraform variable:
```
$ terraform <command> -var "client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)"
```
### Bash aliases
You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with a single command by adding the below to your `~/.bashrc` file:
```
function _aws_on() {
export AWS_ACCESS_KEY_ID="<your_access_key_id>" # Replace with appropriate value.
export AWS_SECRET_ACCESS_KEY="<your_secret_access_key>" # Replace with appropriate value.
export TF_VAR_aws_public_key_name="<your_ssh_key_name>" # Replace with appropriate value.
export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value.
}
alias _aws_on='_aws_on'
function _aws_off() {
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset TF_VAR_aws_public_key_name
unset TF_VAR_aws_private_key_path
}
alias _aws_off='_aws_off'
```
N.B.:
* sourcing `../setup.sh` defines aliases called `aws_on` and `aws_off`, similar to the above but without the leading `_` in the names;
* `../setup.sh`'s `aws_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
## Usage
* Create the machine: `terraform apply`
* Show the machine's status: `terraform show`
* Stop and destroy the machine: `terraform destroy`
* SSH into the newly-created machine:
```
$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
# N.B.: the default username will differ depending on the AMI/OS you installed, e.g. ubuntu for Ubuntu, ec2-user for Red Hat, etc.
```
or
```
source ../setup.sh
tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned.
```
## Resources
* [https://www.terraform.io/docs/providers/aws/](https://www.terraform.io/docs/providers/aws/)
* [https://www.terraform.io/docs/providers/aws/r/instance.html](https://www.terraform.io/docs/providers/aws/r/instance.html)
* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)

provisioning/aws/main.tf Executable file
View File

@ -0,0 +1,137 @@
# Specify the provider and access details
provider "aws" {
# Access key, secret key and region are sourced from environment variables or input arguments -- see README.md
region = "${var.aws_dc}"
}
resource "aws_security_group" "allow_ssh" {
name = "${var.name}_allow_ssh"
description = "AWS security group to allow SSH-ing onto AWS EC2 instances (created using Terraform)."
# Open TCP port for SSH:
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${var.client_ip}/32"]
}
tags {
Name = "${var.name}_allow_ssh"
App = "${var.app}"
CreatedBy = "terraform"
}
}
resource "aws_security_group" "allow_docker" {
name = "${var.name}_allow_docker"
description = "AWS security group to allow communication with Docker on AWS EC2 instances (created using Terraform)."
# Open TCP port for Docker:
ingress {
from_port = 2375
to_port = 2375
protocol = "tcp"
cidr_blocks = ["${var.client_ip}/32"]
}
tags {
Name = "${var.name}_allow_docker"
App = "${var.app}"
CreatedBy = "terraform"
}
}
resource "aws_security_group" "allow_weave" {
name = "${var.name}_allow_weave"
description = "AWS security group to allow communication with Weave on AWS EC2 instances (created using Terraform)."
# Open TCP port for Weave:
ingress {
from_port = 12375
to_port = 12375
protocol = "tcp"
cidr_blocks = ["${var.client_ip}/32"]
}
tags {
Name = "${var.name}_allow_weave"
App = "${var.app}"
CreatedBy = "terraform"
}
}
resource "aws_security_group" "allow_private_ingress" {
name = "${var.name}_allow_private_ingress"
description = "AWS security group to allow all private ingress traffic on AWS EC2 instances (created using Terraform)."
# Full inbound local network access on both TCP and UDP
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.aws_vpc_cidr_block}"]
}
tags {
Name = "${var.name}_allow_private_ingress"
App = "${var.app}"
CreatedBy = "terraform"
}
}
resource "aws_security_group" "allow_all_egress" {
name = "${var.name}_allow_all_egress"
description = "AWS security group to allow all egress traffic on AWS EC2 instances (created using Terraform)."
# Full outbound internet access on both TCP and UDP
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
Name = "${var.name}_allow_all_egress"
App = "${var.app}"
CreatedBy = "terraform"
}
}
resource "aws_instance" "tf_test_vm" {
instance_type = "${var.aws_size}"
count = "${var.num_hosts}"
# Lookup the correct AMI based on the region we specified
ami = "${lookup(var.aws_amis, var.aws_dc)}"
key_name = "${var.aws_public_key_name}"
security_groups = [
"${aws_security_group.allow_ssh.name}",
"${aws_security_group.allow_docker.name}",
"${aws_security_group.allow_weave.name}",
"${aws_security_group.allow_private_ingress.name}",
"${aws_security_group.allow_all_egress.name}",
]
# Wait for machine to be SSH-able:
provisioner "remote-exec" {
inline = ["exit"]
connection {
type = "ssh"
# Lookup the correct username based on the AMI we specified
user = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}"
private_key = "${file("${var.aws_private_key_path}")}"
}
}
tags {
Name = "${var.name}-${count.index}"
App = "${var.app}"
CreatedBy = "terraform"
}
}

provisioning/aws/outputs.tf Executable file
View File

@ -0,0 +1,54 @@
output "username" {
value = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}"
}
output "public_ips" {
value = ["${aws_instance.tf_test_vm.*.public_ip}"]
}
output "hostnames" {
value = "${join("\n",
"${formatlist("%v.%v.%v",
aws_instance.tf_test_vm.*.tags.Name,
aws_instance.tf_test_vm.*.availability_zone,
var.app
)}"
)}"
}
# /etc/hosts file for the Droplets:
output "private_etc_hosts" {
value = "${join("\n",
"${formatlist("%v %v.%v.%v",
aws_instance.tf_test_vm.*.private_ip,
aws_instance.tf_test_vm.*.tags.Name,
aws_instance.tf_test_vm.*.availability_zone,
var.app
)}"
)}"
}
# /etc/hosts file for the client:
output "public_etc_hosts" {
value = "${join("\n",
"${formatlist("%v %v.%v.%v",
aws_instance.tf_test_vm.*.public_ip,
aws_instance.tf_test_vm.*.tags.Name,
aws_instance.tf_test_vm.*.availability_zone,
var.app
)}"
)}"
}
output "ansible_inventory" {
value = "${format("[all]\n%s", join("\n",
"${formatlist("%v private_ip=%v",
aws_instance.tf_test_vm.*.public_ip,
aws_instance.tf_test_vm.*.private_ip,
)}"
))}"
}
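# Example rendering (hypothetical IP addresses):
#   [all]
#   54.144.0.10 private_ip=172.31.0.4
#   54.144.0.11 private_ip=172.31.0.5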
output "private_key_path" {
value = "${var.aws_private_key_path}"
}

provisioning/aws/variables.tf Executable file
View File

@ -0,0 +1,68 @@
variable "client_ip" {
description = "IP address of the client machine"
}
variable "app" {
description = "Name of the application using the created EC2 instance(s)."
default = "default"
}
variable "name" {
description = "Name of the EC2 instance(s)."
default = "test"
}
variable "num_hosts" {
description = "Number of EC2 instance(s)."
default = 1
}
variable "aws_vpc_cidr_block" {
description = "AWS VPC CIDR block to use to attribute private IP addresses."
default = "172.31.0.0/16"
}
variable "aws_public_key_name" {
description = "Name of the SSH keypair to use in AWS."
}
variable "aws_private_key_path" {
description = "Path to file containing private key"
default = "~/.ssh/id_rsa"
}
variable "aws_dc" {
description = "The AWS region to create things in."
default = "us-east-1"
}
variable "aws_amis" {
default = {
# Ubuntu Server 16.04 LTS (HVM), SSD Volume Type:
"us-east-1" = "ami-40d28157"
"eu-west-2" = "ami-23d0da47"
# Red Hat Enterprise Linux 7.3 (HVM), SSD Volume Type:
#"us-east-1" = "ami-b63769a1"
# CentOS 7 (x86_64) - with Updates HVM
#"us-east-1" = "ami-6d1c2007"
}
}
variable "aws_usernames" {
description = "User to SSH as into the AWS instance."
default = {
"ami-40d28157" = "ubuntu" # Ubuntu Server 16.04 LTS (HVM)
"ami-b63769a1" = "ec2-user" # Red Hat Enterprise Linux 7.3 (HVM)
"ami-6d1c2007" = "centos" # CentOS 7 (x86_64) - with Updates HVM
}
}
variable "aws_size" {
description = "AWS' selected machine size"
default = "t2.medium" # Instance with 2 cores & 4 GB memory
}

provisioning/do/README.md Executable file
View File

@ -0,0 +1,98 @@
# Digital Ocean
## Introduction
This project allows you to get hold of machines on Digital Ocean.
You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
## Setup
* Log in [cloud.digitalocean.com](https://cloud.digitalocean.com) with your account.
* Go to `Settings` > `Security` > `SSH keys` > `Add SSH Key`.
Enter your SSH public key and the name for it, and click `Add SSH Key`.
Set the path to your private key as an environment variable:
```
export DIGITALOCEAN_SSH_KEY_NAME=<your Digital Ocean SSH key name>
export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa"
```
* Go to `API` > `Tokens` > `Personal access tokens` > `Generate New Token`
Enter your token name and click `Generate Token` to get your 64-character API token.
Set these as environment variables:
```
export DIGITALOCEAN_TOKEN_NAME="<your Digital Ocean API token name>"
export DIGITALOCEAN_TOKEN=<your Digital Ocean API token>
```
* Run the following command to get the Digital Ocean ID for your SSH public key (e.g. `1234567`) and set it as an environment variable:
```
$ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" \
| jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id')
```
or pass it as a Terraform variable:
```
$ terraform <command> \
-var 'do_private_key_path=<path to your SSH private key>' \
-var 'do_public_key_id=<ID of your SSH public key>'
```
### Bash aliases
You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with a single command by adding the below to your `~/.bashrc` file:
```
function _do_on() {
export DIGITALOCEAN_TOKEN_NAME="<your_token_name>" # Replace with appropriate value.
export DIGITALOCEAN_TOKEN=<your_token> # Replace with appropriate value.
export DIGITALOCEAN_SSH_KEY_NAME="<your_ssh_key_name>" # Replace with appropriate value.
export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value.
export TF_VAR_do_public_key_path="$HOME/.ssh/id_rsa.pub" # Replace with appropriate value.
export TF_VAR_do_public_key_id=<your_ssh_key_id> # Replace with appropriate value.
}
alias _do_on='_do_on'
function _do_off() {
unset DIGITALOCEAN_TOKEN_NAME
unset DIGITALOCEAN_TOKEN
unset DIGITALOCEAN_SSH_KEY_NAME
unset TF_VAR_do_private_key_path
unset TF_VAR_do_public_key_path
unset TF_VAR_do_public_key_id
}
alias _do_off='_do_off'
```
N.B.:
* sourcing `../setup.sh` defines aliases called `do_on` and `do_off`, similar to the above but without the leading `_` in the names;
* `../setup.sh`'s `do_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
## Usage
* Create the machine: `terraform apply`
* Show the machine's status: `terraform show`
* Stop and destroy the machine: `terraform destroy`
* SSH into the newly-created machine:
```
$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
```
or
```
source ../setup.sh
tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned.
```
## Resources
* [https://www.terraform.io/docs/providers/do/](https://www.terraform.io/docs/providers/do/)
* [https://www.terraform.io/docs/providers/do/r/droplet.html](https://www.terraform.io/docs/providers/do/r/droplet.html)
* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)

provisioning/do/main.tf Executable file
View File

@ -0,0 +1,42 @@
provider "digitalocean" {
# See README.md for setup instructions.
}
# Tags to label and organize droplets:
resource "digitalocean_tag" "name" {
name = "${var.name}"
}
resource "digitalocean_tag" "app" {
name = "${var.app}"
}
resource "digitalocean_tag" "terraform" {
name = "terraform"
}
resource "digitalocean_droplet" "tf_test_vm" {
ssh_keys = ["${var.do_public_key_id}"]
image = "${var.do_os}"
region = "${var.do_dc}"
size = "${var.do_size}"
name = "${var.name}-${count.index}"
count = "${var.num_hosts}"
tags = [
"${var.app}",
"${var.name}",
"terraform",
]
# Wait for machine to be SSH-able:
provisioner "remote-exec" {
inline = ["exit"]
connection {
type = "ssh"
user = "${var.do_username}"
private_key = "${file("${var.do_private_key_path}")}"
}
}
}

provisioning/do/outputs.tf Executable file
View File

@ -0,0 +1,57 @@
output "username" {
value = "${var.do_username}"
}
output "public_ips" {
value = ["${digitalocean_droplet.tf_test_vm.*.ipv4_address}"]
}
output "hostnames" {
value = "${join("\n",
"${formatlist("%v.%v.%v",
digitalocean_droplet.tf_test_vm.*.name,
digitalocean_droplet.tf_test_vm.*.region,
var.app
)}"
)}"
}
# /etc/hosts file for the Droplets:
# N.B.: by default Digital Ocean droplets only have public IPs, but in order to
# be consistent with other providers' recipes, we provide an output to generate
# an /etc/hosts file on the Droplets, even though it is using public IPs only.
output "private_etc_hosts" {
value = "${join("\n",
"${formatlist("%v %v.%v.%v",
digitalocean_droplet.tf_test_vm.*.ipv4_address,
digitalocean_droplet.tf_test_vm.*.name,
digitalocean_droplet.tf_test_vm.*.region,
var.app
)}"
)}"
}
# /etc/hosts file for the client:
output "public_etc_hosts" {
value = "${join("\n",
"${formatlist("%v %v.%v.%v",
digitalocean_droplet.tf_test_vm.*.ipv4_address,
digitalocean_droplet.tf_test_vm.*.name,
digitalocean_droplet.tf_test_vm.*.region,
var.app
)}"
)}"
}
output "ansible_inventory" {
value = "${format("[all]\n%s", join("\n",
"${formatlist("%v private_ip=%v",
digitalocean_droplet.tf_test_vm.*.ipv4_address,
digitalocean_droplet.tf_test_vm.*.ipv4_address
)}"
))}"
}
output "private_key_path" {
value = "${var.do_private_key_path}"
}

provisioning/do/variables.tf Executable file
View File

@ -0,0 +1,185 @@
variable "client_ip" {
description = "IP address of the client machine"
}
variable "app" {
description = "Name of the application using the created droplet(s)."
default = "default"
}
variable "name" {
description = "Name of the droplet(s)."
default = "test"
}
variable "num_hosts" {
description = "Number of droplet(s)."
default = 1
}
variable "do_private_key_path" {
description = "Digital Ocean SSH private key path"
default = "~/.ssh/id_rsa"
}
variable "do_public_key_id" {
description = "Digital Ocean ID for your SSH public key"
# You can retrieve it and set it as an environment variable this way:
# $ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" | jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id')
}
variable "do_username" {
description = "Digital Ocean SSH username"
default = "root"
}
variable "do_os" {
description = "Digital Ocean OS"
default = "ubuntu-16-04-x64"
}
# curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/images?page=1&per_page=999999" | jq ".images | .[] | .slug" | grep -P "ubuntu|coreos|centos" | grep -v alpha | grep -v beta
# "ubuntu-16-04-x32"
# "ubuntu-16-04-x64"
# "ubuntu-16-10-x32"
# "ubuntu-16-10-x64"
# "ubuntu-14-04-x32"
# "ubuntu-14-04-x64"
# "ubuntu-12-04-x64"
# "ubuntu-12-04-x32"
# "coreos-stable"
# "centos-6-5-x32"
# "centos-6-5-x64"
# "centos-7-0-x64"
# "centos-7-x64"
# "centos-6-x64"
# "centos-6-x32"
# "centos-5-x64"
# "centos-5-x32"
# Digital Ocean datacenters
# See also:
# $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/regions" | jq -c ".regions | .[] | .slug" | sort -u
variable "do_dc_ams2" {
description = "Digital Ocean Amsterdam Datacenter 2"
default = "ams2"
}
variable "do_dc_ams3" {
description = "Digital Ocean Amsterdam Datacenter 3"
default = "ams3"
}
variable "do_dc_blr1" {
description = "Digital Ocean Bangalore Datacenter 1"
default = "blr1"
}
variable "do_dc_fra1" {
description = "Digital Ocean Frankfurt Datacenter 1"
default = "fra1"
}
variable "do_dc_lon1" {
description = "Digital Ocean London Datacenter 1"
default = "lon1"
}
variable "do_dc_nyc1" {
description = "Digital Ocean New York Datacenter 1"
default = "nyc1"
}
variable "do_dc_nyc2" {
description = "Digital Ocean New York Datacenter 2"
default = "nyc2"
}
variable "do_dc_nyc3" {
description = "Digital Ocean New York Datacenter 3"
default = "nyc3"
}
variable "do_dc_sfo1" {
description = "Digital Ocean San Francisco Datacenter 1"
default = "sfo1"
}
variable "do_dc_sfo2" {
description = "Digital Ocean San Francisco Datacenter 2"
default = "sfo2"
}
variable "do_dc_sgp1" {
description = "Digital Ocean Singapore Datacenter 1"
default = "sgp1"
}
variable "do_dc_tor1" {
description = "Digital Ocean Toronto Datacenter 1"
default = "tor1"
}
variable "do_dc" {
description = "Digital Ocean's selected datacenter"
default = "lon1"
}
variable "do_size" {
description = "Digital Ocean's selected machine size"
default = "4gb"
}
# Digital Ocean sizes
# See also:
# $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/sizes" | jq -c ".sizes | .[] | .slug"
# "512mb"
# "1gb"
# "2gb"
# "4gb"
# "8gb"
# "16gb"
# "m-16gb"
# "32gb"
# "m-32gb"
# "48gb"
# "m-64gb"
# "64gb"
# "m-128gb"
# "m-224gb"

provisioning/gcp/README.md Executable file
View File

@ -0,0 +1,126 @@
# Google Cloud Platform
## Introduction
This project allows you to get hold of machines on Google Cloud Platform.
You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
## Setup
* Log in [console.cloud.google.com](https://console.cloud.google.com) with your Google account.
* Go to `API Manager` > `Credentials` > `Create credentials` > `Service account key`,
in `Service account`, select `Compute Engine default service account`,
in `Key type`, select `JSON`, and then click `Create`.
* This will download a JSON file to your machine. Place this file wherever you want and then create the following environment variables:
```
$ export GOOGLE_CREDENTIALS_FILE="path/to/your.json"
$ export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
```
* Go to `Compute Engine` > `Metadata` > `SSH keys` and add your username and SSH public key;
or
set it up using `gcloud compute project-info add-metadata --metadata-from-file sshKeys=~/.ssh/id_rsa.pub`.
If you used your default SSH key (i.e. `~/.ssh/id_rsa.pub`), then you do not have anything to do.
Otherwise, you will have to either define the below environment variables:
```
$ export TF_VAR_gcp_public_key_path=<path to your SSH public key>
$ export TF_VAR_gcp_private_key_path=<path to your SSH private key>
```
or to pass these as Terraform variables:
```
$ terraform <command> \
-var 'gcp_public_key_path=<path to your SSH public key>' \
-var 'gcp_private_key_path=<path to your SSH private key>'
```
* Set the username in your public key as an environment variable.
This will be used as the username of the Linux account created on the machine, which you will need in order to SSH into it later on.
N.B.:
* GCP already has the username set from the SSH public key you uploaded in the previous step.
* If your username is an email address, e.g. `name@domain.com`, then GCP uses `name` as the username.
```
export TF_VAR_gcp_username=<your SSH public key username>
```
* Set your current IP address as an environment variable:
```
export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
```
or pass it as a Terraform variable:
```
$ terraform <command> -var "client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)"
```
* Set your project as an environment variable:
```
export TF_VAR_gcp_project=weave-net-tests
```
or pass it as a Terraform variable:
```
$ terraform <command> -var 'gcp_project=weave-net-tests'
```
### Bash aliases
You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with a single command by adding the below to your `~/.bashrc` file:
```
function _gcp_on() {
export GOOGLE_CREDENTIALS_FILE="<path/to/your/json/credentials/file.json>" # Replace with appropriate value.
export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
export TF_VAR_gcp_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value.
export TF_VAR_gcp_public_key_path="$HOME/.ssh/id_rsa.pub" # Replace with appropriate value.
export TF_VAR_gcp_username=$(cat "$TF_VAR_gcp_public_key_path" | cut -d' ' -f3 | cut -d'@' -f1)
}
alias _gcp_on='_gcp_on'
function _gcp_off() {
unset GOOGLE_CREDENTIALS_FILE
unset GOOGLE_CREDENTIALS
unset TF_VAR_gcp_private_key_path
unset TF_VAR_gcp_public_key_path
unset TF_VAR_gcp_username
}
```
N.B.:
* sourcing `../setup.sh` defines aliases called `gcp_on` and `gcp_off`, similar to the above but without the leading `_` in the names;
* `../setup.sh`'s `gcp_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
## Usage
* Create the machine: `terraform apply`
* Show the machine's status: `terraform show`
* Stop and destroy the machine: `terraform destroy`
* SSH into the newly-created machine:
```
$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
```
or
```
source ../setup.sh
tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned.
```
## Resources
* [https://www.terraform.io/docs/providers/google/](https://www.terraform.io/docs/providers/google/)
* [https://www.terraform.io/docs/providers/google/r/compute_instance.html](https://www.terraform.io/docs/providers/google/r/compute_instance.html)
* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)

provisioning/gcp/main.tf Executable file
View File

@ -0,0 +1,79 @@
provider "google" {
# Set the below environment variables:
# - GOOGLE_CREDENTIALS
# - GOOGLE_PROJECT
# - GOOGLE_REGION
# or configure directly below.
# See also:
# - https://www.terraform.io/docs/providers/google/
# - https://console.cloud.google.com/apis/credentials/serviceaccountkey?project=<PROJECT ID>&authuser=1
region = "${var.gcp_region}"
project = "${var.gcp_project}"
}
resource "google_compute_instance" "tf_test_vm" {
name = "${var.name}-${count.index}"
machine_type = "${var.gcp_size}"
zone = "${var.gcp_zone}"
count = "${var.num_hosts}"
disk {
image = "${var.gcp_image}"
}
tags = [
"${var.app}",
"${var.name}",
"terraform",
]
network_interface {
network = "${var.gcp_network}"
access_config {
// Ephemeral IP
}
}
metadata {
ssh-keys = "${var.gcp_username}:${file("${var.gcp_public_key_path}")}"
}
# Wait for machine to be SSH-able:
provisioner "remote-exec" {
inline = ["exit"]
connection {
type = "ssh"
user = "${var.gcp_username}"
private_key = "${file("${var.gcp_private_key_path}")}"
}
}
}
resource "google_compute_firewall" "fw-allow-docker-and-weave" {
name = "${var.name}-allow-docker-and-weave"
network = "${var.gcp_network}"
target_tags = ["${var.name}"]
allow {
protocol = "tcp"
ports = ["2375", "12375"]
}
source_ranges = ["${var.client_ip}"]
}
# Required for FastDP crypto in Weave Net:
resource "google_compute_firewall" "fw-allow-esp" {
name = "${var.name}-allow-esp"
network = "${var.gcp_network}"
target_tags = ["${var.name}"]
allow {
protocol = "esp"
}
source_ranges = ["${var.gcp_network_global_cidr}"]
}

provisioning/gcp/outputs.tf Executable file
View File

@ -0,0 +1,66 @@
output "username" {
value = "${var.gcp_username}"
}
output "public_ips" {
value = ["${google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip}"]
}
output "hostnames" {
value = "${join("\n",
"${formatlist("%v.%v.%v",
google_compute_instance.tf_test_vm.*.name,
google_compute_instance.tf_test_vm.*.zone,
var.app
)}"
)}"
}
# /etc/hosts file for the Compute Engine instances:
output "private_etc_hosts" {
value = "${join("\n",
"${formatlist("%v %v.%v.%v",
google_compute_instance.tf_test_vm.*.network_interface.0.address,
google_compute_instance.tf_test_vm.*.name,
google_compute_instance.tf_test_vm.*.zone,
var.app
)}"
)}"
}
# /etc/hosts file for the client:
output "public_etc_hosts" {
value = "${join("\n",
"${formatlist("%v %v.%v.%v",
google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip,
google_compute_instance.tf_test_vm.*.name,
google_compute_instance.tf_test_vm.*.zone,
var.app
)}"
)}"
}
output "ansible_inventory" {
value = "${format("[all]\n%s", join("\n",
"${formatlist("%v private_ip=%v",
google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip,
google_compute_instance.tf_test_vm.*.network_interface.0.address
)}"
))}"
}
output "private_key_path" {
value = "${var.gcp_private_key_path}"
}
output "instances_names" {
value = ["${google_compute_instance.tf_test_vm.*.name}"]
}
output "image" {
value = "${var.gcp_image}"
}
output "zone" {
value = "${var.gcp_zone}"
}

provisioning/gcp/variables.tf Executable file
View File

@ -0,0 +1,77 @@
variable "gcp_username" {
description = "Google Cloud Platform SSH username"
}
variable "app" {
description = "Name of the application using the created Compute Engine instance(s)."
default = "default"
}
variable "name" {
description = "Name of the Compute Engine instance(s)."
default = "test"
}
variable "num_hosts" {
description = "Number of Compute Engine instance(s)."
default = 1
}
variable "client_ip" {
description = "IP address of the client machine"
}
variable "gcp_public_key_path" {
description = "Path to file containing public key"
default = "~/.ssh/id_rsa.pub"
}
variable "gcp_private_key_path" {
description = "Path to file containing private key"
default = "~/.ssh/id_rsa"
}
variable "gcp_project" {
description = "Google Cloud Platform project"
default = "weave-net-tests"
}
variable "gcp_image" {
# See also: https://cloud.google.com/compute/docs/images
# For example:
# - "ubuntu-os-cloud/ubuntu-1604-lts"
# - "debian-cloud/debian-8"
# - "centos-cloud/centos-7"
# - "rhel-cloud/rhel7"
description = "Google Cloud Platform OS"
default = "ubuntu-os-cloud/ubuntu-1604-lts"
}
variable "gcp_size" {
# See also:
# $ gcloud compute machine-types list
description = "Google Cloud Platform's selected machine size"
default = "n1-standard-1"
}
variable "gcp_region" {
description = "Google Cloud Platform's selected region"
default = "us-central1"
}
variable "gcp_zone" {
description = "Google Cloud Platform's selected zone"
default = "us-central1-a"
}
variable "gcp_network" {
description = "Google Cloud Platform's selected network"
default = "test"
}
variable "gcp_network_global_cidr" {
description = "CIDR covering all regions for the selected Google Cloud Platform network"
default = "10.128.0.0/9"
}

provisioning/setup.sh Executable file
View File

@ -0,0 +1,361 @@
#!/bin/bash
#
# Description:
# Helper functions to programmatically provision (e.g. for CIT).
# Aliases on these functions are also created so that this script can be
# sourced in your shell, in your ~/.bashrc file, etc. and directly called.
#
# Usage:
# Source this file and call the relevant functions.
#
function ssh_public_key() {
echo -e "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZBgLQts30PYXEMJnCU21QC+1ZE0Sv/Ry48Au3nYXn1KNoW/7C2qQ3KO2ZnpZRHCstFiU8QIlB9edi0cgcAoDWBkCiFBZEORxMvohWtrRQzf+x59o48lVjA/Fn7G+9hmavhLaDf6Qe7OhH8XUshNtnIQIUvNEWXKE75k32wUbuF8ibhJNpOOYKL4tVXK6IIKg6jR88BwGKPY/NZCl/HbhjnDJY0zCU1pZSprN6o/S953y/XXVozkh1772fCNeu4USfbt0oZOEJ57j6EWwEYIJhoeAEMAoD8ELt/bc/5iex8cuarM4Uib2JHO6WPWbBQ0NlrARIOKLrxkjjfGWarOLWBAgvwQn5zLg1pKb7aI4+jbA+ZSrII5B2HuYE9MDlU8NPL4pHrRfapGLkG/Fe9zNPvScXh+9iSWfD6G5ZoISutjiJO/iVYN0QSuj9QEIj9tl20czFz3Dhnq4sPPl5hoLunyQfajY7C/ipv6ilJyrEc0V6Z9FdPhpEI+HOgJr2vDQTFscQuyfWuzGJDZf6zPdZWo2pBql9E7piARuNAjakylGar/ebkCgfy28XQoDbDT0P0VYp+E8W5EYacx+zc5MuNhRTvbsO12fydT8V61MtA78wM/b0059feph+0zTykEHk670mYVoE3erZX+U1/BVBLSV9QzopO6/Pgx2ryriJfQ== weaveworks-cit"
}
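# Usage: decrypt <secret key> <description> <base64-encoded AES256 ciphertext>
# Base64-decodes and AES256-decrypts its third argument, printing the plaintext
# to stdout; fails, mentioning <description>, if no secret key was provided.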
function decrypt() {
if [ -z "$1" ]; then
echo >&2 "Failed to decode and decrypt $2: no secret key was provided."
return 1
fi
echo "$3" | openssl base64 -d | openssl enc -d -aes256 -pass "pass:$1"
}
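# Example round trip (hypothetical secret "s3cret"):
#   $ ciphertext="$(echo "hello" | openssl enc -e -aes256 -pass pass:s3cret | openssl base64)"
#   $ decrypt "s3cret" "test string" "$ciphertext"  # prints "hello"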
function ssh_private_key() {
# The private key has been AES256-encrypted and then Base64-encoded using the following command:
# $ openssl enc -in /tmp/weaveworks_cit_id_rsa -e -aes256 -pass stdin | openssl base64 > /tmp/weaveworks_cit_id_rsa.aes.b64
# The command below does the reverse, i.e. it base64-decodes and AES-decrypts the file, and prints it to stdout.
# N.B.: Ask Marc for the password, or otherwise re-generate the SSH key using:
# $ ssh-keygen -t rsa -b 4096 -C "weaveworks-cit"
decrypt "$1" "SSH private key" "$(
cat <<EOF
U2FsdGVkX195fX5zswH1C5ho3hkYnrAG0SQmTubdc5vW6DSDgYlpxmoXufGAImqH
eaIhC8mEespdqOrIGOIBf0QU9Mm386R/tuxQMxCU/ZLYhuOYMmMtTytBzyDmI1Mf
NjfE7wTsPUzrys46ZJ5H/AHN/F/0N/jXIEwD+M8sSLshatBbgv49MUtZrVy7zVK6
zhb7kbYZAxuFQsv0M7PtBOM9WLp18ttmGjv/5ag/74ZDyj3HSC7/+7jTxUS4zxS6
XrWfiOUlugPjryIeOgkjbDIOqan/h45rECkX96ej+w685diiNMYpgzX7NgMHB5AW
PsK1mwnfuNzrm1Qep/wkO0t8Vp4Q5XKmhntKHByr/86R991WEtSpDkKx6T5IzNGU
+wSdMd59jmdrLwe2fjn3i8V7SULx6rC4gNQ3IsoZN7w8/LLhi3UlHlswu1rMOAZS
irITg+F5qjKYDfaXmW1k/RDy9N6pjkTuGck2SRxSfnIQZ2ncX4bLD9ymVBYmB++X
ylEcxYBZPbcVm3tbLRxaK4AUBLqywlt+4gn6hXIq3t3HIgAeFrTKO7fF8orVMIhU
3GrYJHMA4kNhXo4QIhCEkWex0wHFntNKb4ZvPRhKZiIq8JrGE5CVONQhN9z+A1Tp
XmGrVG5ywtQ4HrlLxeGzfXFaJRU2Uv+T/LeYWWili1tmRlQu54jGkkWRCPN4NLNX
5ZiFfej+4kWLQ3m12GL3NDjKHSdoSIBJxj9QvYwB6+wpLdCgHnOp3ItymBRJCuh+
t5pyVUGMN/xCHu8sGOAWpZ5kJrzImduD46G17AoJ3IiKhJ+vXiafCwukZcpmNwEF
C1VKEPwIzJeTIIg7qyyNT/aDHaUMBC5C7pKkI70b0fmKyBxmmt36tlNE0cg344E7
sNh5C6x+0mSixhI0g9UsuvnNs0gt+GmbDp17KOISM0qc+39LbiGLmsP7zxweqOm6
3/tStFOx0VI2iJMIywbWgJvHgWWuzd5ZveJhbcjdckUDXZ45lcs4y9fMTri1Cj4O
hrQCsTqK/cpmx1ZIaPhws2Z2NsP942E7te/wq2mBx0HppT0i9ZJpwz9vLRisaqgF
LO8b9PE3kWhIejPmDy53iJExBcR/z9M336SDfeDrJkqXg1gytiSnyh2sCaOKlEQR
im3WAiiJaqH3k1+hQ3vLWgNfq1+Nu/EcLew9MbKMTmYsSKA9cLz8zB4ZevHipa2B
MyKOntCzX+ROAeTvjLWZvuf9J1XWQaOs15/N0nyCahQHBs38XPQbaruOHooZ8iHi
rjHLJvPEdMJ76L+qkW+YWnjzf7qxmi+XjeNzDwGGsYRLdz8BxVrOdAISXdsJh9zn
7KXh4vRnPFsgetIx9FHVpvy0f9+uE4AQHKQ3D2mC3+jnaonxZm3Sxh1IqGSQLEfD
Qy7mIv5YEc8QI4AFcfZyuL1MSRuYVPr+ZHvQaWaF3NpscH8F/anzyczqbxjmhqph
4iZifLrHCNQKnDTR5i+xUWJxWsTrWGDLEAKu2UQ2mU+XCMXSx3D2OzYkgN1v5fnC
epAoKPa4HkyoHbCG2sl0A6O6vuoRAtQ8/h/jkpCXgCrGPQq15mtkVUCqFKqhYJq1
ugAYrUqxMSaNUFOjH/AKHK7GIaAqaonFhAblxVTHhzJ3k//rBUoRhz8Xoj1rpkkY
aZE1Sz0FFwEjFSPimXQz6TXb0rR6Ga9KjmbIhzaQ+aEFpYXof9kwXQTkeoSV1GHa
RLJu3De1SYC0a7zJbjkHPSJ55RX2PEEzHGe/3xFbH8M24ox0E29ewNZtAZ7yNhyi
88xSonlJFt5sOBuk5bNsJ9AZ9pEekrNJ1BigkT4q+cA0gCUJJ0MuBrijdufqLDIw
p9ozT1vfWrtzLBqHOcRvhWGJ48VXliJjKzpN+fmFEqxifu0+sfxzroluNjhuKTF8
5P0rLohZ+Xtvze5WszSMrSFAmi3TUOSPrxGZ+fZfttkBae0uj/mTFUNo61pRZSxR
hpPyq8NlusfUscX81zE3jNansIVsf54TVM2cb5fBrdS+SYhc5izbEMjI958ZPndf
iJID3oWKrWbn7ebszS0g0T2Hurk4VALgECLAxYqP/S32SOB6Y9EcE1dUq0VI2kzs
/HvMW05iWGDQ9fYWba/X+cpKfrRFXWFfD8CndDLidY9kHe2Zq9nEz+C/Zfi4YQKt
7nLpC85fvIaAnRxDlW8O/Sj8+TBNPcrsxeuhYfilIcapVs8/Plbtc7M6z7v1LO5i
bFeCBLwv+ZB1OUcxjuzCNVGBSvmYQmJbq37WDqPd+a8hqkz8/khH/CmUjp/MDrQN
64HIe+/USU9LvOI4ZkT/w/POmU2uxKWIc/OiSWuDgr6QsPYEjgMj1sEU8xT5HwOr
m9uBBgU/Pt118cmRPZDa25qyGEbiGvnjFl1fh5WgDg3gNQStEsuKy2IILGrzDMX3
IxuGr793Jp2zxawxzAcqSNvhf2b16f4hBueKqBPvNEfiPGzBqz+x636kYvhuUYmU
KxWZpsfBLbn7EL7O8OorzPBNOLJOiz1YmZ7cST2EYD7aEOAQMQ5n/6uyS7bP+dHR
wSVelYhKH/zIklHSH3/ERCPpmiYPdcFXEuu5PoGB9bqGae4RGm41350iecPn/GEM
Ykjc0aSed31gcFMIO+WDUgIc6qqJZklW7YMNfeKjeXzmml0hVMJrxbtPSr042jem
qzu/FuFLs47vpc8ooBO6bOa/Foicq5ypxenVT0YWPlReFpK+BVRpyHrk+MeXqP6Q
ReAfxli9MrM0EQc2I0ok/OA3H61BE5cr1cR9Sj4CH9ZFJfoGDNvn64RL9p2C1SkQ
Y+kWGWPdwsw+iSXsw+864H/Noojs8saQtyognAxYEb/DinSaqlil6EUydCyVZCWx
kuYb2zBxeh3W8IZcmHIl/aaobk8KHWwv+1/KWS3M21PKFwkEKWl42kRTn14fXo7y
9MhmbCgVxi2lTtQfRqcH2GmGcEL8MPDptMs4HEJvjeLvdIIzT1Du7DDfU8tfuFZK
C8v1tjL57Tcm+ORroVyQrImwkOxfJUDKKlz52p6o1fGp7W249H9/r29I+e5LCx0R
aoywGfl0Mi8i1U6p2AhQu+ywsdDyZEnSMoKyIjDckpLbe00AhQLfBLSCHf4IYd9I
crMSo0axhB45e+sqZ2OSfbxIMWrHuFDzjLMTdtXzHsJ6910MnsjRjZKcFNaKpqyd
Lm3PeGG0admpmHsu6jQBEwAVby7SSJ/+m6oiqUAvNfDrWCDsd8tA5iFhUGe8qnTZ
QE8DGOOzd+GcEaC+93MK9jYaiGdbWgCSTVv/7akY/+sEd5bLBPc/HEnkWxuDlPnU
aK1A7g0b3ijODbHLBEE6a5BVZ/ZC9JlCh3UGuJubzgAfrxligRme3HEsH2oj5gIH
nHW2ehWNif+5Bhq+S/2WrhhYS8dY+WoEgaQW0VHJZLAu9FnjgOMQdbOxY8wCuNR4
PIvwM4yIhaEUy2Bh0OFmXRzaqP+ZqTub+IVLkSZ9ULAqt06SdPbxGjLwImv/QyNZ
mL7clr2JtyxYQiuqZ46y2WfM0Cv+NAVWh3R7DGxzWf1Oht4SfmYZTHtzLzbBnLjP
ZGRC9umNrSDw75KPRzDdRJsPIO/38B2CPv2ati1cdurleYvbOh+LKEThfmO/ay65
UU63fU0H1esBro/JW/z7jCLBJ1aO2rTmYCFwtxAsQPs/yNrATwmBjlnAEnzCzT6f
O1+AFT3I/dTEiHIaXfvQBGhSblIymlYXPiIG0gZSZH4370WhNg86o1yd34ITeH3j
JzuOkawQY3hQR5n1XPUQzioaqWIyFwxL98pMTQpskJtwMG+U0m6ahaMsi3bhwd5b
6srFj0qdUeaZFZVUkPqnYithICYL7FewAzA23hDZ8Pj5pLNtFHkcywGs2EEGeeTC
sV1QCESVDQcSzlZ6tJNmJgUTK9dUHrq4DQrk5Ozg/xQ64wgqeiPEiaqT8lSFDDY/
NOTFPgbd1O3JNT3h7U59mTiDtdd4LFk4LRcu+A6q8G54aVTe/dqysllQi9eBO5qv
u+yV7W0ph96m7z1DHuhVTlM0fg2l//fuxnDZJICfg45BNhN/Zb9RhfS7Fhhq7M1c
bLu2Hteret0PXeC38dGv1Gah79KSrOw5k3kU/NG0ZlC01svkrNXLA6bcZuJWpajM
4fBkUc93wSLonIbSfXK7J3OQjI9fyu4aifxuS/D9GQlfckLFu8CMn+4qfMv6UBir
lr1hOLNqsUnfliUgnzp5EE7eWKcZKxwnJ4qsxuGDTytKyPPKetY2glOp0kkT2S/h
zOWN81VmhPqHPrBSgDvf0KZUtllx0NNGb0Pb9gW5hnGmH0VgeYsI8saR5wGuUkf4
EOF
)"
}
function set_up_ssh_private_key() {
if [ -z "$1" ]; then
echo >&2 "Failed to decode and decrypt SSH private key: no secret key was provided."
return 1
fi
local ssh_private_key_path="$HOME/.ssh/weaveworks_cit_id_rsa"
[ -e "$ssh_private_key_path" ] && rm -f "$ssh_private_key_path"
ssh_private_key "$1" >"$ssh_private_key_path"
chmod 400 "$ssh_private_key_path"
echo "$ssh_private_key_path"
}
function gcp_credentials() {
# The below GCP service account JSON credentials have been AES256-encrypted and then Base64-encoded using the following command:
# $ openssl enc -in ~/.ssh/weaveworks-cit.json -e -aes256 -pass stdin | openssl base64 > /tmp/weaveworks-cit.json.aes.b64
# The command below does the reverse, i.e. it base64-decodes and AES-decrypts the file, and prints it to stdout.
# N.B.: Ask Marc for the password, or otherwise re-generate the credentials for GCP, as per ../tools/provisioning/gcp/README.md.
decrypt "$1" "JSON credentials" "$(
cat <<EOF
U2FsdGVkX1+ocXXvu+jCI7Ka0GK9BbCIOKehuIbrvWZl/EhB44ebW7OyO8RTVqTg
xWuktqt+e0FDWerCFY5xHeVDBN0In9uH+IWfnXp4IcJIes16olZHnyS3e6+L5Xc6
oWm+ZQ15OMa9vA+t3CMpuuwd/EIC1OSyDaxK4Gcta91zH6sN97F0NVjciPyjNhly
3kx0uuHzI0KW4EGuAPxF1pOFwIvCJVwrtjygtyf9ymVZ1wGMe/oUyRolMBjfPJvi
YCF65zN1wghHtcqyatov/ZesiF/XEFn/wK5aUR+wAEoQdR5/hN7cL8qZteUUYGV4
O6tI8AoCKPHyU83KevwD0N34JIfwhloOQtnxBTwMCLpqIZzEFTnD/OL6afDkUHW+
bWGQ3di92lLuOYOZ1mCfvblYZssDpVj79Uu8nwJPnaf334T6jDzc4N/cyaIyHsNz
ydJ7NXV9Ccs38JhQPDY+BkQAXZRXJVVgMLZIGU4ARxYaRFTnXdFE5bM4rRM4m4UY
lQbeoYrB6fH9eqpxc3A3CqHxGDTg+J8WqC/nZVX6NzBWCQVOxERi7KVfV6l387Qy
w5PRjl3X+3Z14k15eIOVb25ZnnmTwgKm/xdm3j47spStVRbMsa1nbXLINrYs0XoW
eVyYxHD3bWFZ7blTlGaNecmjECecQ7VS/EmNeNFiigaIeArB0GZcq0xx+J/VUXW+
q3VCw2D5bYOCC1ApZ4iOXLXERfGyHetkt++veEJ61EZWcc0o2g9Ck4r7JYLFfEEz
Wik08WH+tGksYnCHH3gxjTGbLR7jsEKgBQkcsGsIwm/w950QfAug0C+X6csNJwPY
mm47hHfdSa3p6fgPNKVA2RXA/cAUzfNL65cm7vSjqWLaGPnkVAZwySIqZSUkjQz3
OOACnvmsJnHYO8q730MzSJ/qG+2v4nQ0e9OlbV4jqsrYKrFLcCJIUx2AhwddkIy6
EA7uJvt8MiBpErc+g1IdLxDhoU7pTnN3wocA8mufMcnNBRVv9v4oYY6eGWWo62op
+kpglrcouGjTV0LJDalp9ejxtjFQ+sCqvUzmgmcTD2iqP4+VX4/jglKeUnj4XeID
DwyCYNyZg70V/H7ZbLDfE5SJkH+iALJnQZGfPrXtn1RdoI7Hh9Ix0xYizGozwF72
WQC+Td17XpINn5kPr5j8CVps5C7NDbZR747XbfHkWRVVCt2gCf4R8JM2u+Gh8wPP
aj8ziSF9ndZr/jQy8cF2OrmGRemCDVabEiBdNRq6CxwuTwoMRREC5zT4mIFWrflv
UZvXfKiw4Dd4tohkOC/U6DfWNzzIy4UBvVZOgNjAyyJLChTHrHdxHbG7hloAlfGM
kijPYqQhsAL9LxTco7ANexSdMPfkHOLEGcY5or4z6WifRY9lRa1Fa4fguGHCRj/T
e67JFe5NM3Aq++8jLH/5ZpWP6xAiMLz/EYVNZ5nTnWnsz3yDSm7Fk8dtgRF0P7My
FpVWot2/B1eKWjfnwsqMg3yRH7k0bFaz7NzVbkHkUIsUgFzaH7/NlaaP9/GyYNKj
c7QC6MbTjgxK1wlGmjN+to59o+CLns+z6rv42u7JDEikLQ0jVRPDCd6zJk3Vnabs
wP2yohi/u2GraAevBcQIqxFRnk8F8Ds+kydNXxCfX3pXgGEp5bV8+ZrTt8HcQ4dv
23Oulur38vep0ghF4wCoIvbGauLCQqmc4Ct1phjyVMNKOx1VLXI37uoIh+0d+Y/6
hqxLYKCfvRmeSdAUBTxAihMY1vioNZ8iu83WDnxioREC+skejr3s2nENSA/bxl9h
6ETVYwXxEshj2Im6xVZzX3W1fI6HK51M2ttglGLpzvwqPeWH/PFmRRtLjGTk9myM
wGOG2RBwoXR2UCOWwfg2iSE3iEJYAcLSFs1m71y7uXKF3wVb4Hpn11UljAUyo6lH
bRTgEfyulLS7VJ8Vj0pvxnE72qJPOSe5xMWgjVaqHUH6hSkra5EfkyXRk+49vIU1
z6TIX+AMYU2ZXvkDbTGck7nMNmQW7uBwHCy0JuYoM9g71UUyYAGb+vemGPvU77U5
UzKpGNYt6pMC+pPZkYWXq7553dP0o3iftArVp7DaweP134ROn4HYnSL/zpKXZnG/
toWhQVjrw23kfTI4lOFNhfs+vw5sLSoBDXdDS09fjDxot5Ws1nxojUmx3HroTkcw
ce5bGW7FYWxxlY4yBPbliXJcJ/4yewDxWL2qOkGL+G5ztRMHPEOmfQrUtqB8tSMZ
Bn0eMSp1lnkloPkfNkRguxBbJDwbrl06fkmGTCyDjToqqBVVXSSRHA2+pJzsRGWA
0UuDkdINaSGgqX8GNa5iJaVGUKEUSbmM7G5maeKdgiwHn2qdJ73/rIHxg1DNC9UB
LP1+wWpfeAdqidpErXJ7PRpsIA3UBNcDhQALk9U3Y+33xQQOQYtaFwI/CBUGlVub
FgR0tWJZWd/GbRMP2MRH7CJ3//kkW8/O+pFRZfrtjc6ZMlChoRQyGA3OMissrGsW
GoXjO+3wwNDkZIUtLuYHQhUJ1u/n3wOsOp0gTQa0222ofVitPniGkCtqgVScBJTd
l9SNCvhDR9sAkkEDi0VAplPiJZHjhAFb+WmN6cwTH8CVjb0CKcu3rfCVHlbLqrwU
7JMq2gmoYcDs9+4SJu7BTc3++z1pPgvE4JBNk9SdDMa+du7e1YEemrbUfb/GSvkD
R97jYPXFD9g7IaHePZemLoRbwoMapDp6WJrfIYqoh3Vw7zh6ZfmcAjkELXei3DS1
sySA66syQKGk5G2xFxr3mQzywOa2JfstK1JftvzEmIpav6rCcaqdA0pM1PHJ5AVa
LjMEl6To9fk99Cfp77OY18/xPYfxrcEqt4yGTJP1RnGxLaY961T6PI7EYJ3mfeTx
CwROwr8ZoNc5OnRmh+rdJMsNG/qFvI1Ys0nE1EehyKizoXYQKkjcrWnjA0RDk/dq
kP2CuKF1ChBNSaKROttn8QOyOU7fxYFhqhnoH9JzYtxaw2EcGARkgCJtEVHRevzC
hRo4VM+zwS9iNMVJiHA2C9CY+LXwgCDBg60Gu8/cAzriDeDdKFCCNYDA3Eqp8gOE
LJC6/tcToHqLztWEvnB4h+Fs9GUZT1sLyHudQiiP8kR06Y4+Dq3sytk6B44VD0P2
EOF
)"
}
# shellcheck disable=2155
function do_on() {
# Set up everything required to run tests on Digital Ocean.
# Steps from ../tools/provisioning/do/README.md have been followed.
# All sensitive files have been encrypted, see respective functions.
if [ -z "$SECRET_KEY" ]; then
echo >&2 "Failed to configure for Digital Ocean: no value for the SECRET_KEY environment variable."
return 1
fi
# SSH public key:
export TF_VAR_do_public_key_path="$HOME/.ssh/weaveworks_cit_id_rsa.pub"
ssh_public_key >"$TF_VAR_do_public_key_path"
export DIGITALOCEAN_SSH_KEY_NAME="weaveworks-cit"
export TF_VAR_do_public_key_id=5228799
# SSH private key:
export TF_VAR_do_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY")
# API token:
# The below Digital Ocean token has been AES256-encrypted and then Base64-encoded using the following command:
# $ openssl enc -in /tmp/digital_ocean_token.txt -e -aes256 -pass stdin | openssl base64 > /tmp/digital_ocean_token.txt.aes.b64
# The command below does the reverse, i.e. it base64-decodes and AES-decrypts the file, and prints it to stdout.
# N.B.: Ask Marc for the password, or otherwise re-generate the token for Digital Ocean, as per ../tools/provisioning/do/README.md.
export DIGITALOCEAN_TOKEN=$(decrypt "$SECRET_KEY" "Digital Ocean token" "U2FsdGVkX1/Gq5Rj9dDDraME8xK30JOyJ9dhfQzPBaaePJHqDPIG6of71DdJW0UyFUyRtbRflCPaZ8Um1pDJpU5LoNWQk4uCApC8+xciltT73uQtttLBG8FqgFBvYIHS")
export DIGITALOCEAN_TOKEN_NAME="weaveworks-cit"
export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
}
alias do_on='do_on'
function do_off() {
unset TF_VAR_do_public_key_path
unset DIGITALOCEAN_SSH_KEY_NAME
unset TF_VAR_do_public_key_id
unset TF_VAR_do_private_key_path
unset DIGITALOCEAN_TOKEN
unset DIGITALOCEAN_TOKEN_NAME
unset TF_VAR_client_ip
}
alias do_off='do_off'
# shellcheck disable=2155
function gcp_on() {
# Set up everything required to run tests on GCP.
# Steps from ../tools/provisioning/gcp/README.md have been followed.
# All sensitive files have been encrypted, see respective functions.
if [ -z "$SECRET_KEY" ]; then
echo >&2 "Failed to configure for Google Cloud Platform: no value for the SECRET_KEY environment variable."
return 1
fi
# SSH public key and SSH username:
export TF_VAR_gcp_public_key_path="$HOME/.ssh/weaveworks_cit_id_rsa.pub"
ssh_public_key >"$TF_VAR_gcp_public_key_path"
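# Derive the SSH username from the public key's comment field (third space-separated field, truncated at any '@'):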
export TF_VAR_gcp_username=$(cut -d' ' -f3 "$TF_VAR_gcp_public_key_path" | cut -d'@' -f1)
# SSH private key:
export TF_VAR_gcp_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY")
# JSON credentials:
export GOOGLE_CREDENTIALS_FILE="$HOME/.ssh/weaveworks-cit.json"
[ -e "$GOOGLE_CREDENTIALS_FILE" ] && rm -f "$GOOGLE_CREDENTIALS_FILE"
gcp_credentials "$SECRET_KEY" >"$GOOGLE_CREDENTIALS_FILE"
chmod 400 "$GOOGLE_CREDENTIALS_FILE"
export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
export TF_VAR_gcp_project="${PROJECT:-"weave-net-tests"}"
# shellcheck disable=2015
[ -z "$PROJECT" ] && echo >&2 "WARNING: no value provided for PROJECT environment variable: defaulted it to $TF_VAR_gcp_project." || true
}
alias gcp_on='gcp_on'
function gcp_off() {
unset TF_VAR_gcp_public_key_path
unset TF_VAR_gcp_username
unset TF_VAR_gcp_private_key_path
unset GOOGLE_CREDENTIALS_FILE
unset GOOGLE_CREDENTIALS
unset TF_VAR_client_ip
unset TF_VAR_gcp_project
}
alias gcp_off='gcp_off'
# shellcheck disable=2155
function aws_on() {
# Set up everything required to run tests on Amazon Web Services.
# Steps from ../tools/provisioning/aws/README.md have been followed.
# All sensitive files have been encrypted, see respective functions.
if [ -z "$SECRET_KEY" ]; then
echo >&2 "Failed to configure for Amazon Web Services: no value for the SECRET_KEY environment variable."
return 1
fi
# SSH public key:
export TF_VAR_aws_public_key_name="weaveworks_cit_id_rsa"
# SSH private key:
export TF_VAR_aws_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY")
# The below AWS access key ID and secret access key have been AES256-encrypted and then Base64-encoded using the following commands:
# $ openssl enc -in /tmp/aws_access_key_id.txt -e -aes256 -pass stdin | openssl base64 > /tmp/aws_access_key_id.txt.aes.b64
# $ openssl enc -in /tmp/aws_secret_access_key.txt -e -aes256 -pass stdin | openssl base64 > /tmp/aws_secret_access_key.txt.aes.b64
# The commands below do the reverse, i.e. they base64-decode and AES-decrypt the encrypted and encoded strings, and print them to stdout.
# N.B.: Ask Marc for the password, or otherwise re-generate the AWS access key ID and secret access key, as per ../tools/provisioning/aws/README.md.
export AWS_ACCESS_KEY_ID="$(decrypt "$SECRET_KEY" "AWS access key ID" "U2FsdGVkX18Txjm2PWSlJsToYm1vv4dMTtVLkRNiQbrC6Y6GuIHb1ao5MmGPJ1wf")"
export AWS_SECRET_ACCESS_KEY="$(decrypt "$SECRET_KEY" "AWS secret access key" "$(
cat <<EOF
U2FsdGVkX1/BFp/lQnSoy0LxUuDz0z0YnqxhO8KBrtt3x6YEWyVFzY34rFhpGiB7
IxYq20K87Zrx/Q/urMoWgg==
EOF
)")"
export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
}
alias aws_on='aws_on'
function aws_off() {
unset TF_VAR_aws_public_key_name
unset TF_VAR_aws_private_key_path
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset TF_VAR_client_ip
}
alias aws_off='aws_off'
function tf_ssh_usage() {
cat >&2 <<-EOF
ERROR: $1
Usage:
\$ tf_ssh <host ID (1-based)> [OPTION]...
Examples:
\$ tf_ssh 1
\$ tf_ssh 1 -o LogLevel VERBOSE
\$ tf_ssh 1 -i ~/.ssh/custom_private_key_id_rsa
Available machines:
EOF
cat -n >&2 <<<"$(terraform output public_etc_hosts)"
}
# shellcheck disable=SC2155
function tf_ssh() {
[ -z "$1" ] && tf_ssh_usage "No host ID provided." && return 1
local ip="$(sed "$1q;d" <<<"$(terraform output public_etc_hosts)" | cut -d ' ' -f 1)"
shift # Drop the first argument, corresponding to the machine ID, to allow passing other arguments to SSH using "$@" -- see below.
[ -z "$ip" ] && tf_ssh_usage "Invalid host ID provided." && return 1
# shellcheck disable=SC2029
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" "$(terraform output username)@$ip"
}
alias tf_ssh='tf_ssh'
function tf_ansi_usage() {
cat >&2 <<-EOF
ERROR: $1
Usage:
\$ tf_ansi <playbook or playbook ID (1-based)> [OPTION]...
Examples:
\$ tf_ansi setup_weave-net_dev
\$ tf_ansi 1
\$ tf_ansi 1 -vvv --private-key=~/.ssh/custom_private_key_id_rsa
\$ tf_ansi setup_weave-kube --extra-vars "docker_version=1.12.6 kubernetes_version=1.5.6"
Available playbooks:
EOF
cat -n >&2 <<<"$(for file in "$(dirname "${BASH_SOURCE[0]}")"/../../config_management/*.yml; do basename "$file" | sed 's/.yml//'; done)"
}
# shellcheck disable=SC2155,SC2064
function tf_ansi() {
[ -z "$1" ] && tf_ansi_usage "No Ansible playbook provided." && return 1
local id="$1"
shift # Drop the first argument to allow passing other arguments to Ansible using "$@" -- see below.
if [[ "$id" =~ ^[0-9]+$ ]]; then
local playbooks=(../../config_management/*.yml)
local path="${playbooks[(($id - 1))]}" # Select the ith entry in the list of playbooks (0-based).
else
local path="$(dirname "${BASH_SOURCE[0]}")/../../config_management/$id.yml"
fi
local inventory="$(mktemp /tmp/ansible_inventory_XXX)"
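# Remove the temporary inventory on interrupt/termination or when this function returns: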
trap 'rm -f $inventory' SIGINT SIGTERM RETURN
echo -e "$(terraform output ansible_inventory)" >"$inventory"
[ ! -r "$path" ] && tf_ansi_usage "Ansible playbook not found: $path" && return 1
ansible-playbook "$@" -u "$(terraform output username)" -i "$inventory" --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" "$path"
}
alias tf_ansi='tf_ansi'

push-images Executable file
@@ -0,0 +1,53 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
QUAY_PREFIX=quay.io/
IMAGES=$(make images)
IMAGE_TAG=$(./tools/image-tag)
usage() {
echo "$0 [-no-docker-hub]"
}
NO_DOCKER_HUB=
while [ $# -gt 0 ]; do
case "$1" in
-no-docker-hub)
NO_DOCKER_HUB=1
shift 1
;;
*)
usage
exit 2
;;
esac
done
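# Push each image in the background, recording its PID so that any failure can be detected below.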
pids=""
for image in ${IMAGES}; do
if [[ "$image" == *"build"* ]]; then
continue
fi
echo "Will push ${image}:${IMAGE_TAG}"
docker push "${image}:${IMAGE_TAG}" &
pids="$pids $!"
if [ -z "$NO_DOCKER_HUB" ]; then
# Remove the quay.io/ prefix and push to Docker Hub
docker_hub_image=${image#$QUAY_PREFIX}
docker tag "${image}:${IMAGE_TAG}" "${docker_hub_image}:${IMAGE_TAG}"
echo "Will push ${docker_hub_image}:${IMAGE_TAG}"
docker push "${docker_hub_image}:${IMAGE_TAG}" &
pids="$pids $!"
fi
done
# Wait individually for tasks so we fail-exit on any non-zero return code
for p in $pids; do
wait $p
done
wait

@@ -15,7 +7,7 @@ import (
"time"
"github.com/mgutz/ansi"
"github.com/weaveworks/docker/pkg/mflag"
"github.com/weaveworks/common/mflag"
)
const (
@@ -148,9 +148,10 @@ func updateScheduler(test string, duration float64) {
func getSchedule(tests []string) ([]string, error) {
var (
userName = os.Getenv("CIRCLE_PROJECT_USERNAME")
project = os.Getenv("CIRCLE_PROJECT_REPONAME")
buildNum = os.Getenv("CIRCLE_BUILD_NUM")
testRun = project + "-integration-" + buildNum
testRun = userName + "-" + project + "-integration-" + buildNum
shardCount = os.Getenv("CIRCLE_NODE_TOTAL")
shardID = os.Getenv("CIRCLE_NODE_INDEX")
requestBody = &bytes.Buffer{}

sched
@@ -1,20 +1,20 @@
#!/usr/bin/python
import sys, string, json, urllib
#!/usr/bin/env python
import sys, string, urllib
import requests
import optparse
def test_time(target, test_name, runtime):
r = requests.post(target + "/record/%s/%f" % (urllib.quote(test_name, safe=""), runtime))
print r.text
print r.text.encode('utf-8')
assert r.status_code == 204
def test_sched(target, test_run, shard_count, shard_id):
tests = json.dumps({'tests': string.split(sys.stdin.read())})
r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), data=tests)
tests = {'tests': string.split(sys.stdin.read())}
r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), json=tests)
assert r.status_code == 200
result = r.json()
for test in sorted(result['tests']):
print test
print test.encode('utf-8')
def usage():
print "%s (--target=...) <cmd> <args..>" % sys.argv[0]

@@ -19,120 +19,188 @@ app.debug = True
# observations faster.
alpha = 0.3
class Test(ndb.Model):
total_run_time = ndb.FloatProperty(default=0.)  # Not total, but an EWMA
total_runs = ndb.IntegerProperty(default=0)
def parallelism(self):
name = self.key.string_id()
m = re.search('(\d+)_test.sh$', name)
if m is None:
return 1
else:
return int(m.group(1))
def cost(self):
p = self.parallelism()
logging.info("Test %s has parallelism %d and avg run time %s",
self.key.string_id(), p, self.total_run_time)
return self.parallelism() * self.total_run_time
def cost(self):
p = self.parallelism()
logging.info("Test %s has parallelism %d and avg run time %s", self.key.string_id(), p, self.total_run_time)
return self.parallelism() * self.total_run_time
class Schedule(ndb.Model):
shards = ndb.JsonProperty()
@app.route('/record/<path:test_name>/<runtime>', methods=['POST'])
@ndb.transactional
def record(test_name, runtime):
test = Test.get_by_id(test_name)
if test is None:
test = Test(id=test_name)
test.total_run_time = (test.total_run_time * (1-alpha)) + (float(runtime) * alpha)
test.total_runs += 1
test.put()
return ('', 204)
test = Test.get_by_id(test_name)
if test is None:
test = Test(id=test_name)
test.total_run_time = (test.total_run_time *
(1 - alpha)) + (float(runtime) * alpha)
test.total_runs += 1
test.put()
return ('', 204)
@app.route('/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
@app.route(
'/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
def schedule(test_run, shard_count, shard):
# read tests from body
test_names = flask.request.get_json(force=True)['tests']
# first see if we have a schedule already
schedule_id = "%s-%d" % (test_run, shard_count)
schedule = Schedule.get_by_id(schedule_id)
if schedule is not None:
return flask.json.jsonify(tests=schedule.shards[str(shard)])
# if not, do simple greedy algorithm
test_times = ndb.get_multi(
ndb.Key(Test, test_name) for test_name in test_names)
def avg(test):
if test is not None:
return test.cost()
return 1
test_times = [(test_name, avg(test))
for test_name, test in zip(test_names, test_times)]
test_times_dict = dict(test_times)
test_times.sort(key=operator.itemgetter(1))
shards = {i: [] for i in xrange(shard_count)}
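# Greedy bin-packing: repeatedly take the longest remaining test and assign it to the currently shortest shard.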
while test_times:
test_name, time = test_times.pop()
# find shortest shard and put it in that
s, _ = min(
((i, sum(test_times_dict[t] for t in shards[i]))
for i in xrange(shard_count)),
key=operator.itemgetter(1))
shards[s].append(test_name)
# atomically insert or retrieve existing schedule
schedule = Schedule.get_or_insert(schedule_id, shards=shards)
return flask.json.jsonify(tests=schedule.shards[str(shard)])
# if not, do simple greedy algorithm
test_times = ndb.get_multi(ndb.Key(Test, test_name) for test_name in test_names)
def avg(test):
if test is not None:
return test.cost()
return 1
test_times = [(test_name, avg(test)) for test_name, test in zip(test_names, test_times)]
test_times_dict = dict(test_times)
test_times.sort(key=operator.itemgetter(1))
shards = {i: [] for i in xrange(shard_count)}
while test_times:
test_name, time = test_times.pop()
FIREWALL_REGEXES = [
re.compile(
r'^(?P<network>\w+)-allow-(?P<type>\w+)-(?P<build>\d+)-(?P<shard>\d+)$'
),
re.compile(r'^(?P<network>\w+)-(?P<build>\d+)-(?P<shard>\d+)-allow-'
r'(?P<type>[\w\-]+)$'),
]
NAME_REGEXES = [
re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$'),
re.compile(r'^test-(?P<build>\d+)-(?P<shard>\d+)-(?P<index>\d+)$'),
]
# find shortest shard and put it in that
s, _ = min(((i, sum(test_times_dict[t] for t in shards[i]))
for i in xrange(shard_count)), key=operator.itemgetter(1))
shards[s].append(test_name)
def _matches_any_regex(name, regexes):
for regex in regexes:
matches = regex.match(name)
if matches:
return matches
# atomically insert or retrieve existing schedule
schedule = Schedule.get_or_insert(schedule_id, shards=shards)
return flask.json.jsonify(tests=schedule.shards[str(shard)])
NAME_RE = re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$')
PROJECTS = [
('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a'),
('weaveworks/scope', 'scope-integration-tests', 'us-central1-a'),
('weaveworks/weave', 'weave-net-tests', 'us-central1-a', True),
('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a', True),
('weaveworks/scope', 'scope-integration-tests', 'us-central1-a', False),
]
@app.route('/tasks/gc')
def gc():
# Get list of running VMs, pick build id out of VM name
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
for repo, project, zone in PROJECTS:
gc_project(compute, repo, project, zone)
for repo, project, zone, gc_fw in PROJECTS:
gc_project(compute, repo, project, zone, gc_fw)
return "Done"
return "Done"
def gc_project(compute, repo, project, zone):
logging.info("GCing %s, %s, %s", repo, project, zone)
instances = compute.instances().list(project=project, zone=zone).execute()
if 'items' not in instances:
return
host_by_build = collections.defaultdict(list)
for instance in instances['items']:
matches = NAME_RE.match(instance['name'])
if matches is None:
continue
host_by_build[int(matches.group('build'))].append(instance['name'])
logging.info("Running VMs by build: %r", host_by_build)
def gc_project(compute, repo, project, zone, gc_fw):
logging.info("GCing %s, %s, %s", repo, project, zone)
# Get list of builds, filter down to running builds:
running = _get_running_builds(repo)
# Stop VMs for builds that aren't running:
_gc_compute_engine_instances(compute, project, zone, running)
# Remove firewall rules for builds that aren't running:
if gc_fw:
_gc_firewall_rules(compute, project, running)
# Get list of builds, filter down to running builds
result = urlfetch.fetch('https://circleci.com/api/v1/project/%s' % repo,
headers={'Accept': 'application/json'})
assert result.status_code == 200
builds = json.loads(result.content)
running = {build['build_num'] for build in builds if not build.get('stop_time')}
logging.info("Runnings builds: %r", running)
# Stop VMs for builds that aren't running
stopped = []
for build, names in host_by_build.iteritems():
if build in running:
continue
for name in names:
stopped.append(name)
logging.info("Stopping VM %s", name)
compute.instances().delete(project=project, zone=zone, instance=name).execute()
def _get_running_builds(repo):
result = urlfetch.fetch(
'https://circleci.com/api/v1/project/%s' % repo,
headers={'Accept': 'application/json'})
assert result.status_code == 200
builds = json.loads(result.content)
running = {
build['build_num']
for build in builds if not build.get('stop_time')
}
logging.info("Runnings builds: %r", running)
return running
return
def _get_hosts_by_build(instances):
host_by_build = collections.defaultdict(list)
for instance in instances['items']:
matches = _matches_any_regex(instance['name'], NAME_REGEXES)
if not matches:
continue
host_by_build[int(matches.group('build'))].append(instance['name'])
logging.info("Running VMs by build: %r", host_by_build)
return host_by_build
def _gc_compute_engine_instances(compute, project, zone, running):
instances = compute.instances().list(project=project, zone=zone).execute()
if 'items' not in instances:
return
host_by_build = _get_hosts_by_build(instances)
stopped = []
for build, names in host_by_build.iteritems():
if build in running:
continue
for name in names:
stopped.append(name)
logging.info("Stopping VM %s", name)
compute.instances().delete(
project=project, zone=zone, instance=name).execute()
return stopped
def _gc_firewall_rules(compute, project, running):
firewalls = compute.firewalls().list(project=project).execute()
if 'items' not in firewalls:
return
for firewall in firewalls['items']:
matches = _matches_any_regex(firewall['name'], FIREWALL_REGEXES)
if not matches:
continue
if int(matches.group('build')) in running:
continue
logging.info("Deleting firewall rule %s", firewall['name'])
compute.firewalls().delete(
project=project, firewall=firewall['name']).execute()

@@ -9,8 +9,8 @@ import (
"text/template"
socks5 "github.com/armon/go-socks5"
"github.com/weaveworks/docker/pkg/mflag"
"github.com/weaveworks/weave/common/mflagext"
"github.com/weaveworks/common/mflag"
"github.com/weaveworks/common/mflagext"
"golang.org/x/net/context"
)

test
@@ -3,12 +3,15 @@
set -e
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
GO_TEST_ARGS=(-tags netgo -cpu 4 -timeout 8m)
SLOW=
NO_GO_GET=
NO_GO_GET=true
TAGS=
PARALLEL=
RACE="-race -covermode=atomic"
TIMEOUT=1m
usage() {
echo "$0 [-slow] [-in-container foo]"
echo "$0 [-slow] [-in-container foo] [-netgo] [-(no-)go-get] [-timeout 1m]"
}
while [ $# -gt 0 ]; do
@@ -17,10 +20,34 @@ while [ $# -gt 0 ]; do
SLOW=true
shift 1
;;
"-no-race")
RACE=
shift 1
;;
"-no-go-get")
NO_GO_GET=true
shift 1
;;
"-go-get")
NO_GO_GET=
shift 1
;;
"-netgo")
TAGS="netgo"
shift 1
;;
"-tags")
TAGS="$2"
shift 2
;;
"-p")
PARALLEL=true
shift 1
;;
"-timeout")
TIMEOUT=$2
shift 2
;;
*)
usage
exit 2
@@ -28,12 +55,14 @@ while [ $# -gt 0 ]; do
esac
done
GO_TEST_ARGS=(-tags "${TAGS[@]}" -cpu 4 -timeout $TIMEOUT)
if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then
SLOW=true
fi
if [ -n "$SLOW" ]; then
GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" -race -covermode=atomic)
GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" ${RACE})
# shellcheck disable=SC2153
if [ -n "$COVERDIR" ]; then
@@ -49,7 +78,7 @@ fail=0
if [ -z "$TESTDIRS" ]; then
# NB: Relies on paths being prefixed with './'.
TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|prog|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|'))
TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|'))
else
# TESTDIRS on the right side is not really an array variable, it
# is just a string with spaces, but it is written like that to
@@ -60,7 +89,7 @@ fi
# If running on circle, use the scheduler to work out what tests to run on what shard
if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
PREFIX=$(go list -e ./ | sed -e 's/\//-/g')
TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX"))
TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_PROJECT_USERNAME-$CIRCLE_PROJECT_REPONAME-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX"))
echo "${TESTDIRS[@]}"
fi
@@ -69,33 +98,51 @@ PACKAGE_BASE=$(go list -e ./)
# Speed up the tests by compiling and installing their dependencies first.
go test -i "${GO_TEST_ARGS[@]}" "${TESTDIRS[@]}"
for dir in "${TESTDIRS[@]}"; do
run_test() {
local dir=$1
if [ -z "$NO_GO_GET" ]; then
go get -t -tags netgo "$dir"
go get -t -tags "${TAGS[@]}" "$dir"
fi
GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}")
local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}")
if [ -n "$SLOW" ]; then
local COVERPKGS
COVERPKGS=$( (
go list "$dir"
go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/"
) | paste -s -d, -)
local output
output=$(mktemp "$coverdir/unit.XXXXXXXXXX")
GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS)
local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS)
fi
local START
START=$(date +%s)
if ! go test "${GO_TEST_ARGS_RUN[@]}" "$dir"; then
fail=1
fi
RUNTIME=$(($(date +%s) - START))
local END
END=$(date +%s)
local RUNTIME=$((END - START))
# Report test runtime when running on circle, to help scheduler
if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
"$DIR/sched" time "$dir" "$RUNTIME"
fi
}
for dir in "${TESTDIRS[@]}"; do
if [ -n "$PARALLEL" ]; then
run_test "$dir" &
else
run_test "$dir"
fi
done
if [ -n "$PARALLEL" ]; then
wait
fi
if [ -n "$SLOW" ] && [ -z "$COVERDIR" ]; then
go get github.com/weaveworks/tools/cover
cover "$coverdir"/* >profile.cov

vendor/github.com/mvdan/sh/LICENSE generated vendored
@@ -1,27 +0,0 @@
Copyright (c) 2015, Daniel Martí. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,202 +0,0 @@
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"regexp"
"runtime/pprof"
"strings"
"github.com/mvdan/sh/syntax"
)
var (
write = flag.Bool("w", false, "write result to file instead of stdout")
list = flag.Bool("l", false, "list files whose formatting differs from shfmt's")
indent = flag.Int("i", 0, "indent: 0 for tabs (default), >0 for number of spaces")
posix = flag.Bool("p", false, "parse POSIX shell code instead of bash")
cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
parseMode syntax.ParseMode
printConfig syntax.PrintConfig
readBuf, writeBuf bytes.Buffer
out io.Writer
)
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
out = os.Stdout
printConfig.Spaces = *indent
parseMode |= syntax.ParseComments
if *posix {
parseMode |= syntax.PosixConformant
}
if flag.NArg() == 0 {
if err := formatStdin(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
return
}
anyErr := false
onError := func(err error) {
anyErr = true
fmt.Fprintln(os.Stderr, err)
}
for _, path := range flag.Args() {
walk(path, onError)
}
if anyErr {
os.Exit(1)
}
}
func formatStdin() error {
if *write || *list {
return fmt.Errorf("-w and -l can only be used on files")
}
readBuf.Reset()
if _, err := io.Copy(&readBuf, os.Stdin); err != nil {
return err
}
src := readBuf.Bytes()
prog, err := syntax.Parse(src, "", parseMode)
if err != nil {
return err
}
return printConfig.Fprint(out, prog)
}
var (
shellFile = regexp.MustCompile(`\.(sh|bash)$`)
validShebang = regexp.MustCompile(`^#!\s?/(usr/)?bin/(env *)?(sh|bash)`)
vcsDir = regexp.MustCompile(`^\.(git|svn|hg)$`)
)
type shellConfidence int
const (
notShellFile shellConfidence = iota
ifValidShebang
isShellFile
)
func getConfidence(info os.FileInfo) shellConfidence {
name := info.Name()
switch {
case info.IsDir(), name[0] == '.', !info.Mode().IsRegular():
return notShellFile
case shellFile.MatchString(name):
return isShellFile
case strings.Contains(name, "."):
return notShellFile // different extension
case info.Size() < 8:
return notShellFile // cannot possibly hold valid shebang
default:
return ifValidShebang
}
}
func walk(path string, onError func(error)) {
info, err := os.Stat(path)
if err != nil {
onError(err)
return
}
if !info.IsDir() {
if err := formatPath(path, false); err != nil {
onError(err)
}
return
}
filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
if info.IsDir() && vcsDir.MatchString(info.Name()) {
return filepath.SkipDir
}
if err != nil {
onError(err)
return nil
}
conf := getConfidence(info)
if conf == notShellFile {
return nil
}
err = formatPath(path, conf == ifValidShebang)
if err != nil && !os.IsNotExist(err) {
onError(err)
}
return nil
})
}
func empty(f *os.File) error {
if err := f.Truncate(0); err != nil {
return err
}
_, err := f.Seek(0, 0)
return err
}
func formatPath(path string, checkShebang bool) error {
openMode := os.O_RDONLY
if *write {
openMode = os.O_RDWR
}
f, err := os.OpenFile(path, openMode, 0)
if err != nil {
return err
}
defer f.Close()
readBuf.Reset()
if _, err := io.Copy(&readBuf, f); err != nil {
return err
}
src := readBuf.Bytes()
if checkShebang && !validShebang.Match(src[:32]) {
return nil
}
prog, err := syntax.Parse(src, path, parseMode)
if err != nil {
return err
}
writeBuf.Reset()
printConfig.Fprint(&writeBuf, prog)
res := writeBuf.Bytes()
if !bytes.Equal(src, res) {
if *list {
fmt.Fprintln(out, path)
}
if *write {
if err := empty(f); err != nil {
return err
}
if _, err := f.Write(res); err != nil {
return err
}
}
}
if !*list && !*write {
if _, err := out.Write(res); err != nil {
return err
}
}
return nil
}

@@ -1,6 +0,0 @@
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
// Package syntax implements parsing and formatting of shell programs.
// It supports both POSIX Shell and Bash.
package syntax

@@ -1,962 +0,0 @@
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package syntax
import (
"bytes"
)
// bytes that form or start a token
func regOps(b byte) bool {
return b == ';' || b == '"' || b == '\'' || b == '(' ||
b == ')' || b == '$' || b == '|' || b == '&' ||
b == '>' || b == '<' || b == '`'
}
// tokenize these inside parameter expansions
func paramOps(b byte) bool {
return b == '}' || b == '#' || b == ':' || b == '-' || b == '+' ||
b == '=' || b == '?' || b == '%' || b == '[' || b == '/' ||
b == '^' || b == ','
}
// tokenize these inside arithmetic expansions
func arithmOps(b byte) bool {
return b == '+' || b == '-' || b == '!' || b == '*' ||
b == '/' || b == '%' || b == '(' || b == ')' ||
b == '^' || b == '<' || b == '>' || b == ':' ||
b == '=' || b == ',' || b == '?' || b == '|' ||
b == '&'
}
func wordBreak(b byte) bool {
return b == ' ' || b == '\t' || b == '\r' || b == '\n' ||
b == '&' || b == '>' || b == '<' || b == '|' ||
b == ';' || b == '(' || b == ')'
}
func (p *parser) next() {
if p.tok == _EOF || p.npos >= len(p.src) {
p.tok = _EOF
return
}
p.spaced, p.newLine = false, false
b, q := p.src[p.npos], p.quote
p.pos = Pos(p.npos + 1)
switch q {
case hdocWord:
if wordBreak(b) {
p.tok = illegalTok
p.spaced = true
return
}
case paramExpRepl:
switch b {
case '}':
p.npos++
p.tok = rightBrace
case '/':
p.npos++
p.tok = Quo
case '`', '"', '$':
p.tok = p.dqToken(b)
default:
p.advanceLitOther(q)
}
return
case dblQuotes:
if b == '`' || b == '"' || b == '$' {
p.tok = p.dqToken(b)
} else {
p.advanceLitDquote()
}
return
case hdocBody, hdocBodyTabs:
if b == '`' || b == '$' {
p.tok = p.dqToken(b)
} else if p.hdocStop == nil {
p.tok = illegalTok
} else {
p.advanceLitHdoc()
}
return
case paramExpExp:
switch b {
case '}':
p.npos++
p.tok = rightBrace
case '`', '"', '$':
p.tok = p.dqToken(b)
default:
p.advanceLitOther(q)
}
return
case sglQuotes:
if b == '\'' {
p.npos++
p.tok = sglQuote
} else {
p.advanceLitOther(q)
}
return
}
skipSpace:
for {
switch b {
case ' ', '\t', '\r':
p.spaced = true
p.npos++
case '\n':
if p.quote == arithmExprLet {
p.tok = illegalTok
p.newLine, p.spaced = true, true
return
}
p.spaced = true
if p.npos < len(p.src) {
p.npos++
}
p.f.Lines = append(p.f.Lines, p.npos)
p.newLine = true
if len(p.heredocs) > p.buriedHdocs {
p.doHeredocs()
if p.tok == _EOF {
return
}
}
case '\\':
if p.npos < len(p.src)-1 && p.src[p.npos+1] == '\n' {
p.npos += 2
p.f.Lines = append(p.f.Lines, p.npos)
} else {
break skipSpace
}
default:
break skipSpace
}
if p.npos >= len(p.src) {
p.tok = _EOF
return
}
b = p.src[p.npos]
}
p.pos = Pos(p.npos + 1)
switch {
case q&allRegTokens != 0:
switch b {
case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`':
p.tok = p.regToken(b)
case '#':
p.npos++
bs, _ := p.readUntil('\n')
p.npos += len(bs)
if p.mode&ParseComments > 0 {
p.f.Comments = append(p.f.Comments, &Comment{
Hash: p.pos,
Text: string(bs),
})
}
p.next()
case '?', '*', '+', '@', '!':
if p.bash() && p.npos+1 < len(p.src) && p.src[p.npos+1] == '(' {
switch b {
case '?':
p.tok = globQuest
case '*':
p.tok = globMul
case '+':
p.tok = globAdd
case '@':
p.tok = globAt
default: // '!'
p.tok = globNot
}
p.npos += 2
} else {
p.advanceLitNone()
}
default:
p.advanceLitNone()
}
case q == paramExpName && paramOps(b):
p.tok = p.paramToken(b)
case q&allArithmExpr != 0 && arithmOps(b):
p.tok = p.arithmToken(b)
case q&allRbrack != 0 && b == ']':
p.npos++
p.tok = rightBrack
case q == testRegexp:
p.advanceLitRe()
case regOps(b):
p.tok = p.regToken(b)
default:
p.advanceLitOther(q)
}
}
func byteAt(src []byte, i int) byte {
if i >= len(src) {
return 0
}
return src[i]
}
func (p *parser) regToken(b byte) Token {
switch b {
case '\'':
p.npos++
return sglQuote
case '"':
p.npos++
return dblQuote
case '`':
p.npos++
return bckQuote
case '&':
switch byteAt(p.src, p.npos+1) {
case '&':
p.npos += 2
return AndExpr
case '>':
if !p.bash() {
break
}
if byteAt(p.src, p.npos+2) == '>' {
p.npos += 3
return appAll
}
p.npos += 2
return rdrAll
}
p.npos++
return And
case '|':
switch byteAt(p.src, p.npos+1) {
case '|':
p.npos += 2
return OrExpr
case '&':
if !p.bash() {
break
}
p.npos += 2
return pipeAll
}
p.npos++
return Or
case '$':
switch byteAt(p.src, p.npos+1) {
case '\'':
if !p.bash() {
break
}
p.npos += 2
return dollSglQuote
case '"':
if !p.bash() {
break
}
p.npos += 2
return dollDblQuote
case '{':
p.npos += 2
return dollBrace
case '[':
if !p.bash() {
break
}
p.npos += 2
return dollBrack
case '(':
if byteAt(p.src, p.npos+2) == '(' {
p.npos += 3
return dollDblParen
}
p.npos += 2
return dollParen
}
p.npos++
return dollar
case '(':
if p.bash() && byteAt(p.src, p.npos+1) == '(' {
p.npos += 2
return dblLeftParen
}
p.npos++
return leftParen
case ')':
p.npos++
return rightParen
case ';':
switch byteAt(p.src, p.npos+1) {
case ';':
if p.bash() && byteAt(p.src, p.npos+2) == '&' {
p.npos += 3
return dblSemiFall
}
p.npos += 2
return dblSemicolon
case '&':
if !p.bash() {
break
}
p.npos += 2
return semiFall
}
p.npos++
return semicolon
case '<':
switch byteAt(p.src, p.npos+1) {
case '<':
if b := byteAt(p.src, p.npos+2); b == '-' {
p.npos += 3
return dashHdoc
} else if p.bash() && b == '<' {
p.npos += 3
return wordHdoc
}
p.npos += 2
return Shl
case '>':
p.npos += 2
return rdrInOut
case '&':
p.npos += 2
return dplIn
case '(':
if !p.bash() {
break
}
p.npos += 2
return cmdIn
}
p.npos++
return Lss
default: // '>'
switch byteAt(p.src, p.npos+1) {
case '>':
p.npos += 2
return Shr
case '&':
p.npos += 2
return dplOut
case '|':
p.npos += 2
return clbOut
case '(':
if !p.bash() {
break
}
p.npos += 2
return cmdOut
}
p.npos++
return Gtr
}
}
func (p *parser) dqToken(b byte) Token {
switch b {
case '"':
p.npos++
return dblQuote
case '`':
p.npos++
return bckQuote
default: // '$'
switch byteAt(p.src, p.npos+1) {
case '{':
p.npos += 2
return dollBrace
case '[':
if !p.bash() {
break
}
p.npos += 2
return dollBrack
case '(':
if byteAt(p.src, p.npos+2) == '(' {
p.npos += 3
return dollDblParen
}
p.npos += 2
return dollParen
}
p.npos++
return dollar
}
}
func (p *parser) paramToken(b byte) Token {
switch b {
case '}':
p.npos++
return rightBrace
case ':':
switch byteAt(p.src, p.npos+1) {
case '+':
p.npos += 2
return ColAdd
case '-':
p.npos += 2
return ColSub
case '?':
p.npos += 2
return ColQuest
case '=':
p.npos += 2
return ColAssgn
}
p.npos++
return Colon
case '+':
p.npos++
return Add
case '-':
p.npos++
return Sub
case '?':
p.npos++
return Quest
case '=':
p.npos++
return Assgn
case '%':
if byteAt(p.src, p.npos+1) == '%' {
p.npos += 2
return dblRem
}
p.npos++
return Rem
case '#':
if byteAt(p.src, p.npos+1) == '#' {
p.npos += 2
return dblHash
}
p.npos++
return Hash
case '[':
p.npos++
return leftBrack
case '^':
if byteAt(p.src, p.npos+1) == '^' {
p.npos += 2
return dblXor
}
p.npos++
return Xor
case ',':
if byteAt(p.src, p.npos+1) == ',' {
p.npos += 2
return dblComma
}
p.npos++
return Comma
default: // '/'
if byteAt(p.src, p.npos+1) == '/' {
p.npos += 2
return dblQuo
}
p.npos++
return Quo
}
}
func (p *parser) arithmToken(b byte) Token {
switch b {
case '!':
if byteAt(p.src, p.npos+1) == '=' {
p.npos += 2
return Neq
}
p.npos++
return Not
case '=':
if byteAt(p.src, p.npos+1) == '=' {
p.npos += 2
return Eql
}
p.npos++
return Assgn
case '(':
p.npos++
return leftParen
case ')':
p.npos++
return rightParen
case '&':
switch byteAt(p.src, p.npos+1) {
case '&':
p.npos += 2
return AndExpr
case '=':
p.npos += 2
return AndAssgn
}
p.npos++
return And
case '|':
switch byteAt(p.src, p.npos+1) {
case '|':
p.npos += 2
return OrExpr
case '=':
p.npos += 2
return OrAssgn
}
p.npos++
return Or
case '<':
switch byteAt(p.src, p.npos+1) {
case '<':
if byteAt(p.src, p.npos+2) == '=' {
p.npos += 3
return ShlAssgn
}
p.npos += 2
return Shl
case '=':
p.npos += 2
return Leq
}
p.npos++
return Lss
case '>':
switch byteAt(p.src, p.npos+1) {
case '>':
if byteAt(p.src, p.npos+2) == '=' {
p.npos += 3
return ShrAssgn
}
p.npos += 2
return Shr
case '=':
p.npos += 2
return Geq
}
p.npos++
return Gtr
case '+':
switch byteAt(p.src, p.npos+1) {
case '+':
p.npos += 2
return Inc
case '=':
p.npos += 2
return AddAssgn
}
p.npos++
return Add
case '-':
switch byteAt(p.src, p.npos+1) {
case '-':
p.npos += 2
return Dec
case '=':
p.npos += 2
return SubAssgn
}
p.npos++
return Sub
case '%':
if byteAt(p.src, p.npos+1) == '=' {
p.npos += 2
return RemAssgn
}
p.npos++
return Rem
case '*':
switch byteAt(p.src, p.npos+1) {
case '*':
p.npos += 2
return Pow
case '=':
p.npos += 2
return MulAssgn
}
p.npos++
return Mul
case '/':
if byteAt(p.src, p.npos+1) == '=' {
p.npos += 2
return QuoAssgn
}
p.npos++
return Quo
case '^':
if byteAt(p.src, p.npos+1) == '=' {
p.npos += 2
return XorAssgn
}
p.npos++
return Xor
case ',':
p.npos++
return Comma
case '?':
p.npos++
return Quest
default: // ':'
p.npos++
return Colon
}
}
func (p *parser) advanceLitOther(q quoteState) {
bs := p.litBuf[:0]
for {
if p.npos >= len(p.src) {
p.tok, p.val = _LitWord, string(bs)
return
}
b := p.src[p.npos]
switch {
case b == '\\': // escaped byte follows
if p.npos == len(p.src)-1 {
p.npos++
bs = append(bs, '\\')
p.tok, p.val = _LitWord, string(bs)
return
}
b = p.src[p.npos+1]
p.npos += 2
if b == '\n' {
p.f.Lines = append(p.f.Lines, p.npos)
} else {
bs = append(bs, '\\', b)
}
continue
case q == sglQuotes:
switch b {
case '\n':
p.f.Lines = append(p.f.Lines, p.npos+1)
case '\'':
p.tok, p.val = _LitWord, string(bs)
return
}
case b == '`', b == '$':
p.tok, p.val = _Lit, string(bs)
return
case q == paramExpExp:
if b == '}' {
p.tok, p.val = _LitWord, string(bs)
return
} else if b == '"' {
p.tok, p.val = _Lit, string(bs)
return
}
case q == paramExpRepl:
if b == '}' || b == '/' {
p.tok, p.val = _LitWord, string(bs)
return
}
case wordBreak(b), regOps(b), q&allArithmExpr != 0 && arithmOps(b),
q == paramExpName && paramOps(b),
q&allRbrack != 0 && b == ']':
p.tok, p.val = _LitWord, string(bs)
return
}
bs = append(bs, p.src[p.npos])
p.npos++
}
}
func (p *parser) advanceLitNone() {
var i int
tok := _Lit
p.asPos = 0
loop:
for i = p.npos; i < len(p.src); i++ {
switch p.src[i] {
case '\\': // escaped byte follows
if i == len(p.src)-1 {
break
}
if i++; p.src[i] == '\n' {
p.f.Lines = append(p.f.Lines, i+1)
bs := p.src[p.npos : i-1]
p.npos = i + 1
p.advanceLitNoneCont(bs)
return
}
case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')':
tok = _LitWord
break loop
case '?', '*', '+', '@', '!':
if p.bash() && i+1 < len(p.src) && p.src[i+1] == '(' {
break loop
}
case '`':
if p.quote == subCmdBckquo {
tok = _LitWord
}
break loop
case '"', '\'', '$':
break loop
case '=':
p.asPos = i - p.npos
if p.bash() && p.asPos > 0 && p.src[p.npos+p.asPos-1] == '+' {
p.asPos-- // a+=b
}
}
}
if i == len(p.src) {
tok = _LitWord
}
p.tok, p.val = tok, string(p.src[p.npos:i])
p.npos = i
}
func (p *parser) advanceLitNoneCont(bs []byte) {
for {
if p.npos >= len(p.src) {
p.tok, p.val = _LitWord, string(bs)
return
}
switch p.src[p.npos] {
case '\\': // escaped byte follows
if p.npos == len(p.src)-1 {
p.npos++
bs = append(bs, '\\')
p.tok, p.val = _LitWord, string(bs)
return
}
b := p.src[p.npos+1]
p.npos += 2
if b == '\n' {
p.f.Lines = append(p.f.Lines, p.npos)
} else {
bs = append(bs, '\\', b)
}
case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')':
p.tok, p.val = _LitWord, string(bs)
return
case '`':
if p.quote == subCmdBckquo {
p.tok, p.val = _LitWord, string(bs)
return
}
fallthrough
case '"', '\'', '$':
p.tok, p.val = _Lit, string(bs)
return
default:
bs = append(bs, p.src[p.npos])
p.npos++
}
}
}
func (p *parser) advanceLitDquote() {
var i int
tok := _Lit
loop:
for i = p.npos; i < len(p.src); i++ {
switch p.src[i] {
case '\\': // escaped byte follows
if i == len(p.src)-1 {
break
}
if i++; p.src[i] == '\n' {
p.f.Lines = append(p.f.Lines, i+1)
}
case '"':
tok = _LitWord
break loop
case '`', '$':
break loop
case '\n':
p.f.Lines = append(p.f.Lines, i+1)
}
}
p.tok, p.val = tok, string(p.src[p.npos:i])
p.npos = i
}
func (p *parser) isHdocEnd(i int) bool {
end := p.hdocStop
if end == nil || len(p.src) < i+len(end) {
return false
}
if !bytes.Equal(end, p.src[i:i+len(end)]) {
return false
}
return len(p.src) == i+len(end) || p.src[i+len(end)] == '\n'
}
func (p *parser) advanceLitHdoc() {
n := p.npos
if p.quote == hdocBodyTabs {
for n < len(p.src) && p.src[n] == '\t' {
n++
}
}
if p.isHdocEnd(n) {
if n > p.npos {
p.tok, p.val = _LitWord, string(p.src[p.npos:n])
}
p.npos = n + len(p.hdocStop)
p.hdocStop = nil
return
}
var i int
loop:
for i = p.npos; i < len(p.src); i++ {
switch p.src[i] {
case '\\': // escaped byte follows
if i++; i == len(p.src) {
break loop
}
if p.src[i] == '\n' {
p.f.Lines = append(p.f.Lines, i+1)
}
case '`', '$':
break loop
case '\n':
n := i + 1
p.f.Lines = append(p.f.Lines, n)
if p.quote == hdocBodyTabs {
for n < len(p.src) && p.src[n] == '\t' {
n++
}
}
if p.isHdocEnd(n) {
p.tok, p.val = _LitWord, string(p.src[p.npos:n])
p.npos = n + len(p.hdocStop)
p.hdocStop = nil
return
}
}
}
p.tok, p.val = _Lit, string(p.src[p.npos:i])
p.npos = i
}
func (p *parser) hdocLitWord() Word {
pos := p.npos
end := pos
for p.npos < len(p.src) {
end = p.npos
bs, found := p.readUntil('\n')
p.npos += len(bs) + 1
if found {
p.f.Lines = append(p.f.Lines, p.npos)
}
if p.quote == hdocBodyTabs {
for end < len(p.src) && p.src[end] == '\t' {
end++
}
}
if p.isHdocEnd(end) {
break
}
}
if p.npos == len(p.src) {
end = p.npos
}
l := p.lit(Pos(pos+1), string(p.src[pos:end]))
return Word{Parts: p.singleWps(l)}
}
func (p *parser) readUntil(b byte) ([]byte, bool) {
rem := p.src[p.npos:]
if i := bytes.IndexByte(rem, b); i >= 0 {
return rem[:i], true
}
return rem, false
}
func (p *parser) advanceLitRe() {
end := bytes.Index(p.src[p.npos:], []byte(" ]]"))
p.tok = _LitWord
if end == -1 {
p.val = string(p.src[p.npos:])
p.npos = len(p.src)
return
}
p.val = string(p.src[p.npos : p.npos+end])
p.npos += end
}
func testUnaryOp(val string) Token {
switch val {
case "!":
return Not
case "-e", "-a":
return tsExists
case "-f":
return tsRegFile
case "-d":
return tsDirect
case "-c":
return tsCharSp
case "-b":
return tsBlckSp
case "-p":
return tsNmPipe
case "-S":
return tsSocket
case "-L", "-h":
return tsSmbLink
case "-g":
return tsGIDSet
case "-u":
return tsUIDSet
case "-r":
return tsRead
case "-w":
return tsWrite
case "-x":
return tsExec
case "-s":
return tsNoEmpty
case "-t":
return tsFdTerm
case "-z":
return tsEmpStr
case "-n":
return tsNempStr
case "-o":
return tsOptSet
case "-v":
return tsVarSet
case "-R":
return tsRefVar
default:
return illegalTok
}
}
func testBinaryOp(val string) Token {
switch val {
case "=":
return Assgn
case "==":
return Eql
case "!=":
return Neq
case "=~":
return tsReMatch
case "-nt":
return tsNewer
case "-ot":
return tsOlder
case "-ef":
return tsDevIno
case "-eq":
return tsEql
case "-ne":
return tsNeq
case "-le":
return tsLeq
case "-ge":
return tsGeq
case "-lt":
return tsLss
case "-gt":
return tsGtr
default:
return illegalTok
}
}

@@ -1,717 +0,0 @@
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package syntax
// Node represents an AST node.
type Node interface {
// Pos returns the first character of the node
Pos() Pos
// End returns the character immediately after the node
End() Pos
}
// File is a shell program.
type File struct {
Name string
Stmts []*Stmt
Comments []*Comment
// Lines contains the offset of the first character for each
// line (the first entry is always 0)
Lines []int
}
func (f *File) Pos() Pos { return stmtFirstPos(f.Stmts) }
func (f *File) End() Pos { return stmtLastEnd(f.Stmts) }
func (f *File) Position(p Pos) (pos Position) {
intp := int(p)
pos.Offset = intp - 1
if i := searchInts(f.Lines, intp); i >= 0 {
pos.Line, pos.Column = i+1, intp-f.Lines[i]
}
return
}
// Inlined version of:
// sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
func searchInts(a []int, x int) int {
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
}
func posMax(p1, p2 Pos) Pos {
if p2 > p1 {
return p2
}
return p1
}
// Comment represents a single comment on a single line.
type Comment struct {
Hash Pos
Text string
}
func (c *Comment) Pos() Pos { return c.Hash }
func (c *Comment) End() Pos { return posAddStr(c.Hash, c.Text) }
// Stmt represents a statement, otherwise known as a compound command.
// It is comprised of a command and other components that may come
// before or after it.
type Stmt struct {
Cmd Command
Position Pos
SemiPos Pos
Negated bool
Background bool
Assigns []*Assign
Redirs []*Redirect
}
func (s *Stmt) Pos() Pos { return s.Position }
func (s *Stmt) End() Pos {
if s.SemiPos > 0 {
return s.SemiPos + 1
}
end := s.Position
if s.Negated {
end = posAdd(end, 1)
}
if s.Cmd != nil {
end = s.Cmd.End()
}
if len(s.Assigns) > 0 {
assEnd := s.Assigns[len(s.Assigns)-1].End()
end = posMax(end, assEnd)
}
if len(s.Redirs) > 0 {
redEnd := s.Redirs[len(s.Redirs)-1].End()
end = posMax(end, redEnd)
}
return end
}
// Command represents all nodes that are simple commands, which are
// directly placed in a Stmt.
type Command interface {
Node
commandNode()
}
func (*CallExpr) commandNode() {}
func (*IfClause) commandNode() {}
func (*WhileClause) commandNode() {}
func (*UntilClause) commandNode() {}
func (*ForClause) commandNode() {}
func (*CaseClause) commandNode() {}
func (*Block) commandNode() {}
func (*Subshell) commandNode() {}
func (*BinaryCmd) commandNode() {}
func (*FuncDecl) commandNode() {}
func (*ArithmCmd) commandNode() {}
func (*TestClause) commandNode() {}
func (*DeclClause) commandNode() {}
func (*EvalClause) commandNode() {}
func (*LetClause) commandNode() {}
func (*CoprocClause) commandNode() {}
// Assign represents an assignment to a variable.
type Assign struct {
Append bool
Name *Lit
Value Word
}
func (a *Assign) Pos() Pos {
if a.Name == nil {
return a.Value.Pos()
}
return a.Name.Pos()
}
func (a *Assign) End() Pos {
if a.Name != nil {
return posMax(a.Name.End(), a.Value.End())
}
return a.Value.End()
}
// Redirect represents an input/output redirection.
type Redirect struct {
OpPos Pos
Op RedirOperator
N *Lit
Word, Hdoc Word
}
func (r *Redirect) Pos() Pos {
if r.N != nil {
return r.N.Pos()
}
return r.OpPos
}
func (r *Redirect) End() Pos { return r.Word.End() }
// CallExpr represents a command execution or function call.
type CallExpr struct {
Args []Word
}
func (c *CallExpr) Pos() Pos { return c.Args[0].Pos() }
func (c *CallExpr) End() Pos { return c.Args[len(c.Args)-1].End() }
// Subshell represents a series of commands that should be executed in a
// nested shell environment.
type Subshell struct {
Lparen, Rparen Pos
Stmts []*Stmt
}
func (s *Subshell) Pos() Pos { return s.Lparen }
func (s *Subshell) End() Pos { return posAdd(s.Rparen, 1) }
// Block represents a series of commands that should be executed in a
// nested scope.
type Block struct {
Lbrace, Rbrace Pos
Stmts []*Stmt
}
func (b *Block) Pos() Pos { return b.Lbrace }
func (b *Block) End() Pos { return posAdd(b.Rbrace, 1) }
// IfClause represents an if statement.
type IfClause struct {
If, Then, Fi Pos
CondStmts []*Stmt
ThenStmts []*Stmt
Elifs []*Elif
Else Pos
ElseStmts []*Stmt
}
func (c *IfClause) Pos() Pos { return c.If }
func (c *IfClause) End() Pos { return posAdd(c.Fi, 2) }
// Elif represents an "else if" case in an if clause.
type Elif struct {
Elif, Then Pos
CondStmts []*Stmt
ThenStmts []*Stmt
}
// WhileClause represents a while clause.
type WhileClause struct {
While, Do, Done Pos
CondStmts []*Stmt
DoStmts []*Stmt
}
func (w *WhileClause) Pos() Pos { return w.While }
func (w *WhileClause) End() Pos { return posAdd(w.Done, 4) }
// UntilClause represents an until clause.
type UntilClause struct {
Until, Do, Done Pos
CondStmts []*Stmt
DoStmts []*Stmt
}
func (u *UntilClause) Pos() Pos { return u.Until }
func (u *UntilClause) End() Pos { return posAdd(u.Done, 4) }
// ForClause represents a for clause.
type ForClause struct {
For, Do, Done Pos
Loop Loop
DoStmts []*Stmt
}
func (f *ForClause) Pos() Pos { return f.For }
func (f *ForClause) End() Pos { return posAdd(f.Done, 4) }
// Loop represents all nodes that can be loops in a for clause.
type Loop interface {
Node
loopNode()
}
func (*WordIter) loopNode() {}
func (*CStyleLoop) loopNode() {}
// WordIter represents the iteration of a variable over a series of
// words in a for clause.
type WordIter struct {
Name Lit
List []Word
}
func (w *WordIter) Pos() Pos { return w.Name.Pos() }
func (w *WordIter) End() Pos { return posMax(w.Name.End(), wordLastEnd(w.List)) }
// CStyleLoop represents the behaviour of a for clause similar to the C
// language.
//
// This node will never appear when in PosixConformant mode.
type CStyleLoop struct {
Lparen, Rparen Pos
Init, Cond, Post ArithmExpr
}
func (c *CStyleLoop) Pos() Pos { return c.Lparen }
func (c *CStyleLoop) End() Pos { return posAdd(c.Rparen, 2) }
// BinaryCmd represents a binary expression between two statements.
type BinaryCmd struct {
OpPos Pos
Op BinCmdOperator
X, Y *Stmt
}
func (b *BinaryCmd) Pos() Pos { return b.X.Pos() }
func (b *BinaryCmd) End() Pos { return b.Y.End() }
// FuncDecl represents the declaration of a function.
type FuncDecl struct {
Position Pos
BashStyle bool
Name Lit
Body *Stmt
}
func (f *FuncDecl) Pos() Pos { return f.Position }
func (f *FuncDecl) End() Pos { return f.Body.End() }
// Word represents a list of nodes that are contiguous to each other.
// The word is delimited by word boundaries.
type Word struct {
Parts []WordPart
}
func (w *Word) Pos() Pos { return partsFirstPos(w.Parts) }
func (w *Word) End() Pos { return partsLastEnd(w.Parts) }
// WordPart represents all nodes that can form a word.
type WordPart interface {
Node
wordPartNode()
}
func (*Lit) wordPartNode() {}
func (*SglQuoted) wordPartNode() {}
func (*DblQuoted) wordPartNode() {}
func (*ParamExp) wordPartNode() {}
func (*CmdSubst) wordPartNode() {}
func (*ArithmExp) wordPartNode() {}
func (*ProcSubst) wordPartNode() {}
func (*ArrayExpr) wordPartNode() {}
func (*ExtGlob) wordPartNode() {}
// Lit represents an unquoted string consisting of characters that were
// not tokenized.
type Lit struct {
ValuePos Pos
Value string
}
func (l *Lit) Pos() Pos { return l.ValuePos }
func (l *Lit) End() Pos { return posAddStr(l.ValuePos, l.Value) }
// SglQuoted represents a string within single quotes.
type SglQuoted struct {
Position Pos
Dollar bool
Value string
}
func (q *SglQuoted) Pos() Pos { return q.Position }
func (q *SglQuoted) End() Pos {
pos := posAdd(q.Position, 2+len(q.Value))
if pos > 0 && q.Dollar {
pos++
}
return pos
}
// DblQuoted represents a list of nodes within double quotes.
type DblQuoted struct {
Position Pos
Dollar bool
Parts []WordPart
}
func (q *DblQuoted) Pos() Pos { return q.Position }
func (q *DblQuoted) End() Pos {
if q.Position == 0 {
return defaultPos
}
end := q.Position
if len(q.Parts) > 0 {
end = partsLastEnd(q.Parts)
} else if q.Dollar {
end += 2
} else {
end++
}
return posAdd(end, 1)
}
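// Worked example: an empty "" starting at position p has End p+2, one
// past the closing quote; an empty $"" has End p+3; when Parts is
// non-empty, the closing quote instead follows the last part.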
// CmdSubst represents a command substitution.
type CmdSubst struct {
Left, Right Pos
Stmts []*Stmt
}
func (c *CmdSubst) Pos() Pos { return c.Left }
func (c *CmdSubst) End() Pos { return posAdd(c.Right, 1) }
// ParamExp represents a parameter expansion.
type ParamExp struct {
Dollar, Rbrace Pos
Short, Length bool
Param Lit
Ind *Index
Slice *Slice
Repl *Replace
Exp *Expansion
}
func (p *ParamExp) Pos() Pos { return p.Dollar }
func (p *ParamExp) End() Pos {
if p.Rbrace > 0 {
return p.Rbrace + 1
}
return p.Param.End()
}
// Index represents access to an array via an index inside a ParamExp.
//
// This node will never appear when in PosixConformant mode.
type Index struct {
Word Word
}
// Slice represents character slicing inside a ParamExp.
//
// This node will never appear when in PosixConformant mode.
type Slice struct {
Offset, Length Word
}
// Replace represents a search and replace inside a ParamExp.
type Replace struct {
All bool
Orig, With Word
}
// Expansion represents string manipulation in a ParamExp other than
// those covered by Replace.
type Expansion struct {
Op ParExpOperator
Word Word
}
// ArithmExp represents an arithmetic expansion.
type ArithmExp struct {
Left, Right Pos
Bracket bool
X ArithmExpr
}
func (a *ArithmExp) Pos() Pos { return a.Left }
func (a *ArithmExp) End() Pos {
if a.Bracket {
return posAdd(a.Right, 1)
}
return posAdd(a.Right, 2)
}
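// Worked example (assuming Right records the start of the closing
// token): "$((1+2))" ends two characters after Right because of the
// two-character "))", while the old "$[1+2]" form (Bracket == true)
// closes with a single "]", hence Right+1.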
// ArithmCmd represents an arithmetic command.
//
// This node will never appear when in PosixConformant mode.
type ArithmCmd struct {
Left, Right Pos
X ArithmExpr
}
func (a *ArithmCmd) Pos() Pos { return a.Left }
func (a *ArithmCmd) End() Pos { return posAdd(a.Right, 2) }
// ArithmExpr represents all nodes that form arithmetic expressions.
type ArithmExpr interface {
Node
arithmExprNode()
}
func (*BinaryArithm) arithmExprNode() {}
func (*UnaryArithm) arithmExprNode() {}
func (*ParenArithm) arithmExprNode() {}
func (*Word) arithmExprNode() {}
// BinaryArithm represents a binary expression between two arithmetic
// expressions.
type BinaryArithm struct {
OpPos Pos
Op Token
X, Y ArithmExpr
}
func (b *BinaryArithm) Pos() Pos { return b.X.Pos() }
func (b *BinaryArithm) End() Pos { return b.Y.End() }
// UnaryArithm represents a unary expression over a node, either before
// or after it.
type UnaryArithm struct {
OpPos Pos
Op Token
Post bool
X ArithmExpr
}
func (u *UnaryArithm) Pos() Pos {
if u.Post {
return u.X.Pos()
}
return u.OpPos
}
func (u *UnaryArithm) End() Pos {
if u.Post {
return posAdd(u.OpPos, 2)
}
return u.X.End()
}
// ParenArithm represents an expression within parentheses inside an
// ArithmExp.
type ParenArithm struct {
Lparen, Rparen Pos
X ArithmExpr
}
func (p *ParenArithm) Pos() Pos { return p.Lparen }
func (p *ParenArithm) End() Pos { return posAdd(p.Rparen, 1) }
// CaseClause represents a case (switch) clause.
type CaseClause struct {
Case, Esac Pos
Word Word
List []*PatternList
}
func (c *CaseClause) Pos() Pos { return c.Case }
func (c *CaseClause) End() Pos { return posAdd(c.Esac, 4) }
// PatternList represents a pattern list (case) within a CaseClause.
type PatternList struct {
Op CaseOperator
OpPos Pos
Patterns []Word
Stmts []*Stmt
}
// TestClause represents a Bash extended test clause.
//
// This node will never appear when in PosixConformant mode.
type TestClause struct {
Left, Right Pos
X TestExpr
}
func (t *TestClause) Pos() Pos { return t.Left }
func (t *TestClause) End() Pos { return posAdd(t.Right, 2) }
// TestExpr represents all nodes that form test expressions.
type TestExpr interface {
Node
testExprNode()
}
func (*BinaryTest) testExprNode() {}
func (*UnaryTest) testExprNode() {}
func (*ParenTest) testExprNode() {}
func (*Word) testExprNode() {}
// BinaryTest represents a binary expression between two test
// expressions.
type BinaryTest struct {
OpPos Pos
Op BinTestOperator
X, Y TestExpr
}
func (b *BinaryTest) Pos() Pos { return b.X.Pos() }
func (b *BinaryTest) End() Pos { return b.Y.End() }
// UnaryTest represents a unary expression over a node. Unlike
// UnaryArithm, the operator always precedes its operand.
type UnaryTest struct {
OpPos Pos
Op UnTestOperator
X TestExpr
}
func (u *UnaryTest) Pos() Pos { return u.OpPos }
func (u *UnaryTest) End() Pos { return u.X.End() }
// ParenTest represents an expression within parentheses inside a
// TestClause.
type ParenTest struct {
Lparen, Rparen Pos
X TestExpr
}
func (p *ParenTest) Pos() Pos { return p.Lparen }
func (p *ParenTest) End() Pos { return posAdd(p.Rparen, 1) }
// DeclClause represents a Bash declare clause.
//
// This node will never appear when in PosixConformant mode.
type DeclClause struct {
Position Pos
Variant string
Opts []Word
Assigns []*Assign
}
func (d *DeclClause) Pos() Pos { return d.Position }
func (d *DeclClause) End() Pos {
end := wordLastEnd(d.Opts)
if len(d.Assigns) > 0 {
assignEnd := d.Assigns[len(d.Assigns)-1].End()
end = posMax(end, assignEnd)
}
return end
}
// ArrayExpr represents a Bash array expression.
//
// This node will never appear when in PosixConformant mode.
type ArrayExpr struct {
Lparen, Rparen Pos
List []Word
}
func (a *ArrayExpr) Pos() Pos { return a.Lparen }
func (a *ArrayExpr) End() Pos { return posAdd(a.Rparen, 1) }
// ExtGlob represents a Bash extended globbing expression. Note that
// these are parsed independently of whether shopt has been called or
// not.
//
// This node will never appear when in PosixConformant mode.
type ExtGlob struct {
Op GlobOperator
Pattern Lit
}
func (e *ExtGlob) Pos() Pos { return posAdd(e.Pattern.Pos(), -2) }
func (e *ExtGlob) End() Pos { return posAdd(e.Pattern.End(), 1) }
// ProcSubst represents a Bash process substitution.
//
// This node will never appear when in PosixConformant mode.
type ProcSubst struct {
OpPos, Rparen Pos
Op ProcOperator
Stmts []*Stmt
}
func (s *ProcSubst) Pos() Pos { return s.OpPos }
func (s *ProcSubst) End() Pos { return posAdd(s.Rparen, 1) }
// EvalClause represents a Bash eval clause.
//
// This node will never appear when in PosixConformant mode.
type EvalClause struct {
Eval Pos
Stmt *Stmt
}
func (e *EvalClause) Pos() Pos { return e.Eval }
func (e *EvalClause) End() Pos {
if e.Stmt == nil {
return posAdd(e.Eval, 4)
}
return e.Stmt.End()
}
// CoprocClause represents a Bash coproc clause.
//
// This node will never appear when in PosixConformant mode.
type CoprocClause struct {
Coproc Pos
Name *Lit
Stmt *Stmt
}
func (c *CoprocClause) Pos() Pos { return c.Coproc }
func (c *CoprocClause) End() Pos { return c.Stmt.End() }
// LetClause represents a Bash let clause.
//
// This node will never appear when in PosixConformant mode.
type LetClause struct {
Let Pos
Exprs []ArithmExpr
}
func (l *LetClause) Pos() Pos { return l.Let }
func (l *LetClause) End() Pos { return l.Exprs[len(l.Exprs)-1].End() }
func posAdd(pos Pos, n int) Pos {
if pos == defaultPos {
return pos
}
return pos + Pos(n)
}
func posAddStr(pos Pos, s string) Pos {
return posAdd(pos, len(s))
}
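// Worked example: posAdd(defaultPos, 4) == defaultPos, i.e. the zero
// (invalid) position is sticky. This is what keeps End results at the
// default position for nodes whose positions were never set.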
func stmtFirstPos(sts []*Stmt) Pos {
if len(sts) == 0 {
return defaultPos
}
return sts[0].Pos()
}
func stmtLastEnd(sts []*Stmt) Pos {
if len(sts) == 0 {
return defaultPos
}
return sts[len(sts)-1].End()
}
func partsFirstPos(ps []WordPart) Pos {
if len(ps) == 0 {
return defaultPos
}
return ps[0].Pos()
}
func partsLastEnd(ps []WordPart) Pos {
if len(ps) == 0 {
return defaultPos
}
return ps[len(ps)-1].End()
}
func wordLastEnd(ws []Word) Pos {
if len(ws) == 0 {
return defaultPos
}
return ws[len(ws)-1].End()
}


@@ -1,423 +0,0 @@
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package syntax
// Token is the set of lexical tokens and reserved words.
type Token int
// The list of all possible tokens and reserved words.
const (
illegalTok Token = iota
_EOF
_Lit
_LitWord
sglQuote // '
dblQuote // "
bckQuote // `
And // &
AndExpr // &&
OrExpr // ||
Or // |
pipeAll // |& - bash
dollar // $
dollSglQuote // $' - bash
dollDblQuote // $" - bash
dollBrace // ${
dollBrack // $[
dollParen // $(
dollDblParen // $((
leftBrack // [
leftParen // (
dblLeftParen // (( - bash
rightBrace // }
rightBrack // ]
rightParen // )
dblRightParen // ))
semicolon // ;
dblSemicolon // ;;
semiFall // ;& - bash
dblSemiFall // ;;& - bash
Mul // *
Not // !
Inc // ++
Dec // --
Pow // **
Eql // ==
Neq // !=
Leq // <=
Geq // >=
AddAssgn // +=
SubAssgn // -=
MulAssgn // *=
QuoAssgn // /=
RemAssgn // %=
AndAssgn // &=
OrAssgn // |=
XorAssgn // ^=
ShlAssgn // <<=
ShrAssgn // >>=
Gtr // >
Shr // >>
Lss // <
rdrInOut // <>
dplIn // <&
dplOut // >&
clbOut // >|
Shl // <<
dashHdoc // <<-
wordHdoc // <<< - bash
rdrAll // &> - bash
appAll // &>> - bash
cmdIn // <( - bash
cmdOut // >( - bash
Add // +
ColAdd // :+
Sub // -
ColSub // :-
Quest // ?
ColQuest // :?
Assgn // =
ColAssgn // :=
Rem // %
dblRem // %%
Hash // #
dblHash // ##
Xor // ^
dblXor // ^^ - bash
Comma // ,
dblComma // ,, - bash
Quo // /
dblQuo // //
Colon // :
tsNot // !
tsExists // -e
tsRegFile // -f
tsDirect // -d
tsCharSp // -c
tsBlckSp // -b
tsNmPipe // -p
tsSocket // -S
tsSmbLink // -L
tsGIDSet // -g
tsUIDSet // -u
tsRead // -r
tsWrite // -w
tsExec // -x
tsNoEmpty // -s
tsFdTerm // -t
tsEmpStr // -z
tsNempStr // -n
tsOptSet // -o
tsVarSet // -v
tsRefVar // -R
tsReMatch // =~
tsNewer // -nt
tsOlder // -ot
tsDevIno // -ef
tsEql // -eq
tsNeq // -ne
tsLeq // -le
tsGeq // -ge
tsLss // -lt
tsGtr // -gt
globQuest // ?(
globMul // *(
globAdd // +(
globAt // @(
globNot // !(
)
type RedirOperator Token
const (
RdrOut = RedirOperator(Gtr) + iota
AppOut
RdrIn
RdrInOut
DplIn
DplOut
ClbOut
Hdoc
DashHdoc
WordHdoc
RdrAll
AppAll
)
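// The iota offset keeps each exported operator aligned with its private
// token: RdrOut lands on Gtr (">"), AppOut on Shr (">>"), and so on
// down to AppAll on appAll ("&>>"), which is what lets the String
// methods below simply delegate to Token.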
type ProcOperator Token
const (
CmdIn = ProcOperator(cmdIn) + iota
CmdOut
)
type GlobOperator Token
const (
GlobQuest = GlobOperator(globQuest) + iota
GlobMul
GlobAdd
GlobAt
GlobNot
)
type BinCmdOperator Token
const (
AndStmt = BinCmdOperator(AndExpr) + iota
OrStmt
Pipe
PipeAll
)
type CaseOperator Token
const (
DblSemicolon = CaseOperator(dblSemicolon) + iota
SemiFall
DblSemiFall
)
type ParExpOperator Token
const (
SubstAdd = ParExpOperator(Add) + iota
SubstColAdd
SubstSub
SubstColSub
SubstQuest
SubstColQuest
SubstAssgn
SubstColAssgn
RemSmallSuffix
RemLargeSuffix
RemSmallPrefix
RemLargePrefix
UpperFirst
UpperAll
LowerFirst
LowerAll
)
type UnTestOperator Token
const (
TsNot = UnTestOperator(tsNot) + iota
TsExists
TsRegFile
TsDirect
TsCharSp
TsBlckSp
TsNmPipe
TsSocket
TsSmbLink
TsGIDSet
TsUIDSet
TsRead
TsWrite
TsExec
TsNoEmpty
TsFdTerm
TsEmpStr
TsNempStr
TsOptSet
TsVarSet
TsRefVar
)
type BinTestOperator Token
const (
TsReMatch = BinTestOperator(tsReMatch) + iota
TsNewer
TsOlder
TsDevIno
TsEql
TsNeq
TsLeq
TsGeq
TsLss
TsGtr
AndTest = BinTestOperator(AndExpr)
OrTest = BinTestOperator(OrExpr)
TsAssgn = BinTestOperator(Assgn)
TsEqual = BinTestOperator(Eql)
TsNequal = BinTestOperator(Neq)
TsBefore = BinTestOperator(Lss)
TsAfter = BinTestOperator(Gtr)
)
func (o RedirOperator) String() string { return Token(o).String() }
func (o ProcOperator) String() string { return Token(o).String() }
func (o GlobOperator) String() string { return Token(o).String() }
func (o BinCmdOperator) String() string { return Token(o).String() }
func (o CaseOperator) String() string { return Token(o).String() }
func (o ParExpOperator) String() string { return Token(o).String() }
func (o UnTestOperator) String() string { return Token(o).String() }
func (o BinTestOperator) String() string { return Token(o).String() }
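// Each operator type above is a re-typed Token, so these methods all
// read from the tokNames table below; for example, RdrOut is defined
// from Gtr, so RdrOut.String() returns ">".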
// Pos is the internal representation of a position within a source
// file.
type Pos int
var defaultPos Pos
const maxPos = Pos(^uint(0) >> 1)
// Position describes a position within a source file including the line
// and column location. A Position is valid if the line number is > 0.
type Position struct {
Offset int // byte offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (in bytes)
}
var tokNames = map[Token]string{
illegalTok: "illegal",
_EOF: "EOF",
_Lit: "Lit",
_LitWord: "LitWord",
sglQuote: "'",
dblQuote: `"`,
bckQuote: "`",
And: "&",
AndExpr: "&&",
OrExpr: "||",
Or: "|",
pipeAll: "|&",
dollar: "$",
dollSglQuote: "$'",
dollDblQuote: `$"`,
dollBrace: "${",
dollBrack: "$[",
dollParen: "$(",
dollDblParen: "$((",
leftBrack: "[",
leftParen: "(",
dblLeftParen: "((",
rightBrace: "}",
rightBrack: "]",
rightParen: ")",
dblRightParen: "))",
semicolon: ";",
dblSemicolon: ";;",
semiFall: ";&",
dblSemiFall: ";;&",
Gtr: ">",
Shr: ">>",
Lss: "<",
rdrInOut: "<>",
dplIn: "<&",
dplOut: ">&",
clbOut: ">|",
Shl: "<<",
dashHdoc: "<<-",
wordHdoc: "<<<",
rdrAll: "&>",
appAll: "&>>",
cmdIn: "<(",
cmdOut: ">(",
Add: "+",
ColAdd: ":+",
Sub: "-",
ColSub: ":-",
Quest: "?",
ColQuest: ":?",
Assgn: "=",
ColAssgn: ":=",
Rem: "%",
dblRem: "%%",
Hash: "#",
dblHash: "##",
Xor: "^",
dblXor: "^^",
Comma: ",",
dblComma: ",,",
Quo: "/",
dblQuo: "//",
Colon: ":",
Mul: "*",
Not: "!",
Inc: "++",
Dec: "--",
Pow: "**",
Eql: "==",
Neq: "!=",
Leq: "<=",
Geq: ">=",
AddAssgn: "+=",
SubAssgn: "-=",
MulAssgn: "*=",
QuoAssgn: "/=",
RemAssgn: "%=",
AndAssgn: "&=",
OrAssgn: "|=",
XorAssgn: "^=",
ShlAssgn: "<<=",
ShrAssgn: ">>=",
tsNot: "!",
tsExists: "-e",
tsRegFile: "-f",
tsDirect: "-d",
tsCharSp: "-c",
tsBlckSp: "-b",
tsNmPipe: "-p",
tsSocket: "-S",
tsSmbLink: "-L",
tsGIDSet: "-g",
tsUIDSet: "-u",
tsRead: "-r",
tsWrite: "-w",
tsExec: "-x",
tsNoEmpty: "-s",
tsFdTerm: "-t",
tsEmpStr: "-z",
tsNempStr: "-n",
tsOptSet: "-o",
tsVarSet: "-v",
tsRefVar: "-R",
tsReMatch: "=~",
tsNewer: "-nt",
tsOlder: "-ot",
tsDevIno: "-ef",
tsEql: "-eq",
tsNeq: "-ne",
tsLeq: "-le",
tsGeq: "-ge",
tsLss: "-lt",
tsGtr: "-gt",
globQuest: "?(",
globMul: "*(",
globAdd: "+(",
globAt: "@(",
globNot: "!(",
}
func (t Token) String() string { return tokNames[t] }
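// Example: AndExpr.String() == "&&". A Token absent from tokNames would
// stringify as "", the zero value of a map lookup.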


@@ -1,189 +0,0 @@
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package syntax
import "fmt"
// Visitor holds a Visit method which is invoked for each node
// encountered by Walk. If the result visitor w is not nil, Walk visits
// each of the children of node with the visitor w, followed by a call
// of w.Visit(nil).
type Visitor interface {
Visit(node Node) (w Visitor)
}
func walkStmts(v Visitor, stmts []*Stmt) {
for _, s := range stmts {
Walk(v, s)
}
}
func walkWords(v Visitor, words []Word) {
for i := range words {
Walk(v, &words[i])
}
}
// Walk traverses an AST in depth-first order: It starts by calling
// v.Visit(node); node must not be nil. If the visitor w returned by
// v.Visit(node) is not nil, Walk is invoked recursively with visitor w
// for each of the non-nil children of node, followed by a call of
// w.Visit(nil).
func Walk(v Visitor, node Node) {
if v = v.Visit(node); v == nil {
return
}
switch x := node.(type) {
case *File:
walkStmts(v, x.Stmts)
case *Stmt:
if x.Cmd != nil {
Walk(v, x.Cmd)
}
for _, a := range x.Assigns {
Walk(v, a)
}
for _, r := range x.Redirs {
Walk(v, r)
}
case *Assign:
if x.Name != nil {
Walk(v, x.Name)
}
Walk(v, &x.Value)
case *Redirect:
if x.N != nil {
Walk(v, x.N)
}
Walk(v, &x.Word)
if len(x.Hdoc.Parts) > 0 {
Walk(v, &x.Hdoc)
}
case *CallExpr:
walkWords(v, x.Args)
case *Subshell:
walkStmts(v, x.Stmts)
case *Block:
walkStmts(v, x.Stmts)
case *IfClause:
walkStmts(v, x.CondStmts)
walkStmts(v, x.ThenStmts)
for _, elif := range x.Elifs {
walkStmts(v, elif.CondStmts)
walkStmts(v, elif.ThenStmts)
}
walkStmts(v, x.ElseStmts)
case *WhileClause:
walkStmts(v, x.CondStmts)
walkStmts(v, x.DoStmts)
case *UntilClause:
walkStmts(v, x.CondStmts)
walkStmts(v, x.DoStmts)
case *ForClause:
Walk(v, x.Loop)
walkStmts(v, x.DoStmts)
case *WordIter:
Walk(v, &x.Name)
walkWords(v, x.List)
case *CStyleLoop:
if x.Init != nil {
Walk(v, x.Init)
}
if x.Cond != nil {
Walk(v, x.Cond)
}
if x.Post != nil {
Walk(v, x.Post)
}
case *BinaryCmd:
Walk(v, x.X)
Walk(v, x.Y)
case *FuncDecl:
Walk(v, &x.Name)
Walk(v, x.Body)
case *Word:
for _, wp := range x.Parts {
Walk(v, wp)
}
case *Lit:
case *SglQuoted:
case *DblQuoted:
for _, wp := range x.Parts {
Walk(v, wp)
}
case *CmdSubst:
walkStmts(v, x.Stmts)
case *ParamExp:
Walk(v, &x.Param)
if x.Ind != nil {
Walk(v, &x.Ind.Word)
}
if x.Repl != nil {
Walk(v, &x.Repl.Orig)
Walk(v, &x.Repl.With)
}
if x.Exp != nil {
Walk(v, &x.Exp.Word)
}
case *ArithmExp:
if x.X != nil {
Walk(v, x.X)
}
case *ArithmCmd:
if x.X != nil {
Walk(v, x.X)
}
case *BinaryArithm:
Walk(v, x.X)
Walk(v, x.Y)
case *BinaryTest:
Walk(v, x.X)
Walk(v, x.Y)
case *UnaryArithm:
Walk(v, x.X)
case *UnaryTest:
Walk(v, x.X)
case *ParenArithm:
Walk(v, x.X)
case *ParenTest:
Walk(v, x.X)
case *CaseClause:
Walk(v, &x.Word)
for _, pl := range x.List {
walkWords(v, pl.Patterns)
walkStmts(v, pl.Stmts)
}
case *TestClause:
Walk(v, x.X)
case *DeclClause:
walkWords(v, x.Opts)
for _, a := range x.Assigns {
Walk(v, a)
}
case *ArrayExpr:
walkWords(v, x.List)
case *ExtGlob:
Walk(v, &x.Pattern)
case *ProcSubst:
walkStmts(v, x.Stmts)
case *EvalClause:
if x.Stmt != nil {
Walk(v, x.Stmt)
}
case *CoprocClause:
if x.Name != nil {
Walk(v, x.Name)
}
Walk(v, x.Stmt)
case *LetClause:
for _, expr := range x.Exprs {
Walk(v, expr)
}
default:
panic(fmt.Sprintf("ast.Walk: unexpected node type %T", x))
}
v.Visit(nil)
}
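// A minimal Visitor sketch, added for illustration; the countVisitor
// type is hypothetical and not part of the original source. It counts
// every node reachable from the root by returning itself from Visit.
type countVisitor struct {
	count int // non-nil nodes seen so far
}

func (c *countVisitor) Visit(node Node) Visitor {
	if node == nil {
		return nil // end of a subtree; nothing more to do
	}
	c.count++
	return c // keep walking into this node's children
}

// Usage, assuming f is a *File produced by this package's parser:
//
//	cv := &countVisitor{}
//	Walk(cv, f)
//	fmt.Println(cv.count)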

vendor/manifest

@@ -1,13 +0,0 @@
{
"version": 0,
"dependencies": [
{
"importpath": "github.com/mvdan/sh",
"repository": "https://github.com/mvdan/sh",
"vcs": "git",
"revision": "0a4761ee498179b93fe0efcd37758320c6ec73cd",
"branch": "HEAD",
"notests": true
}
]
}