From 46b8099b5cb2a9d8c97da62fd2a3b409f69292c7 Mon Sep 17 00:00:00 2001 From: Marcus Cobden Date: Fri, 28 Jul 2017 14:20:45 +0100 Subject: [PATCH] Squashed 'tools/' changes from 81d80f3..35679ee 35679ee Merge pull request #110 from weaveworks/parallel-push-errors 3ae41b6 Remove unneeded if block 51ff31a Exit on first error 0faad9f Check for errors when pushing images in parallel 74dc626 Merge pull request #108 from weaveworks/disable-apt-daily b4f1d91 Merge pull request #107 from weaveworks/docker-17-update 7436aa1 Override apt daily job to not run immediately on boot 7980f15 Merge pull request #106 from weaveworks/document-docker-install-role f741e53 Bump to Docker 17.06 from CE repo 61796a1 Update Docker CE Debian repo details 0d86f5e Allow for Docker package to be named docker-ce 065c68d Document selection of Docker installation role. 3809053 Just --porcelain; it defaults to v1 11400ea Merge pull request #105 from weaveworks/remove-weaveplugin-remnants b8b4d64 remove weaveplugin remnants 35099c9 Merge pull request #104 from weaveworks/pull-docker-py cdd48fc Pull docker-py to speed tests/builds up. e1c6c24 Merge pull request #103 from weaveworks/test-build-tags d5d71e0 Add -tags option so callers can pass in build tags 8949b2b Merge pull request #98 from weaveworks/git-status-tag ac30687 Merge pull request #100 from weaveworks/python_linting 4b125b5 Pin yapf & flake8 versions 7efb485 Lint python linting function 444755b Swap diff direction to reflect changes required c5b2434 Install flake8 & yapf 5600eac Lint python in build-tools repo 0b02ca9 Add python linting c011c0d Merge pull request #79 from kinvolk/schu/python-shebang 6577d07 Merge pull request #99 from weaveworks/shfmt-version 00ce0dc Use git status instead of diff to add 'WIP' tag 411fd13 Use shfmt v1.3.0 instead of latest from master. 0d6d4da Run shfmt 1.3 on the code. 5cdba32 Add sudo c322ca8 circle.yml: Install shfmt binary. e59c225 Install shfmt 1.3 binary. 30706e6 Install pyhcl in the build container. 960d222 Merge pull request #97 from kinvolk/alban/update-shfmt-3 1d535c7 shellcheck: fix escaping issue 5542498 Merge pull request #96 from kinvolk/alban/update-shfmt-2 32f7cc5 shfmt: fix coding style 09f72af lint: print the diff in case of error 571c7d7 Merge pull request #95 from kinvolk/alban/update-shfmt bead6ed Update for latest shfmt b08dc4d Update for latest shfmt (#94) 2ed8aaa Add no-race argument to test script (#92) 80dd78e Merge pull request #91 from weaveworks/upgrade-go-1.8.1 08dcd0d Please ./lint as shfmt changed its rules between 1.0.0 and 1.3.0. a8bc9ab Upgrade default Go version to 1.8.1. 
41c5622 Merge pull request #90 from weaveworks/build-golang-service-conf e8ebdd5 broaden imagetag regex to fix haskell build image ba3fbfa Merge pull request #89 from weaveworks/build-golang-service-conf e506f1b Fix up test script for updated shfmt 9216db8 Add stuff for service-conf build to build-goland image 66a9a93 Merge pull request #88 from weaveworks/haskell-image cb3e3a2 shfmt 74a5239 Haskell build image 4ccd42b Trying circle quay login b2c295f Merge branch 'common-build' 0ac746f Trim quay prefix in circle script c405b31 Merge pull request #87 from weaveworks/common-build 9672d7c Push build images to quay as they have sane robot accounts a2bf112 Review feedback fef9b7d Add protobuf tools 10a77ea Update readme 254f266 Don't need the image name in ffb59fc Adding a weaveworks/build-golang image with tags b817368 Update min Weave Net docker version cf87ca3 Merge pull request #86 from weaveworks/lock-kubeadm-version 3ae6919 Add example of custom SSH private key to tf_ssh's usage. cf8bd8a Add example of custom SSH private key to tf_ansi's usage. c7d3370 Lock kubeadm's Kubernetes version. faaaa6f Merge pull request #84 from weaveworks/centos-rhel ef552e7 Select weave-kube YAML URL based on K8S version. b4c1198 Upgrade default kubernetes_version to 1.6.1. b82805e Use a fixed version of kubeadm. f33888b Factorise and make kubeconfig option optional. f7b8b89 Install EPEL repo for CentOS. 615917a Fix error in decrypting AWS access key and secret. 86f97b4 Add CentOS 7 AMI and username for AWS via Terraform. eafd810 Add tf_ansi example with Ansible variables. 2b05787 Skip setup of Docker over TCP for CentOS/RHEL. 84c420b Add docker-ce role for CentOS/RHEL. 00a820c Add setup_weave-net_debug.yml playbook for user issues' debugging. 3eae480 Upgrade default kubernetes_version to 1.5.4. 753921c Allow injection of Docker installation role. e1ff90d Fix kubectl taint command for 1.5. b989e97 Fix typo in kubectl taint for single node K8S cluster. 541f58d Remove 'install_recommends: no' for ethtool. c3f9711 Make Ansible role docker-from-get.docker.com work on RHEL/CentOS. 038c0ae Add frequently used OS images, for convenience. d30649f Add --insecure-registry to docker.conf 1dd9218 shfmt -i 4 -w push-images 6de96ac Add option to not push docker hub images 310f53d Add push-images script from cortex 8641381 Add port 6443 to kubeadm join commands for K8S 1.6+. 50bf0bc Force type of K8S token to string. 08ab1c0 Remove trailing whitespaces. ae9efb8 Enable testing against K8S release candidates. 9e32194 Secure GCP servers for Scope: open port 80. a22536a Secure GCP servers for Scope. 
89c3a29 Merge pull request #78 from weaveworks/lint-merge-rebase-issue-in-docs 73ad56d Add linter function to avoid bad merge/rebase artefact 31d069d Change Python shebang to `#!/usr/bin/env python` 52d695c Merge pull request #77 from kinvolk/schu/fix-relative-weave-path 77aed01 Merge pull request #73 from weaveworks/mike/sched/fix-unicode-issue 7c080f4 integration/sanity_check: disable SC1090 d6d360a integration/gce.sh: update gcloud command e8def2c provisioning/setup: fix shellcheck SC2140 cc02224 integration/config: fix weave path 9c0d6a5 Fix config_management/README.md 334708c Merge pull request #75 from kinvolk/alban/external-build-1 da2505d gce.sh: template: print creation date e676854 integration tests: fix user account 8530836 host nameing: add repo name b556c0a gce.sh: fix deletion of gce instances 2ecd1c2 integration: fix GCE --zones/--zone parameter 3e863df sched: Fix unicode encoding issues 51785b5 Use rm -f and set current dir using BASH_SOURCE. f5c6d68 Merge pull request #71 from kinvolk/schu/fix-linter-warnings 0269628 Document requirement for `lint_sh` 9a3f09e Fix linter warnings efcf9d2 Merge pull request #53 from weaveworks/2647-testing-mvp d31ea57 Weave Kube playbook now works with multiple nodes. 27868dd Add GCP firewall rule for FastDP crypto. edc8bb3 Differentiated name of dev and test playbooks, to avoid confusion. efa3df7 Moved utility Ansible Yaml to library directory. fcd2769 Add shorthands to run Ansible playbooks against Terraform-provisioned virtual machines. f7946fb Add shorthands to SSH into Terraform-provisioned virtual machines. aad5c6f Mention Terraform and Ansible in README.md. dddabf0 Add Terraform output required for templates' creation. dcc7d02 Add Ansible configuration playbooks for development environments. f86481c Add Ansible configuration playbooks for Docker, K8S and Weave-Net. efedd25 Git-ignore Ansible retry files. 765c4ca Add helper functions to setup Terraform programmatically. 801dd1d Add Terraform cloud provisioning scripts. b8017e1 Install hclfmt on CircleCI. 4815e19 Git-ignore Terraform state files. 0aaebc7 Add script to generate cartesian product of dependencies of cross-version testing. 007d90a Add script to list OS images from GCP, AWS and DO. ca65cc0 Add script to list relevant versions of Go, Docker and Kubernetes. aa66f44 Scripts now source dependencies using absolute path (previously breaking make depending on current directory). 7865e86 Add -p option to parallelise lint. 36c1835 Merge pull request #69 from weaveworks/mflag 9857568 Use mflag and mflagext package from weaveworks/common. 9799112 Quote bash variable. 10a36b3 Merge pull request #67 from weaveworks/shfmt-ignore a59884f Add support for .lintignore. 03cc598 Don't lint generated protobuf code. 2b55c2d Merge pull request #66 from weaveworks/reduce-test-timeout d4e163c Make timeout a flag 49a8609 Reduce test timeout 8fa15cb Merge pull request #63 from weaveworks/test-defaults b783528 Tweak test script so it can be run on a mca a3b18bf Merge pull request #65 from weaveworks/fix-integration-tests ecb5602 Fix integration tests f9dcbf6 ... without tab (clearly not my day) a6215c3 Add break I forgot 0e6832d Remove incorrectly added tab eb26c68 Merge pull request #64 from weaveworks/remove-test-package-linting f088e83 Review feedback 2c6e83e Remove test package linting 2b3a1bb Merge pull request #62 from weaveworks/revert-61-test-defaults 8c3883a Revert "Make no-go-get the default, and don't assume -tags netgo" e75c226 Fix bug in GC of firewall rules. 
e49754e Merge pull request #51 from weaveworks/gc-firewall-rules 191f487 Add flag to enale/disable firewall rules' GC. 567905c Add GC of firewall rules for weave-net-tests to scheduler. 03119e1 Fix typo in GC of firewall rules. bbe3844 Fix regular expression for firewall rules. c5c23ce Pre-change refactoring: splitted gc_project function into smaller methods for better readability. ed5529f GC firewall rules ed8e757 Merge pull request #61 from weaveworks/test-defaults 57856e6 Merge pull request #56 from weaveworks/remove-wcloud dd5f3e6 Add -p flag to test, run test in parallel 62f6f94 Make no-go-get the default, and don't assume -tags netgo 8946588 Merge pull request #60 from weaveworks/2647-gc-weave-net-tests 4085df9 Scheduler now also garbage-collects VMs from weave-net-tests. 4b7d5c6 Merge pull request #59 from weaveworks/57-fix-lint-properly b7f0e69 Merge pull request #58 from weaveworks/fix-lint 794702c Pin version of shfmt ab1b11d Fix lint d1a5e46 Remove wcloud cli tool git-subtree-dir: tools git-subtree-split: 35679ee5ff17c4edf864b7c43dc70a40337fcd80 --- .gitignore | 5 +- README.md | 14 +- build/Makefile | 46 + build/golang/Dockerfile | 49 + build/golang/build.sh | 22 + build/haskell/Dockerfile | 4 + build/haskell/build.sh | 12 + build/haskell/copy-libraries | 41 + circle.yml | 28 +- cmd/wcloud/Makefile | 11 - cmd/wcloud/cli.go | 238 --- cmd/wcloud/client.go | 150 -- cmd/wcloud/types.go | 43 - config_management/README.md | 141 ++ config_management/group_vars/all | 11 + .../library/setup_ansible_dependencies.yml | 33 + .../dev-tools/files/apt-daily.timer.conf | 2 + .../roles/dev-tools/tasks/main.yml | 48 + .../docker-configuration/files/docker.conf | 3 + .../roles/docker-configuration/tasks/main.yml | 36 + .../tasks/debian.yml | 35 + .../docker-from-docker-ce-repo/tasks/main.yml | 10 + .../tasks/redhat.yml | 29 + .../docker-from-docker-repo/tasks/debian.yml | 35 + .../docker-from-docker-repo/tasks/main.yml | 10 + .../docker-from-docker-repo/tasks/redhat.yml | 25 + .../tasks/debian.yml | 8 + .../docker-from-get.docker.com/tasks/main.yml | 10 + .../tasks/redhat.yml | 11 + .../roles/docker-from-tarball/tasks/main.yml | 61 + .../roles/docker-from-tarball/vars/main.yml | 5 + .../roles/docker-install/tasks/main.yml | 30 + .../docker-prerequisites/tasks/debian.yml | 11 + .../roles/docker-prerequisites/tasks/main.yml | 5 + .../roles/golang-from-tarball/tasks/main.yml | 36 + .../roles/kubelet-stop/tasks/main.yml | 14 + .../kubernetes-docker-images/tasks/main.yml | 14 + .../roles/kubernetes-install/tasks/debian.yml | 37 + .../roles/kubernetes-install/tasks/main.yml | 16 + .../roles/kubernetes-install/tasks/redhat.yml | 30 + .../roles/kubernetes-start/tasks/main.yml | 42 + .../roles/setup-ansible/pre_tasks/main.yml | 26 + .../roles/sock-shop/tasks/tasks.yml | 34 + .../roles/weave-kube/tasks/main.yml | 24 + .../roles/weave-net-sources/tasks/main.yml | 24 + .../roles/weave-net-utilities/tasks/main.yml | 56 + .../roles/weave-net/tasks/main.yml | 26 + config_management/setup_weave-kube.yml | 27 + config_management/setup_weave-net_debug.yml | 18 + config_management/setup_weave-net_dev.yml | 20 + config_management/setup_weave-net_test.yml | 20 + dependencies/cross_versions.py | 91 + dependencies/list_os_images.sh | 85 + dependencies/list_versions.py | 343 ++++ image-tag | 2 +- integration/config.sh | 11 +- integration/gce.sh | 33 +- integration/sanity_check.sh | 4 +- lint | 126 +- provisioning/README.md | 55 + provisioning/aws/README.md | 90 + provisioning/aws/main.tf | 137 ++ 
provisioning/aws/outputs.tf | 54 + provisioning/aws/variables.tf | 68 + provisioning/do/README.md | 98 + provisioning/do/main.tf | 42 + provisioning/do/outputs.tf | 57 + provisioning/do/variables.tf | 185 ++ provisioning/gcp/README.md | 126 ++ provisioning/gcp/main.tf | 79 + provisioning/gcp/outputs.tf | 66 + provisioning/gcp/variables.tf | 77 + provisioning/setup.sh | 361 ++++ push-images | 53 + runner/runner.go | 5 +- sched | 12 +- scheduler/main.py | 240 ++- socks/main.go | 4 +- test | 69 +- vendor/github.com/mvdan/sh/LICENSE | 27 - vendor/github.com/mvdan/sh/cmd/shfmt/main.go | 202 -- vendor/github.com/mvdan/sh/syntax/doc.go | 6 - vendor/github.com/mvdan/sh/syntax/lexer.go | 962 ---------- vendor/github.com/mvdan/sh/syntax/nodes.go | 717 ------- vendor/github.com/mvdan/sh/syntax/parser.go | 1659 ----------------- vendor/github.com/mvdan/sh/syntax/printer.go | 1147 ------------ vendor/github.com/mvdan/sh/syntax/tokens.go | 423 ----- vendor/github.com/mvdan/sh/syntax/walk.go | 189 -- vendor/manifest | 13 - 89 files changed, 3647 insertions(+), 5957 deletions(-) create mode 100644 build/Makefile create mode 100644 build/golang/Dockerfile create mode 100755 build/golang/build.sh create mode 100644 build/haskell/Dockerfile create mode 100755 build/haskell/build.sh create mode 100755 build/haskell/copy-libraries delete mode 100644 cmd/wcloud/Makefile delete mode 100644 cmd/wcloud/cli.go delete mode 100644 cmd/wcloud/client.go delete mode 100644 cmd/wcloud/types.go create mode 100644 config_management/README.md create mode 100644 config_management/group_vars/all create mode 100644 config_management/library/setup_ansible_dependencies.yml create mode 100644 config_management/roles/dev-tools/files/apt-daily.timer.conf create mode 100644 config_management/roles/dev-tools/tasks/main.yml create mode 100644 config_management/roles/docker-configuration/files/docker.conf create mode 100644 config_management/roles/docker-configuration/tasks/main.yml create mode 100644 config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml create mode 100644 config_management/roles/docker-from-docker-ce-repo/tasks/main.yml create mode 100644 config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml create mode 100644 config_management/roles/docker-from-docker-repo/tasks/debian.yml create mode 100644 config_management/roles/docker-from-docker-repo/tasks/main.yml create mode 100644 config_management/roles/docker-from-docker-repo/tasks/redhat.yml create mode 100644 config_management/roles/docker-from-get.docker.com/tasks/debian.yml create mode 100644 config_management/roles/docker-from-get.docker.com/tasks/main.yml create mode 100644 config_management/roles/docker-from-get.docker.com/tasks/redhat.yml create mode 100644 config_management/roles/docker-from-tarball/tasks/main.yml create mode 100644 config_management/roles/docker-from-tarball/vars/main.yml create mode 100644 config_management/roles/docker-install/tasks/main.yml create mode 100644 config_management/roles/docker-prerequisites/tasks/debian.yml create mode 100644 config_management/roles/docker-prerequisites/tasks/main.yml create mode 100644 config_management/roles/golang-from-tarball/tasks/main.yml create mode 100644 config_management/roles/kubelet-stop/tasks/main.yml create mode 100644 config_management/roles/kubernetes-docker-images/tasks/main.yml create mode 100644 config_management/roles/kubernetes-install/tasks/debian.yml create mode 100644 config_management/roles/kubernetes-install/tasks/main.yml create mode 100644 
config_management/roles/kubernetes-install/tasks/redhat.yml create mode 100644 config_management/roles/kubernetes-start/tasks/main.yml create mode 100644 config_management/roles/setup-ansible/pre_tasks/main.yml create mode 100644 config_management/roles/sock-shop/tasks/tasks.yml create mode 100644 config_management/roles/weave-kube/tasks/main.yml create mode 100644 config_management/roles/weave-net-sources/tasks/main.yml create mode 100644 config_management/roles/weave-net-utilities/tasks/main.yml create mode 100644 config_management/roles/weave-net/tasks/main.yml create mode 100644 config_management/setup_weave-kube.yml create mode 100644 config_management/setup_weave-net_debug.yml create mode 100644 config_management/setup_weave-net_dev.yml create mode 100644 config_management/setup_weave-net_test.yml create mode 100755 dependencies/cross_versions.py create mode 100755 dependencies/list_os_images.sh create mode 100755 dependencies/list_versions.py create mode 100755 provisioning/README.md create mode 100644 provisioning/aws/README.md create mode 100755 provisioning/aws/main.tf create mode 100755 provisioning/aws/outputs.tf create mode 100755 provisioning/aws/variables.tf create mode 100755 provisioning/do/README.md create mode 100755 provisioning/do/main.tf create mode 100755 provisioning/do/outputs.tf create mode 100755 provisioning/do/variables.tf create mode 100755 provisioning/gcp/README.md create mode 100755 provisioning/gcp/main.tf create mode 100755 provisioning/gcp/outputs.tf create mode 100755 provisioning/gcp/variables.tf create mode 100755 provisioning/setup.sh create mode 100755 push-images delete mode 100644 vendor/github.com/mvdan/sh/LICENSE delete mode 100644 vendor/github.com/mvdan/sh/cmd/shfmt/main.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/doc.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/lexer.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/nodes.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/parser.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/printer.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/tokens.go delete mode 100644 vendor/github.com/mvdan/sh/syntax/walk.go delete mode 100644 vendor/manifest diff --git a/.gitignore b/.gitignore index 635dff1..308ae9d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,9 @@ cover/cover socks/proxy socks/image.tar runner/runner -cmd/wcloud/wcloud *.pyc *~ +terraform.tfstate +terraform.tfstate.backup +*.retry +build/**/.uptodate diff --git a/README.md b/README.md index e570ef7..9092b8e 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,17 @@ Included in this repo are tools shared by weave.git and scope.git. They include +- ```build```: a set of docker base-images for building weave + projects. These should be used instead of giving each project its + own build image. +- ```provisioning```: a set of Terraform scripts to provision virtual machines in GCP, AWS or Digital Ocean. +- ```config_management```: a set of Ansible playbooks to configure virtual machines for development, testing, etc. 
- ```cover```: a tool which merges overlapping coverage reports generated by go test - ```files-with-type```: a tool to search directories for files of a given MIME type -- ```lint```: a script to lint Go project; runs various tools like golint, go - vet, errcheck etc +- ```lint```: a script to lint go, sh and hcl files; runs various tools like + golint, go vet, errcheck, shellcheck etc - ```rebuild-image```: a script to rebuild docker images when their input files change; useful when you're using docker images to build your software, but you don't want to build the image every time. @@ -24,6 +29,11 @@ Included in this repo are tools shared by weave.git and scope.git. They include - ```scheduler```: an appengine application that can be used to distribute tests across different shards in CircleCI. +## Requirements + +- ```lint``` requires shfmt to lint sh files; get shfmt with + ```go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt``` + ## Using build-tools.git To allow you to tie your code to a specific version of build-tools.git, such diff --git a/build/Makefile b/build/Makefile new file mode 100644 index 0000000..cea049b --- /dev/null +++ b/build/Makefile @@ -0,0 +1,46 @@ +.PHONY: all clean images +.DEFAULT_GOAL := all + +# Boilerplate for building Docker containers. +# All this must go at top of file I'm afraid. +IMAGE_PREFIX := quay.io/weaveworks/build- +IMAGE_TAG := $(shell ../image-tag) +UPTODATE := .uptodate + +# Every directory with a Dockerfile in it builds an image called +# $(IMAGE_PREFIX). Dependencies (i.e. things that go in the image) +# still need to be explicitly declared. +%/$(UPTODATE): %/Dockerfile %/* + $(SUDO) docker build -t $(IMAGE_PREFIX)$(shell basename $(@D)) $(@D)/ + $(SUDO) docker tag $(IMAGE_PREFIX)$(shell basename $(@D)) $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG) + touch $@ + +# Get a list of directories containing Dockerfiles +DOCKERFILES := $(shell find . -name tools -prune -o -name vendor -prune -o -type f -name 'Dockerfile' -print) +UPTODATE_FILES := $(patsubst %/Dockerfile,%/$(UPTODATE),$(DOCKERFILES)) +DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES)) +IMAGE_NAMES := $(foreach dir,$(DOCKER_IMAGE_DIRS),$(patsubst %,$(IMAGE_PREFIX)%,$(shell basename $(dir)))) +images: + $(info $(IMAGE_NAMES)) + @echo > /dev/null + +# Define imagetag-golang, etc, for each image, which parses the dockerfile and +# prints an image tag.
For example: +# FROM golang:1.8.1-stretch +# in the "foo/Dockerfile" becomes: +# $ make imagetag-foo +# 1.8.1-stretch +define imagetag_dep +.PHONY: imagetag-$(1) +$(patsubst $(IMAGE_PREFIX)%,imagetag-%,$(1)): $(patsubst $(IMAGE_PREFIX)%,%,$(1))/Dockerfile + @cat $$< | grep "^FROM " | head -n1 | sed 's/FROM \(.*\):\(.*\)/\2/' +endef +$(foreach image, $(IMAGE_NAMES), $(eval $(call imagetag_dep, $(image)))) + +all: $(UPTODATE_FILES) + +clean: + $(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true + rm -rf $(UPTODATE_FILES) + + diff --git a/build/golang/Dockerfile b/build/golang/Dockerfile new file mode 100644 index 0000000..8ef1d2b --- /dev/null +++ b/build/golang/Dockerfile @@ -0,0 +1,49 @@ +FROM golang:1.8.0-stretch +RUN apt-get update && \ + apt-get install -y \ + curl \ + file \ + git \ + jq \ + libprotobuf-dev \ + make \ + protobuf-compiler \ + python-pip \ + python-requests \ + python-yaml \ + unzip && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +RUN pip install attrs pyhcl +RUN curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \ + echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \ + chmod +x shfmt && \ + mv shfmt /usr/bin +RUN go clean -i net && \ + go install -tags netgo std && \ + go install -race -tags netgo std +RUN go get -tags netgo \ + github.com/FiloSottile/gvt \ + github.com/client9/misspell/cmd/misspell \ + github.com/fatih/hclfmt \ + github.com/fzipp/gocyclo \ + github.com/gogo/protobuf/gogoproto \ + github.com/gogo/protobuf/protoc-gen-gogoslick \ + github.com/golang/dep/... \ + github.com/golang/lint/golint \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/kisielk/errcheck \ + github.com/mjibson/esc \ + github.com/prometheus/prometheus/cmd/promtool && \ + rm -rf /go/pkg /go/src +RUN mkdir protoc && \ + cd protoc && \ + curl -O -L https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip && \ + unzip protoc-3.1.0-linux-x86_64.zip && \ + cp bin/protoc /usr/bin/ && \ + chmod o+x /usr/bin/protoc && \ + cd .. && \ + rm -rf protoc +RUN mkdir -p /var/run/secrets/kubernetes.io/serviceaccount && \ + touch /var/run/secrets/kubernetes.io/serviceaccount/token +COPY build.sh / +ENTRYPOINT ["/build.sh"] diff --git a/build/golang/build.sh b/build/golang/build.sh new file mode 100755 index 0000000..cf70e1c --- /dev/null +++ b/build/golang/build.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +set -eu + +if [ -n "${SRC_NAME:-}" ]; then + SRC_PATH=${SRC_PATH:-$GOPATH/src/$SRC_NAME} +elif [ -z "${SRC_PATH:-}" ]; then + echo "Must set either \$SRC_NAME or \$SRC_PATH." + exit 1 +fi + +# If we run make directly, any files created on the bind mount +# will have awkward ownership. So we switch to a user with the +# same user and group IDs as source directory. We have to set a +# few things up so that sudo works without complaining later on. 
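+# The stat calls below read the source tree's numeric uid/gid; the three +# appends then give the 'weave' user a matching passwd record, an empty +# shadow entry, and a passwordless-sudo rule, respectively.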
+uid=$(stat --format="%u" "$SRC_PATH") +gid=$(stat --format="%g" "$SRC_PATH") +echo "weave:x:$uid:$gid::$SRC_PATH:/bin/sh" >>/etc/passwd +echo "weave:*:::::::" >>/etc/shadow +echo "weave ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers + +su weave -c "PATH=$PATH make -C $SRC_PATH BUILD_IN_CONTAINER=false $*" diff --git a/build/haskell/Dockerfile b/build/haskell/Dockerfile new file mode 100644 index 0000000..8d40c66 --- /dev/null +++ b/build/haskell/Dockerfile @@ -0,0 +1,4 @@ +FROM fpco/stack-build:lts-8.9 +COPY build.sh / +COPY copy-libraries /usr/local/bin/ +ENTRYPOINT ["/build.sh"] diff --git a/build/haskell/build.sh b/build/haskell/build.sh new file mode 100755 index 0000000..e80d2ab --- /dev/null +++ b/build/haskell/build.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# +# Build a static Haskell binary using stack. + +set -eu + +if [ -z "${SRC_PATH:-}" ]; then + echo "Must set \$SRC_PATH." + exit 1 +fi + +make -C "$SRC_PATH" BUILD_IN_CONTAINER=false "$@" diff --git a/build/haskell/copy-libraries b/build/haskell/copy-libraries new file mode 100755 index 0000000..18cbba6 --- /dev/null +++ b/build/haskell/copy-libraries @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Copy dynamically linked libraries for a binary, so we can assemble a Docker +# image. +# +# Run with: +# copy-libraries /path/to/binary /output/dir +# +# Dependencies: +# - awk +# - cp +# - grep +# - ldd +# - mkdir + +set -o errexit +set -o nounset +set -o pipefail + +# Path to a Linux binary that we're going to run in the container. +binary_path="${1}" +# Path to directory to write the output to. +output_dir="${2}" + +exe_name=$(basename "${binary_path}") + +# Identify linked libraries. +libraries=($(ldd "${binary_path}" | awk '{print $(NF-1)}' | grep -v '=>')) +# Add /bin/sh, which we need for Docker imports. +libraries+=('/bin/sh') + +mkdir -p "${output_dir}" + +# Copy executable and all needed libraries into temporary directory. +cp "${binary_path}" "${output_dir}/${exe_name}" +for lib in "${libraries[@]}"; do + mkdir -p "${output_dir}/$(dirname "$lib")" + # Need -L to make sure we get actual libraries & binaries, not symlinks to + # them. + cp -L "${lib}" "${output_dir}/${lib}" +done diff --git a/circle.yml b/circle.yml index 3b3c917..976a68c 100644 --- a/circle.yml +++ b/circle.yml @@ -13,13 +13,19 @@ dependencies: - go install -tags netgo std - mkdir -p $(dirname $SRCDIR) - cp -r $(pwd)/ $SRCDIR + - | + curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \ + echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \ + chmod +x shfmt && \ + sudo mv shfmt /usr/bin - | cd $SRCDIR; go get \ github.com/fzipp/gocyclo \ github.com/golang/lint/golint \ github.com/kisielk/errcheck \ - ./vendor/github.com/mvdan/sh/cmd/shfmt + github.com/fatih/hclfmt + - pip install yapf==0.16.2 flake8==3.3.0 test: override: @@ -27,5 +33,23 @@ test: - cd $SRCDIR/cover; make - cd $SRCDIR/socks; make - cd $SRCDIR/runner; make - - cd $SRCDIR/cmd/wcloud; make + - cd $SRCDIR/build; make +deployment: + snapshot: + branch: master + commands: + - docker login -e "$DOCKER_REGISTRY_EMAIL" -u "$DOCKER_REGISTRY_USER" -p "$DOCKER_REGISTRY_PASS" "$DOCKER_REGISTRY_URL" + - | + cd $SRCDIR/build; + for image in $(make images); do + # Tag the built images with the revision of this repo. + docker push "${image}:${GIT_TAG}" + + # Tag the built images with something derived from the base images in + # their respective Dockerfiles. 
So "FROM golang:1.8.0-stretch" as a + # base image would lead to a tag of "1.8.0-stretch" + IMG_TAG=$(make "imagetag-${image#quay.io/weaveworks/build-}") + docker tag "${image}:latest" "${image}:${IMG_TAG}" + docker push "${image}:${IMG_TAG}" + done diff --git a/cmd/wcloud/Makefile b/cmd/wcloud/Makefile deleted file mode 100644 index 86b0df3..0000000 --- a/cmd/wcloud/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: all clean - -all: wcloud - -wcloud: *.go - go get ./$(@D) - go build -o $@ ./$(@D) - -clean: - rm -rf wcloud - go clean ./... diff --git a/cmd/wcloud/cli.go b/cmd/wcloud/cli.go deleted file mode 100644 index ba3c355..0000000 --- a/cmd/wcloud/cli.go +++ /dev/null @@ -1,238 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "os" - "os/user" - "path/filepath" - "strings" - "time" - - "github.com/olekukonko/tablewriter" - "gopkg.in/yaml.v2" -) - -// ArrayFlags allows you to collect repeated flags -type ArrayFlags []string - -func (a *ArrayFlags) String() string { - return strings.Join(*a, ",") -} - -// Set implements flags.Value -func (a *ArrayFlags) Set(value string) error { - *a = append(*a, value) - return nil -} - -func env(key, def string) string { - if val, ok := os.LookupEnv(key); ok { - return val - } - return def -} - -var ( - token = env("SERVICE_TOKEN", "") - baseURL = env("BASE_URL", "https://cloud.weave.works") -) - -func usage() { - fmt.Println(`Usage: - deploy : Deploy image to your configured env - list List recent deployments - config () Get (or set) the configured env - logs Show lots for the given deployment`) -} - -func main() { - if len(os.Args) <= 1 { - usage() - os.Exit(1) - } - - c := NewClient(token, baseURL) - - switch os.Args[1] { - case "deploy": - deploy(c, os.Args[2:]) - case "list": - list(c, os.Args[2:]) - case "config": - config(c, os.Args[2:]) - case "logs": - logs(c, os.Args[2:]) - case "events": - events(c, os.Args[2:]) - case "help": - usage() - default: - usage() - } -} - -func deploy(c Client, args []string) { - var ( - flags = flag.NewFlagSet("", flag.ContinueOnError) - username = flags.String("u", "", "Username to report to deploy service (default with be current user)") - services ArrayFlags - ) - flags.Var(&services, "service", "Service to update (can be repeated)") - if err := flags.Parse(args); err != nil { - usage() - return - } - args = flags.Args() - if len(args) != 1 { - usage() - return - } - parts := strings.SplitN(args[0], ":", 2) - if len(parts) < 2 { - usage() - return - } - if *username == "" { - user, err := user.Current() - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - *username = user.Username - } - deployment := Deployment{ - ImageName: parts[0], - Version: parts[1], - TriggeringUser: *username, - IntendedServices: services, - } - if err := c.Deploy(deployment); err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } -} - -func list(c Client, args []string) { - var ( - flags = flag.NewFlagSet("", flag.ContinueOnError) - since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results") - ) - if err := flags.Parse(args); err != nil { - usage() - return - } - through := time.Now() - from := through.Add(-*since) - deployments, err := c.GetDeployments(from.Unix(), through.Unix()) - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Created", "ID", "Image", "Version", "State"}) - table.SetBorder(false) - table.SetColumnSeparator(" ") - for _, deployment := range deployments 
{ - table.Append([]string{ - deployment.CreatedAt.Format(time.RFC822), - deployment.ID, - deployment.ImageName, - deployment.Version, - deployment.State, - }) - } - table.Render() -} - -func events(c Client, args []string) { - var ( - flags = flag.NewFlagSet("", flag.ContinueOnError) - since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results") - ) - if err := flags.Parse(args); err != nil { - usage() - return - } - through := time.Now() - from := through.Add(-*since) - events, err := c.GetEvents(from.Unix(), through.Unix()) - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - fmt.Println("events: ", string(events)) -} - -func loadConfig(filename string) (*Config, error) { - extension := filepath.Ext(filename) - var config Config - buf, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - if extension == ".yaml" || extension == ".yml" { - if err := yaml.Unmarshal(buf, &config); err != nil { - return nil, err - } - } else { - if err := json.NewDecoder(bytes.NewReader(buf)).Decode(&config); err != nil { - return nil, err - } - } - return &config, nil -} - -func config(c Client, args []string) { - if len(args) > 1 { - usage() - return - } - - if len(args) == 1 { - config, err := loadConfig(args[0]) - if err != nil { - fmt.Println("Error reading config:", err) - os.Exit(1) - } - - if err := c.SetConfig(config); err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - } else { - config, err := c.GetConfig() - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - buf, err := yaml.Marshal(config) - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - fmt.Println(string(buf)) - } -} - -func logs(c Client, args []string) { - if len(args) != 1 { - usage() - return - } - - output, err := c.GetLogs(args[0]) - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - fmt.Println(string(output)) -} diff --git a/cmd/wcloud/client.go b/cmd/wcloud/client.go deleted file mode 100644 index 02cbbaa..0000000 --- a/cmd/wcloud/client.go +++ /dev/null @@ -1,150 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" -) - -// Client for the deployment service -type Client struct { - token string - baseURL string -} - -// NewClient makes a new Client -func NewClient(token, baseURL string) Client { - return Client{ - token: token, - baseURL: baseURL, - } -} - -func (c Client) newRequest(method, path string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, c.baseURL+path, body) - if err != nil { - return nil, err - } - req.Header.Add("Authorization", fmt.Sprintf("Scope-Probe token=%s", c.token)) - return req, nil -} - -// Deploy notifies the deployment service about a new deployment -func (c Client) Deploy(deployment Deployment) error { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(deployment); err != nil { - return err - } - req, err := c.newRequest("POST", "/api/deploy/deploy", &buf) - if err != nil { - return err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - if res.StatusCode != 204 { - return fmt.Errorf("error making request: %s", res.Status) - } - return nil -} - -// GetDeployments returns a list of deployments -func (c Client) GetDeployments(from, through int64) ([]Deployment, error) { - req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy?from=%d&through=%d", from, through), nil) - if err != nil { - return nil, err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return 
nil, err - } - if res.StatusCode != 200 { - return nil, fmt.Errorf("error making request: %s", res.Status) - } - var response struct { - Deployments []Deployment `json:"deployments"` - } - if err := json.NewDecoder(res.Body).Decode(&response); err != nil { - return nil, err - } - return response.Deployments, nil -} - -// GetEvents returns the raw events. -func (c Client) GetEvents(from, through int64) ([]byte, error) { - req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/event?from=%d&through=%d", from, through), nil) - if err != nil { - return nil, err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - return nil, fmt.Errorf("error making request: %s", res.Status) - } - return ioutil.ReadAll(res.Body) -} - -// GetConfig returns the current Config -func (c Client) GetConfig() (*Config, error) { - req, err := c.newRequest("GET", "/api/config/deploy", nil) - if err != nil { - return nil, err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if res.StatusCode == 404 { - return nil, fmt.Errorf("no configuration uploaded yet") - } - if res.StatusCode != 200 { - return nil, fmt.Errorf("error making request: %s", res.Status) - } - var config Config - if err := json.NewDecoder(res.Body).Decode(&config); err != nil { - return nil, err - } - return &config, nil -} - -// SetConfig sets the current Config -func (c Client) SetConfig(config *Config) error { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(config); err != nil { - return err - } - req, err := c.newRequest("POST", "/api/config/deploy", &buf) - if err != nil { - return err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - if res.StatusCode != 204 { - return fmt.Errorf("error making request: %s", res.Status) - } - return nil -} - -// GetLogs returns the logs for a given deployment. -func (c Client) GetLogs(deployID string) ([]byte, error) { - req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy/%s/log", deployID), nil) - if err != nil { - return nil, err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - return nil, fmt.Errorf("error making request: %s", res.Status) - } - return ioutil.ReadAll(res.Body) -} diff --git a/cmd/wcloud/types.go b/cmd/wcloud/types.go deleted file mode 100644 index a068163..0000000 --- a/cmd/wcloud/types.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "time" -) - -// Deployment describes a deployment -type Deployment struct { - ID string `json:"id"` - CreatedAt time.Time `json:"created_at"` - ImageName string `json:"image_name"` - Version string `json:"version"` - Priority int `json:"priority"` - State string `json:"status"` - - TriggeringUser string `json:"triggering_user"` - IntendedServices []string `json:"intended_services"` -} - -// Config for the deployment system for a user. 
-type Config struct { - RepoURL string `json:"repo_url" yaml:"repo_url"` - RepoBranch string `json:"repo_branch" yaml:"repo_branch"` - RepoPath string `json:"repo_path" yaml:"repo_path"` - RepoKey string `json:"repo_key" yaml:"repo_key"` - KubeconfigPath string `json:"kubeconfig_path" yaml:"kubeconfig_path"` - AutoApply bool `json:"auto_apply" yaml:"auto_apply"` - - Notifications []NotificationConfig `json:"notifications" yaml:"notifications"` - - // Globs of files not to change, relative to the root of the repo - ConfigFileBlackList []string `json:"config_file_black_list" yaml:"config_file_black_list"` - - CommitMessageTemplate string `json:"commit_message_template" yaml:"commit_message_template"` // See https://golang.org/pkg/text/template/ -} - -// NotificationConfig describes how to send notifications -type NotificationConfig struct { - SlackWebhookURL string `json:"slack_webhook_url" yaml:"slack_webhook_url"` - SlackUsername string `json:"slack_username" yaml:"slack_username"` - MessageTemplate string `json:"message_template" yaml:"message_template"` - ApplyMessageTemplate string `json:"apply_message_template" yaml:"apply_message_template"` -} diff --git a/config_management/README.md b/config_management/README.md new file mode 100644 index 0000000..bf1f6f6 --- /dev/null +++ b/config_management/README.md @@ -0,0 +1,141 @@ +# Weaveworks configuration management + +## Introduction + +This project allows you to configure a machine with: + +* Docker and Weave Net for development: `setup_weave-net_dev.yml` +* Docker and Weave Net for testing: `setup_weave-net_test.yml` +* Docker, Kubernetes and Weave Kube (CNI plugin): `setup_weave-kube.yml` + +You can then use these environments for development, testing and debugging. + +## Set up + +You will need [Python](https://www.python.org/downloads/) and [Ansible 2.+](http://docs.ansible.com/ansible/intro_installation.html) installed on your machine and added to your `PATH` in order to be able to configure environments automatically. + +* On any platform, if you have Python installed: `pip install ansible` +* On macOS: `brew install ansible` +* On Linux (via APT): `sudo apt install ansible` +* On Linux (via YUM): `sudo yum install ansible` +* For other platforms or more details, see [here](http://docs.ansible.com/ansible/intro_installation.html) + +Frequent errors during installation are: + +* `fatal error: Python.h: No such file or directory`: install `python-dev` +* `fatal error: ffi.h: No such file or directory`: install `libffi-dev` +* `fatal error: openssl/opensslv.h: No such file or directory`: install `libssl-dev` + +Full steps for a blank Ubuntu/Debian Linux machine: + + sudo apt-get install -qq -y python-pip python-dev libffi-dev libssl-dev + sudo pip install -U cffi + sudo pip install ansible + +## Tags + +These can be used to selectively run (`--tags "tag1,tag2"`) or skip (`--skip-tags "tag1,tag2"`) tasks. + + * `output`: print potentially useful output from hosts (e.g. output of `kubectl get pods --all-namespaces`) + +## Usage + +### Local machine + +``` +ansible-playbook -u <username> -i "localhost," -c local setup_weave-kube.yml +``` + +### Vagrant + +Provision your local VM using Vagrant: + +``` +cd $(mktemp -d -t XXX) +vagrant init ubuntu/xenial64 # or, e.g.
centos/7 +vagrant up +``` + +then set the following environment variables by extracting the output of `vagrant ssh-config`: + +``` +eval $(vagrant ssh-config | sed \ +-ne 's/\ *HostName /vagrant_ssh_host=/p' \ +-ne 's/\ *User /vagrant_ssh_user=/p' \ +-ne 's/\ *Port /vagrant_ssh_port=/p' \ +-ne 's/\ *IdentityFile /vagrant_ssh_id_file=/p') +``` + +and finally run: + +``` +ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \ +--ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \ +-i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml +``` + +or, for specific versions of Kubernetes and Docker: + +``` +ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \ +--ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \ +-i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml \ +--extra-vars "docker_version=1.12.3 kubernetes_version=1.4.4" +``` + +NOTE: Kubernetes APT repo includes only the latest version, so currently +retrieving an older version will fail. + +### Terraform + +Provision your machine using the Terraform scripts from `../provisioning`, then run: + +``` +terraform output ansible_inventory > /tmp/ansible_inventory +``` + +and + +``` +ansible-playbook \ + --private-key="$(terraform output private_key_path)" \ + -u "$(terraform output username)" \ + -i /tmp/ansible_inventory \ + --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \ + ../../config_management/setup_weave-kube.yml + +``` + +To specify versions of Kubernetes and Docker see Vagrant examples above. + +N.B.: `--ssh-extra-args` is used to provide: + +* `StrictHostKeyChecking=no`: as VMs come and go, the same IP can be used by a different machine, so checking the host's SSH key may fail. Note that this introduces a risk of a man-in-the-middle attack. +* `UserKnownHostsFile=/dev/null`: if you previously connected a VM with the same IP but a different public key, and added it to `~/.ssh/known_hosts`, SSH may still fail to connect, hence we use `/dev/null` instead of `~/.ssh/known_hosts`. + + +### Docker installation role + +Various ways to install Docker are provided: + +- `docker-from-docker-ce-repo` +- `docker-from-docker-repo` +- `docker-from-get.docker.com` +- `docker-from-tarball` + +each producing a slightly different outcome, which can be useful for testing various setup scenarios. + +The `docker-install` role selects one of the above ways to install Docker based on the `docker_install_role` variable. +The default value for this variable is configured in `group_vars/all`. 
+You can, however, override it with whichever role you want to run by passing the role's name as a key-value pair via `--extra-vars`, e.g.: + +``` +ansible-playbook <playbook>.yml \ + --extra-vars "docker_install_role=docker-from-docker-ce-repo" +``` + + +## Resources + +* [https://www.vagrantup.com/docs/provisioning/ansible.html](https://www.vagrantup.com/docs/provisioning/ansible.html) +* [http://docs.ansible.com/ansible/guide_vagrant.html](http://docs.ansible.com/ansible/guide_vagrant.html) diff --git a/config_management/group_vars/all b/config_management/group_vars/all new file mode 100644 index 0000000..d728cce --- /dev/null +++ b/config_management/group_vars/all @@ -0,0 +1,11 @@ +--- +go_version: 1.8.1 +terraform_version: 0.8.5 +docker_version: 17.06 +docker_install_role: 'docker-from-docker-ce-repo' +kubernetes_version: 1.6.1 +kubernetes_cni_version: 0.5.1 +kubernetes_token: '123456.0123456789123456' +etcd_container_version: 2.2.5 +kube_discovery_container_version: 1.0 +pause_container_version: 3.0 diff --git a/config_management/library/setup_ansible_dependencies.yml b/config_management/library/setup_ansible_dependencies.yml new file mode 100644 index 0000000..5026336 --- /dev/null +++ b/config_management/library/setup_ansible_dependencies.yml @@ -0,0 +1,33 @@ +--- +################################################################################ +# Install Ansible's dependencies: python and lsb_release, required respectively +# to run Ansible modules and gather Ansible facts. +# +# See also: +# - http://docs.ansible.com/ansible/intro_installation.html#managed-node-requirements +# - http://docs.ansible.com/ansible/setup_module.html +################################################################################ + +- name: check if python is installed (as required by ansible modules) + raw: test -e /usr/bin/python + register: is_python_installed + failed_when: is_python_installed.rc not in [0, 1] + changed_when: false # never mutates state. + +- name: install python if missing (as required by ansible modules) + when: is_python_installed|failed # skip otherwise + raw: (test -e /usr/bin/apt-get && apt-get update && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum update && yum install -y python) + changed_when: is_python_installed.rc == 1 + +- name: check if lsb_release is installed (as required for ansible facts) + raw: test -e /usr/bin/lsb_release + register: is_lsb_release_installed + failed_when: is_lsb_release_installed.rc not in [0, 1] + changed_when: false # never mutates state. + +- name: install lsb_release if missing (as required for ansible facts) + when: is_lsb_release_installed|failed # skip otherwise + raw: (test -e /usr/bin/apt-get && apt-get install -y lsb_release) || (test -e /usr/bin/yum && yum install -y redhat-lsb-core) + changed_when: is_lsb_release_installed.rc == 1 + +- setup: # gather 'facts', i.e. compensates for 'gather_facts: false' in calling playbook. diff --git a/config_management/roles/dev-tools/files/apt-daily.timer.conf b/config_management/roles/dev-tools/files/apt-daily.timer.conf new file mode 100644 index 0000000..bd19c61 --- /dev/null +++ b/config_management/roles/dev-tools/files/apt-daily.timer.conf @@ -0,0 +1,2 @@ +[Timer] +Persistent=false diff --git a/config_management/roles/dev-tools/tasks/main.yml b/config_management/roles/dev-tools/tasks/main.yml new file mode 100644 index 0000000..96ac3a2 --- /dev/null +++ b/config_management/roles/dev-tools/tasks/main.yml @@ -0,0 +1,48 @@ +--- +# Set up Development Environment.
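+# This role installs build/test utilities via the system package manager, +# Ansible via pip, and a pinned Terraform release (terraform_version in +# group_vars/all) unpacked straight into /usr/bin.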
+ +- name: install development tools + package: + name: "{{ item }}" + state: present + with_items: + # weave net dependencies + - make + - vagrant + # ansible dependencies + - python-pip + - python-dev + - libffi-dev + - libssl-dev + # terraform dependencies + - unzip + # other potentially useful tools: + - aufs-tools + - ethtool + - iputils-arping + - libpcap-dev + - git + - mercurial + - bc + - jq + +- name: install ansible + pip: + name: ansible + state: present + +- name: install terraform + unarchive: + src: 'https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_linux_{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.zip' + remote_src: yes + dest: /usr/bin + mode: 0555 + creates: /usr/bin/terraform + +# Ubuntu runs an apt update process that will run on first boot from image. +# This is of questionable value when the machines are only going to live for a few minutes. +# If you leave them on they will run the process daily. +# Also we have seen the update process create a 'defunct' process which then throws off Weave Net smoke-test checks. +# So, we override the 'persistent' setting so it will still run at the scheduled time but will not try to catch up on first boot. +- name: copy apt daily override + copy: src=apt-daily.timer.conf dest=/etc/systemd/system/apt-daily.timer.d/ diff --git a/config_management/roles/docker-configuration/files/docker.conf b/config_management/roles/docker-configuration/files/docker.conf new file mode 100644 index 0000000..626d802 --- /dev/null +++ b/config_management/roles/docker-configuration/files/docker.conf @@ -0,0 +1,3 @@ +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay --insecure-registry "weave-ci-registry:5000" diff --git a/config_management/roles/docker-configuration/tasks/main.yml b/config_management/roles/docker-configuration/tasks/main.yml new file mode 100644 index 0000000..d673696 --- /dev/null +++ b/config_management/roles/docker-configuration/tasks/main.yml @@ -0,0 +1,36 @@ +--- +# Configure Docker +# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install + +- name: ensure docker group is present (or create it) + group: + name: docker + state: present + +- name: add user to docker group (avoids sudo-ing) + user: + name: "{{ ansible_user }}" + group: docker + state: present + +- name: ensure docker's systemd directory exists + file: + path: /etc/systemd/system/docker.service.d + state: directory + recurse: yes + when: ansible_os_family != "RedHat" + +- name: enable docker remote api over tcp + copy: + src: "{{ role_path }}/files/docker.conf" + dest: /etc/systemd/system/docker.service.d/docker.conf + register: docker_conf + when: ansible_os_family != "RedHat" + +- name: restart docker service + systemd: + name: docker + state: restarted + daemon_reload: yes # ensure docker.conf is picked up. 
+ enabled: yes + when: docker_conf.changed or ansible_os_family == "RedHat" diff --git a/config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml b/config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml new file mode 100644 index 0000000..3e2ae12 --- /dev/null +++ b/config_management/roles/docker-from-docker-ce-repo/tasks/debian.yml @@ -0,0 +1,35 @@ +--- +# Debian / Ubuntu specific: + +- name: install dependencies for docker repository + package: + name: "{{ item }}" + state: present + with_items: + - apt-transport-https + - ca-certificates + +- name: add apt key for the docker repository + apt_key: + keyserver: hkp://ha.pool.sks-keyservers.net:80 + id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 + state: present + register: apt_key_docker_repo + +- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }}) + apt_repository: + repo: deb https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename|lower }} stable + state: present + register: apt_docker_repo + +- name: update apt's cache + apt: + update_cache: yes + when: apt_key_docker_repo.changed or apt_docker_repo.changed + +- name: install docker-engine + package: + name: "{{ item }}" + state: present + with_items: + - docker-ce={{ docker_version }}* diff --git a/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml b/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml new file mode 100644 index 0000000..0acb6d8 --- /dev/null +++ b/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml @@ -0,0 +1,10 @@ +--- +# Set up Docker +# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install + +# Distribution-specific tasks: +- include: debian.yml + when: ansible_os_family == "Debian" + +- include: redhat.yml + when: ansible_os_family == "RedHat" diff --git a/config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml b/config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml new file mode 100644 index 0000000..ea9a3fa --- /dev/null +++ b/config_management/roles/docker-from-docker-ce-repo/tasks/redhat.yml @@ -0,0 +1,29 @@ +# Docker installation from Docker's CentOS Community Edition +# See also: https://docs.docker.com/engine/installation/linux/centos/ + +- name: remove all potentially pre existing packages + yum: + name: '{{ item }}' + state: absent + with_items: + - docker + - docker-common + - container-selinux + - docker-selinux + - docker-engine + +- name: install yum-utils + yum: + name: yum-utils + state: present + +- name: add docker ce repo + command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + +# Note that Docker CE versions do not follow regular Docker versions, but look +# like, for example: "17.03.0.el7" +- name: install docker + yum: + name: 'docker-ce-{{ docker_version }}' + update_cache: yes + state: present diff --git a/config_management/roles/docker-from-docker-repo/tasks/debian.yml b/config_management/roles/docker-from-docker-repo/tasks/debian.yml new file mode 100644 index 0000000..cc33c2c --- /dev/null +++ b/config_management/roles/docker-from-docker-repo/tasks/debian.yml @@ -0,0 +1,35 @@ +--- +# Debian / Ubuntu specific: + +- name: install dependencies for docker repository + package: + name: "{{ item }}" + state: present + with_items: + - apt-transport-https + - ca-certificates + +- name: add apt key for the docker repository + apt_key: + keyserver: hkp://ha.pool.sks-keyservers.net:80 + id: 58118E89F3A912897C070ADBF76221572C52609D 
+ state: present + register: apt_key_docker_repo + +- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }}) + apt_repository: + repo: deb https://apt.dockerproject.org/repo {{ ansible_distribution | lower }}-{{ ansible_distribution_release }} main + state: present + register: apt_docker_repo + +- name: update apt's cache + apt: + update_cache: yes + when: apt_key_docker_repo.changed or apt_docker_repo.changed + +- name: install docker-engine + package: + name: "{{ item }}" + state: present + with_items: + - docker-engine={{ docker_version }}* diff --git a/config_management/roles/docker-from-docker-repo/tasks/main.yml b/config_management/roles/docker-from-docker-repo/tasks/main.yml new file mode 100644 index 0000000..0acb6d8 --- /dev/null +++ b/config_management/roles/docker-from-docker-repo/tasks/main.yml @@ -0,0 +1,10 @@ +--- +# Set up Docker +# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install + +# Distribution-specific tasks: +- include: debian.yml + when: ansible_os_family == "Debian" + +- include: redhat.yml + when: ansible_os_family == "RedHat" diff --git a/config_management/roles/docker-from-docker-repo/tasks/redhat.yml b/config_management/roles/docker-from-docker-repo/tasks/redhat.yml new file mode 100644 index 0000000..d29964e --- /dev/null +++ b/config_management/roles/docker-from-docker-repo/tasks/redhat.yml @@ -0,0 +1,25 @@ +--- +# RedHat / CentOS specific: + +- name: add docker' yum repository (centos/{{ ansible_lsb.major_release }}) + yum_repository: + name: docker + description: Docker YUM repo + file: external_repos + baseurl: https://yum.dockerproject.org/repo/main/centos/{{ ansible_lsb.major_release }} + enabled: yes + gpgkey: https://yum.dockerproject.org/gpg + gpgcheck: yes + state: present + +- name: update yum's cache + yum: + name: "*" + update_cache: yes + +- name: install docker-engine + package: + name: "{{ item }}" + state: present + with_items: + - docker-engine-{{ docker_version }} diff --git a/config_management/roles/docker-from-get.docker.com/tasks/debian.yml b/config_management/roles/docker-from-get.docker.com/tasks/debian.yml new file mode 100644 index 0000000..7444194 --- /dev/null +++ b/config_management/roles/docker-from-get.docker.com/tasks/debian.yml @@ -0,0 +1,8 @@ +--- +# Debian / Ubuntu specific: + +- name: apt-import gpg key for the docker repository + shell: curl -sSL https://get.docker.com/gpg | sudo apt-key add - + +- name: install docker + shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine={{ docker_version }}*/ -e s/docker-ce/docker-ce={{ docker_version }}*/ | sh' diff --git a/config_management/roles/docker-from-get.docker.com/tasks/main.yml b/config_management/roles/docker-from-get.docker.com/tasks/main.yml new file mode 100644 index 0000000..92c497b --- /dev/null +++ b/config_management/roles/docker-from-get.docker.com/tasks/main.yml @@ -0,0 +1,10 @@ +--- +# Set up Docker +# See also: legacy gce.sh script + +# Distribution-specific tasks: +- include: debian.yml + when: ansible_os_family == "Debian" + +- include: redhat.yml + when: ansible_os_family == "RedHat" diff --git a/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml b/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml new file mode 100644 index 0000000..ea7cbfc --- /dev/null +++ b/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml @@ -0,0 +1,11 @@ +--- +# RedHat / CentOS specific: + +- name: rpm-import gpg key for the 
docker repository + shell: curl -sSLo /tmp/docker.gpg https://get.docker.com/gpg && sudo rpm --import /tmp/docker.gpg + +- name: install docker + shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine-{{ docker_version }}*/ | sh' + +- name: wait for docker installation to complete + shell: yum install -y yum-utils && yum-complete-transaction diff --git a/config_management/roles/docker-from-tarball/tasks/main.yml b/config_management/roles/docker-from-tarball/tasks/main.yml new file mode 100644 index 0000000..a233d10 --- /dev/null +++ b/config_management/roles/docker-from-tarball/tasks/main.yml @@ -0,0 +1,61 @@ +--- +# Set up Docker +# See also: +# - https://docs.docker.com/engine/installation/linux/ubuntulinux/#install +# - https://github.com/docker/docker/releases + +- include_role: + name: docker-prerequisites + +- name: install daemon + package: + name: daemon + state: present + +- name: 'create directory {{ docker_dir }}/{{ docker_version }}' + file: + path: '{{ docker_dir }}/{{ docker_version }}' + state: directory + mode: 0755 + +- name: download and extract docker + unarchive: + src: 'https://get.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz' + remote_src: yes + dest: '{{ docker_dir }}/{{ docker_version }}' + extra_opts: '--strip-components=1' + mode: 0555 + creates: '{{ docker_dir }}/{{ docker_version }}/docker' + +- name: create symlink to current version + file: + src: '{{ docker_dir }}/{{ docker_version }}' + dest: '{{ docker_dir }}/current' + state: link + mode: 0555 + +- name: list all files to symlink + find: + paths: '{{ docker_dir }}/current' + file_type: file + register: binaries + changed_when: false + +- name: create symlinks to all binaries + file: + src: '{{ item }}' + dest: /usr/bin/{{ item | basename }} + state: link + with_items: "{{ binaries.files | map(attribute='path') | list }}" + +- name: killall docker + command: killall docker + register: killall + failed_when: false + changed_when: killall.rc == 0 + +- name: start dockerd + command: daemon -- /usr/bin/dockerd + +- include_role: + name: docker-configuration diff --git a/config_management/roles/docker-from-tarball/vars/main.yml b/config_management/roles/docker-from-tarball/vars/main.yml new file mode 100644 index 0000000..d410668 --- /dev/null +++ b/config_management/roles/docker-from-tarball/vars/main.yml @@ -0,0 +1,5 @@ +--- +docker_dir: '/opt/docker' +# Release candidates are published on test.docker.com, GA releases on get.docker.com: +docker_url: '{{ ("rc" in docker_version) | ternary("https://test.docker.com/builds/Linux/x86_64/docker-" ~ docker_version ~ ".tgz", "https://get.docker.com/builds/Linux/x86_64/docker-" ~ docker_version ~ ".tgz") }}' diff --git a/config_management/roles/docker-install/tasks/main.yml b/config_management/roles/docker-install/tasks/main.yml new file mode 100644 index 0000000..cc803ca --- /dev/null +++ b/config_management/roles/docker-install/tasks/main.yml @@ -0,0 +1,30 @@ +--- +# Set up Docker + +- include_role: + name: docker-prerequisites + +# Dynamically include docker installation role using 'when' as Ansible does not +# allow for include_role's name to be set to a variable. Indeed: +# - include_role: +# name: '{{ docker_install_role }}' +# fails with: +# ERROR!
'docker_install_role' is undefined +- include_role: + name: docker-from-docker-repo + when: docker_install_role == 'docker-from-docker-repo' + +- include_role: + name: docker-from-docker-ce-repo + when: docker_install_role == 'docker-from-docker-ce-repo' + +- include_role: + name: docker-from-get.docker.com + when: docker_install_role == 'docker-from-get.docker.com' + +- include_role: + name: docker-from-tarball + when: docker_install_role == 'docker-from-tarball' + +- include_role: + name: docker-configuration diff --git a/config_management/roles/docker-prerequisites/tasks/debian.yml b/config_management/roles/docker-prerequisites/tasks/debian.yml new file mode 100644 index 0000000..48b0c2e --- /dev/null +++ b/config_management/roles/docker-prerequisites/tasks/debian.yml @@ -0,0 +1,11 @@ +--- +# Install Docker's dependencies +# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install + +- name: install linux-image-extra-*/virtual + package: + name: "{{ item }}" + state: present + with_items: + - linux-image-extra-{{ ansible_kernel }} + - linux-image-extra-virtual diff --git a/config_management/roles/docker-prerequisites/tasks/main.yml b/config_management/roles/docker-prerequisites/tasks/main.yml new file mode 100644 index 0000000..a817737 --- /dev/null +++ b/config_management/roles/docker-prerequisites/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +# Distribution-specific tasks: +- include: debian.yml + when: ansible_os_family == "Debian" diff --git a/config_management/roles/golang-from-tarball/tasks/main.yml b/config_management/roles/golang-from-tarball/tasks/main.yml new file mode 100644 index 0000000..55476bf --- /dev/null +++ b/config_management/roles/golang-from-tarball/tasks/main.yml @@ -0,0 +1,36 @@ +--- +# Set up Go. + +- name: install go + unarchive: + src: 'https://storage.googleapis.com/golang/go{{ go_version }}.linux-{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.tar.gz' + remote_src: yes + dest: /usr/local + mode: 0777 + creates: /usr/local/go/bin/go + +- name: set go env. vars. and add go to path + blockinfile: + dest: '$HOME/.bashrc' + block: | + export PATH=$PATH:/usr/local/go/bin + export GOPATH=$HOME + state: present + create: yes + mode: 0644 + become: '{{ item }}' + with_items: + - true # Run as root + - false # Run as SSH user + +- name: source ~/.bashrc from ~/.bash_profile + lineinfile: + dest: '$HOME/.bash_profile' + line: '[ -r $HOME/.bashrc ] && source $HOME/.bashrc' + state: present + create: yes + mode: 0644 + become: '{{ item }}' + with_items: + - true # Run as root + - false # Run as SSH user diff --git a/config_management/roles/kubelet-stop/tasks/main.yml b/config_management/roles/kubelet-stop/tasks/main.yml new file mode 100644 index 0000000..6e5f314 --- /dev/null +++ b/config_management/roles/kubelet-stop/tasks/main.yml @@ -0,0 +1,14 @@ +--- + +- name: check if kubelet service exists + stat: + path: /etc/init.d/kubelet + register: kubelet + +# avoids having weave-net and weave-kube conflict in some test cases (e.g. 
130_expose_test.sh) +- name: stop kubelet service + systemd: + name: kubelet + state: stopped + enabled: no + when: kubelet.stat.exists diff --git a/config_management/roles/kubernetes-docker-images/tasks/main.yml b/config_management/roles/kubernetes-docker-images/tasks/main.yml new file mode 100644 index 0000000..801c463 --- /dev/null +++ b/config_management/roles/kubernetes-docker-images/tasks/main.yml @@ -0,0 +1,14 @@ +--- + +- name: docker pull images used by k8s tests + docker_image: + name: '{{ item }}' + state: present + with_items: + - gcr.io/google_containers/etcd-amd64:{{ etcd_container_version }} + - gcr.io/google_containers/kube-apiserver-amd64:v{{ kubernetes_version }} + - gcr.io/google_containers/kube-controller-manager-amd64:v{{ kubernetes_version }} + - gcr.io/google_containers/kube-proxy-amd64:v{{ kubernetes_version }} + - gcr.io/google_containers/kube-scheduler-amd64:v{{ kubernetes_version }} + - gcr.io/google_containers/kube-discovery-amd64:{{ kube_discovery_container_version }} + - gcr.io/google_containers/pause-amd64:{{ pause_container_version }} diff --git a/config_management/roles/kubernetes-install/tasks/debian.yml b/config_management/roles/kubernetes-install/tasks/debian.yml new file mode 100644 index 0000000..9f16edf --- /dev/null +++ b/config_management/roles/kubernetes-install/tasks/debian.yml @@ -0,0 +1,37 @@ +--- +# Debian / Ubuntu specific: + +- name: add apt key for the kubernetes repository + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + register: apt_key_k8s_repo + +- name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }}) + apt_repository: + repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }} main + state: present + register: apt_k8s_repo + when: '"alpha" not in kubernetes_version and "beta" not in kubernetes_version' + +- name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }}-unstable) + apt_repository: + repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }}-unstable main + state: present + register: apt_k8s_repo + when: '"alpha" in kubernetes_version or "beta" in kubernetes_version' + +- name: update apt's cache + apt: + update_cache: yes + when: apt_key_k8s_repo.changed or apt_k8s_repo.changed + +- name: install kubelet and kubectl + package: + name: "{{ item }}" + state: present + with_items: + - kubelet={{ kubernetes_version }}* + - kubectl={{ kubernetes_version }}* + - kubeadm={{ kubernetes_version }}* + - kubernetes-cni={{ kubernetes_cni_version }}* diff --git a/config_management/roles/kubernetes-install/tasks/main.yml b/config_management/roles/kubernetes-install/tasks/main.yml new file mode 100644 index 0000000..50dcdda --- /dev/null +++ b/config_management/roles/kubernetes-install/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# Install Kubernetes + +# Distribution-specific tasks: +- include: debian.yml + when: ansible_os_family == "Debian" + +- include: redhat.yml + when: ansible_os_family == "RedHat" + +- name: install ebtables + package: + name: "{{ item }}" + state: present + with_items: + - ebtables diff --git a/config_management/roles/kubernetes-install/tasks/redhat.yml b/config_management/roles/kubernetes-install/tasks/redhat.yml new file mode 100644 index 0000000..293729d --- /dev/null +++ b/config_management/roles/kubernetes-install/tasks/redhat.yml @@ -0,0 +1,30 @@ +--- +# RedHat / CentOS specific: + +- name: add kubernetes' yum repository (kubernetes-el{{ ansible_lsb.major_release 
}}-x86_64) + yum_repository: + name: kubernetes + description: Kubernetes YUM repo + file: external_repos + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el{{ ansible_lsb.major_release }}-x86_64 + enabled: yes + gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + gpgcheck: yes + state: present + register: yum_k8s_repo + +- name: update yum's cache + yum: + name: "*" + update_cache: yes + when: yum_k8s_repo.changed + +- name: install kubelet and kubectl + package: + name: "{{ item }}" + state: present + with_items: + - kubelet-{{ kubernetes_version }}* + - kubectl-{{ kubernetes_version }}* + - kubeadm-{{ kubernetes_version }}* + - kubernetes-cni-{{ kubernetes_cni_version }}* diff --git a/config_management/roles/kubernetes-start/tasks/main.yml b/config_management/roles/kubernetes-start/tasks/main.yml new file mode 100644 index 0000000..d343b21 --- /dev/null +++ b/config_management/roles/kubernetes-start/tasks/main.yml @@ -0,0 +1,42 @@ +--- +# Start Kubernetes + +- name: kubeadm reset + command: kubeadm reset + +- name: restart kubelet service + systemd: + name: kubelet + state: restarted + enabled: yes + +# N.B.: the below version tests are lexicographic string comparisons; they are +# fine for the 1.x versions used here, but would mis-order two-digit minor +# versions (e.g. "1.10.0" < "1.5.4"): +- name: set kubeconfig and kubeadm version options + set_fact: + kubeconfig: '{{ (kubernetes_version >= "1.5.4") | ternary("--kubeconfig /etc/kubernetes/admin.conf", "") }}' + kubernetes_version_option: '{{ (kubernetes_version >= "1.6") | ternary("kubernetes_version", "use-kubernetes-version") }}' + +- name: kubeadm init on the master + command: 'kubeadm init --{{ kubernetes_version_option }}=v{{ kubernetes_version }} --token={{ kubernetes_token }}' + when: play_hosts[0] == inventory_hostname + +- name: allow pods to be run on the master (if only node) + command: 'kubectl {{ kubeconfig }} taint nodes --all {{ (kubernetes_version < "1.6") | ternary("dedicated-", "node-role.kubernetes.io/master:NoSchedule-") }}' + when: play_hosts | length == 1 + +- name: kubeadm join on workers + command: 'kubeadm join --token={{ kubernetes_token }} {{ hostvars[play_hosts[0]].private_ip }}{{ (kubernetes_version > "1.6") | ternary(":6443", "") }}' + when: play_hosts[0] != inventory_hostname + +- name: list kubernetes' pods + command: kubectl {{ kubeconfig }} get pods --all-namespaces + when: play_hosts[0] == inventory_hostname + changed_when: false + register: kubectl_get_pods + tags: + - output + +- name: print output of `kubectl get pods --all-namespaces` + debug: msg="{{ kubectl_get_pods.stdout_lines }}" + when: play_hosts[0] == inventory_hostname + tags: + - output diff --git a/config_management/roles/setup-ansible/pre_tasks/main.yml b/config_management/roles/setup-ansible/pre_tasks/main.yml new file mode 100644 index 0000000..efb1549 --- /dev/null +++ b/config_management/roles/setup-ansible/pre_tasks/main.yml @@ -0,0 +1,26 @@ +--- +# Set machine up to be able to run ansible playbooks. + +- name: check if python is installed (as required by ansible modules) + raw: test -e /usr/bin/python + register: is_python_installed + failed_when: is_python_installed.rc not in [0, 1] + changed_when: false # never mutates state.
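
A side note on the `kubernetes_version` tests in the kubeadm tasks above: Jinja's `>=`/`<` on strings compare lexicographically, which happens to give the right answer for the single-digit minor versions used here but would mis-order releases such as 1.10.0. A minimal Python sketch of the pitfall, and of the safer comparison via `pkg_resources.parse_version` (the same helper `list_versions.py` further down relies on):

```
from pkg_resources import parse_version

# Lexicographic string comparison only works while minor/patch numbers
# stay single-digit:
print("1.6.1" >= "1.5.4")   # True  -- correct, by luck
print("1.10.0" >= "1.5.4")  # False -- wrong: 1.10.0 is the newer release

# Parsing versions first yields the intended ordering:
print(parse_version("1.10.0") >= parse_version("1.5.4"))  # True
```
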
+ +- name: install python if missing (as required by ansible modules) + when: is_python_installed|failed # skip otherwise + raw: (test -e /usr/bin/apt-get && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum install -y python) + changed_when: is_python_installed.rc == 1 + +- name: check if lsb_release is installed (as required for ansible facts) + raw: test -e /usr/bin/lsb_release + register: is_lsb_release_installed + failed_when: is_lsb_release_installed.rc not in [0, 1] + changed_when: false # never mutates state. + +- name: install lsb_release if missing (as required for ansible facts) + when: is_lsb_release_installed|failed # skip otherwise + raw: (test -e /usr/bin/apt-get && apt-get install -y lsb-release) || (test -e /usr/bin/yum && yum install -y redhat-lsb-core) + changed_when: is_lsb_release_installed.rc == 1 + +- setup: # gather 'facts', i.e. compensates for the above 'gather_facts: false'. diff --git a/config_management/roles/sock-shop/tasks/tasks.yml b/config_management/roles/sock-shop/tasks/tasks.yml new file mode 100644 index 0000000..9667ab0 --- /dev/null +++ b/config_management/roles/sock-shop/tasks/tasks.yml @@ -0,0 +1,34 @@ +--- +# Set up sock-shop on top of Kubernetes. +# Dependencies on other roles: +# - kubernetes + +- name: create sock-shop namespace in k8s + command: kubectl --kubeconfig /etc/kubernetes/admin.conf create namespace sock-shop + +- name: create sock-shop in k8s + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -n sock-shop -f "https://github.com/microservices-demo/microservices-demo/blob/master/deploy/kubernetes/complete-demo.yaml?raw=true" + +- name: describe front-end service + command: kubectl --kubeconfig /etc/kubernetes/admin.conf describe svc front-end -n sock-shop + changed_when: false + register: kubectl_describe_svc_frontend + tags: + - output + +- name: print output of `kubectl describe svc front-end -n sock-shop` + debug: msg="{{ kubectl_describe_svc_frontend.stdout_lines }}" + tags: + - output + +- name: list sock-shop k8s' pods + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get pods -n sock-shop + changed_when: false + register: kubectl_get_pods + tags: + - output + +- name: print output of `kubectl get pods -n sock-shop` + debug: msg="{{ kubectl_get_pods.stdout_lines }}" + tags: + - output diff --git a/config_management/roles/weave-kube/tasks/main.yml b/config_management/roles/weave-kube/tasks/main.yml new file mode 100644 index 0000000..e2025ee --- /dev/null +++ b/config_management/roles/weave-kube/tasks/main.yml @@ -0,0 +1,24 @@ +--- +# Set up Weave Kube on top of Kubernetes.
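
The `raw` bootstrap in `setup-ansible/pre_tasks/main.yml` above hinges on `test -e` exiting 0 when the file exists and 1 when it does not, with any other exit code treated as a real failure; that is what `failed_when: rc not in [0, 1]` and the `changed_when` conditions encode. The same decision logic in a short Python sketch (illustrative only, assuming a coreutils `test` binary on the PATH):

```
import subprocess

# 'test -e PATH' exits 0 if PATH exists, 1 if it does not; anything else
# (e.g. 127, command not found) is a genuine error:
rc = subprocess.call(['test', '-e', '/usr/bin/python'])
if rc not in (0, 1):
    raise RuntimeError('unexpected exit code: %d' % rc)
# The install task only runs -- and only reports "changed" -- when rc == 1:
print('install needed' if rc == 1 else 'already present, skipping')
```
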
+ +- name: set url for weave-kube daemonset + set_fact: + weave_kube_url: '{{ (kubernetes_version < "1.6") | ternary("https://git.io/weave-kube", "https://git.io/weave-kube-1.6") }}' + +- name: configure weave net's cni plugin + command: 'kubectl {{ kubeconfig }} apply -f {{ weave_kube_url }}' + when: play_hosts[0] == inventory_hostname + +- name: list kubernetes' pods + command: 'kubectl {{ kubeconfig }} get pods --all-namespaces' + when: play_hosts[0] == inventory_hostname + changed_when: false + register: kubectl_get_pods + tags: + - output + +- name: print output of `kubectl get pods --all-namespaces` + debug: msg="{{ kubectl_get_pods.stdout_lines }}" + when: play_hosts[0] == inventory_hostname + tags: + - output diff --git a/config_management/roles/weave-net-sources/tasks/main.yml b/config_management/roles/weave-net-sources/tasks/main.yml new file mode 100644 index 0000000..b0a7815 --- /dev/null +++ b/config_management/roles/weave-net-sources/tasks/main.yml @@ -0,0 +1,24 @@ +--- +# Set up Development Environment for Weave Net. + +- name: check if weave net has been checked out + become: false # Run as SSH-user + stat: + path: $HOME/src/github.com/weaveworks/weave + register: weave + failed_when: false + changed_when: false + +- name: git clone weave net + become: false # Run as SSH-user + git: + repo: https://github.com/weaveworks/weave.git + dest: $HOME/src/github.com/weaveworks/weave + when: not weave.stat.exists + +- name: create a convenience symlink to $HOME/src/github.com/weaveworks/weave + become: false # Run as SSH-user + file: + src: $HOME/src/github.com/weaveworks/weave + dest: $HOME/weave + state: link diff --git a/config_management/roles/weave-net-utilities/tasks/main.yml b/config_management/roles/weave-net-utilities/tasks/main.yml new file mode 100644 index 0000000..6883d23 --- /dev/null +++ b/config_management/roles/weave-net-utilities/tasks/main.yml @@ -0,0 +1,56 @@ +--- + +- name: install epel-release + package: + name: "{{ item }}" + state: present + with_items: + - epel-release + when: ansible_os_family == "RedHat" + +- name: install jq + package: + name: "{{ item }}" + state: present + with_items: + - jq + +- name: install ethtool (used by the weave script) + package: + name: "{{ item }}" + state: present + with_items: + - ethtool + +- name: install nsenter (used by the weave script) + command: docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter + +- name: install pip (for docker-py) + package: + name: "{{ item }}" + state: present + with_items: + - python-pip + +- name: install docker-py (for docker_image) + pip: + name: docker-py + state: present + +- name: docker pull images used by tests + docker_image: + name: '{{ item }}' + state: present + with_items: + - alpine + - aanand/docker-dnsutils + - weaveworks/hello-world + +- name: docker pull docker-py which is used by tests + docker_image: + name: joffrey/docker-py + tag: '{{ item }}' + state: present + with_items: + - '1.8.1' + - '1.9.0-rc2' diff --git a/config_management/roles/weave-net/tasks/main.yml b/config_management/roles/weave-net/tasks/main.yml new file mode 100644 index 0000000..0ef5e35 --- /dev/null +++ b/config_management/roles/weave-net/tasks/main.yml @@ -0,0 +1,26 @@ +--- +# Set up Weave Net.
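
The `docker_image` tasks in `weave-net-utilities` above go through docker-py, installed via pip just beforehand. Roughly what `state: present` amounts to when an image is absent, sketched against docker-py's 1.x API (the era pinned by the `joffrey/docker-py` test images), with the default Linux socket assumed:

```
from docker import Client  # docker-py 1.x API

cli = Client(base_url='unix://var/run/docker.sock')
# Pull each image through the daemon, as docker_image does when the
# image is not yet present locally (tag defaults to 'latest'):
for image in ['alpine', 'aanand/docker-dnsutils', 'weaveworks/hello-world']:
    cli.pull(image, tag='latest')
print(sorted(tag for img in cli.images() for tag in (img['RepoTags'] or [])))
```
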
+ +- name: install weave net + get_url: + url: https://git.io/weave + dest: /usr/local/bin/weave + mode: 0555 + +- name: stop weave net + command: /usr/local/bin/weave stop + +- name: start weave net + command: /usr/local/bin/weave launch + +- name: get weave net's status + command: /usr/local/bin/weave status + changed_when: false + register: weave_status + tags: + - output + +- name: print output of `weave status` + debug: msg="{{ weave_status.stdout_lines }}" + tags: + - output diff --git a/config_management/setup_weave-kube.yml b/config_management/setup_weave-kube.yml new file mode 100644 index 0000000..5c68c97 --- /dev/null +++ b/config_management/setup_weave-kube.yml @@ -0,0 +1,27 @@ +--- +################################################################################ +# Install Docker and Kubernetes, and configure Kubernetes to +# use Weave Net's CNI plugin (a.k.a. Weave Kube). +# +# See also: +# - http://kubernetes.io/docs/getting-started-guides/kubeadm/ +# - https://github.com/weaveworks/weave-kube +################################################################################ + +- name: install docker, kubernetes and weave-kube + hosts: all + gather_facts: false # required in case Python is not available on the host + become: true + become_user: root + + pre_tasks: + - include: library/setup_ansible_dependencies.yml + + roles: + - docker-install + - weave-net-utilities + - kubernetes-install + - kubernetes-docker-images + - kubelet-stop + - kubernetes-start + - weave-kube diff --git a/config_management/setup_weave-net_debug.yml b/config_management/setup_weave-net_debug.yml new file mode 100644 index 0000000..ff73a52 --- /dev/null +++ b/config_management/setup_weave-net_debug.yml @@ -0,0 +1,18 @@ +--- +################################################################################ +# Install Docker from Docker's official repository and Weave Net. +################################################################################ + +- name: install docker and weave net for debugging + hosts: all + gather_facts: false # required in case Python is not available on the host + become: true + become_user: root + + pre_tasks: + - include: library/setup_ansible_dependencies.yml + + roles: + - docker-install + - weave-net-utilities + - weave-net diff --git a/config_management/setup_weave-net_dev.yml b/config_management/setup_weave-net_dev.yml new file mode 100644 index 0000000..bdfa08e --- /dev/null +++ b/config_management/setup_weave-net_dev.yml @@ -0,0 +1,20 @@ +--- +################################################################################ +# Install Docker from Docker's official repository and Weave Net.
+################################################################################ + +- name: install docker and weave net for development + hosts: all + gather_facts: false # required in case Python is not available on the host + become: true + become_user: root + + pre_tasks: + - include: library/setup_ansible_dependencies.yml + + roles: + - dev-tools + - golang-from-tarball + - docker-install + # Do not run this role when building with Vagrant, as sources have been already checked out: + - { role: weave-net-sources, when: "ansible_user != 'vagrant'" } diff --git a/config_management/setup_weave-net_test.yml b/config_management/setup_weave-net_test.yml new file mode 100644 index 0000000..fbd155d --- /dev/null +++ b/config_management/setup_weave-net_test.yml @@ -0,0 +1,20 @@ +--- +################################################################################ +# Install Docker from Docker's official repository and Weave Net. +################################################################################ + +- name: install docker and weave net for testing + hosts: all + gather_facts: false # required in case Python is not available on the host + become: true + become_user: root + + pre_tasks: + - include: library/setup_ansible_dependencies.yml + + roles: + - docker-install + - weave-net-utilities + - kubernetes-install + - kubernetes-docker-images + - kubelet-stop diff --git a/dependencies/cross_versions.py b/dependencies/cross_versions.py new file mode 100755 index 0000000..dd920f0 --- /dev/null +++ b/dependencies/cross_versions.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python + +# Generate the cross product of latest versions of Weave Net's dependencies: +# - Go +# - Docker +# - Kubernetes +# +# Dependencies: +# - python +# - git +# - list_versions.py +# +# Testing: +# $ python -m doctest -v cross_versions.py + +from os import linesep +from sys import argv, exit, stdout, stderr +from getopt import getopt, GetoptError +from list_versions import DEPS, get_versions_from, filter_versions +from itertools import product + +# See also: /usr/include/sysexits.h +_ERROR_RUNTIME = 1 +_ERROR_ILLEGAL_ARGS = 64 + + +def _usage(error_message=None): + if error_message: + stderr.write('ERROR: ' + error_message + linesep) + stdout.write( + linesep.join([ + 'Usage:', ' cross_versions.py [OPTION]...', 'Examples:', + ' cross_versions.py', ' cross_versions.py -r', + ' cross_versions.py --rc', ' cross_versions.py -l', + ' cross_versions.py --latest', 'Options:', + '-l/--latest Include only the latest version of each major and' + ' minor versions sub-tree.', + '-r/--rc Include release candidate versions.', + '-h/--help Prints this!', '' + ])) + + +def _validate_input(argv): + try: + config = {'rc': False, 'latest': False} + opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc']) + for opt, value in opts: + if opt in ('-h', '--help'): + _usage() + exit() + if opt in ('-l', '--latest'): + config['latest'] = True + if opt in ('-r', '--rc'): + config['rc'] = True + if len(args) != 0: + raise ValueError('Unsupported argument(s): %s.' 
% args) + return config + except GetoptError as e: + _usage(str(e)) + exit(_ERROR_ILLEGAL_ARGS) + except ValueError as e: + _usage(str(e)) + exit(_ERROR_ILLEGAL_ARGS) + + +def _versions(dependency, config): + return map(str, + filter_versions( + get_versions_from(DEPS[dependency]['url'], + DEPS[dependency]['re']), + DEPS[dependency]['min'], **config)) + + +def cross_versions(config): + docker_versions = _versions('docker', config) + k8s_versions = _versions('kubernetes', config) + return product(docker_versions, k8s_versions) + + +def main(argv): + try: + config = _validate_input(argv) + print(linesep.join('\t'.join(triple) + for triple in cross_versions(config))) + except Exception as e: + print(str(e)) + exit(_ERROR_RUNTIME) + + +if __name__ == '__main__': + main(argv[1:]) diff --git a/dependencies/list_os_images.sh b/dependencies/list_os_images.sh new file mode 100755 index 0000000..00db0d0 --- /dev/null +++ b/dependencies/list_os_images.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +function usage() { + cat <&2 "No AWS owner ID for $1." + exit 1 +} + +if [ -z "$1" ]; then + echo >&2 "No specified provider." + usage + exit 1 +fi + +if [ -z "$2" ]; then + if [ "$1" == "help" ]; then + usage + exit 0 + else + echo >&2 "No specified operating system." + usage + exit 1 + fi +fi + +case "$1" in + 'gcp') + gcloud compute images list --standard-images --regexp=".*?$2.*" \ + --format="csv[no-heading][separator=/](selfLink.map().scope(projects).segment(0),family)" \ + | sort -d + ;; + 'aws') + aws --region "${3:-us-east-1}" ec2 describe-images \ + --owners "$(find_aws_owner_id "$2")" \ + --filters "Name=name,Values=$2*" \ + --query 'Images[*].{name:Name,id:ImageId}' + # Other examples: + # - CentOS: aws --region us-east-1 ec2 describe-images --owners aws-marketplace --filters Name=product-code,Values=aw0evgkw8e5c1q413zgy5pjce + # - Debian: aws --region us-east-1 ec2 describe-images --owners 379101102735 --filters "Name=architecture,Values=x86_64" "Name=name,Values=debian-jessie-*" "Name=root-device-type,Values=ebs" "Name=virtualization-type,Values=hvm" + ;; + 'do') + curl -s -X GET \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \ + "https://api.digitalocean.com/v2/images?page=1&per_page=999999" \ + | jq --raw-output ".images | .[] | .slug" | grep "$2" | sort -d + ;; + *) + echo >&2 "Unknown provider [$1]." 
+ usage + exit 1 + ;; +esac diff --git a/dependencies/list_versions.py b/dependencies/list_versions.py new file mode 100755 index 0000000..e008ecf --- /dev/null +++ b/dependencies/list_versions.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python + +# List all available versions of Weave Net's dependencies: +# - Go +# - Docker +# - Kubernetes +# +# Depending on the parameters passed, it can gather the equivalent of the below +# bash one-liners: +# git ls-remote --tags https://github.com/golang/go \ +# | grep -oP '(?<=refs/tags/go)[\.\d]+$' \ +# | sort --version-sort +# git ls-remote --tags https://github.com/golang/go \ +# | grep -oP '(?<=refs/tags/go)[\.\d]+rc\d+$' \ +# | sort --version-sort \ +# | tail -n 1 +# git ls-remote --tags https://github.com/docker/docker \ +# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \ +# | sort --version-sort +# git ls-remote --tags https://github.com/docker/docker \ +# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-rc\d*$' \ +# | sort --version-sort \ +# | tail -n 1 +# git ls-remote --tags https://github.com/kubernetes/kubernetes \ +# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \ +# | sort --version-sort +# git ls-remote --tags https://github.com/kubernetes/kubernetes \ +# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-beta\.\d+$' \ +# | sort --version-sort | tail -n 1 +# +# Dependencies: +# - python +# - git +# +# Testing: +# $ python -m doctest -v list_versions.py + +from os import linesep, path +from sys import argv, exit, stdout, stderr +from getopt import getopt, GetoptError +from subprocess import Popen, PIPE +from pkg_resources import parse_version +from itertools import groupby +from six.moves import filter +import shlex +import re + +# See also: /usr/include/sysexits.h +_ERROR_RUNTIME = 1 +_ERROR_ILLEGAL_ARGS = 64 + +_TAG_REGEX = '^[0-9a-f]{40}\s+refs/tags/%s$' +_VERSION = 'version' +DEPS = { + 'go': { + 'url': 'https://github.com/golang/go', + 're': 'go(?P<%s>[\d\.]+(?:rc\d)*)' % _VERSION, + 'min': None + }, + 'docker': { + 'url': 'https://github.com/docker/docker', + 're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-rc\d)*)' % _VERSION, + # Weave Net only works with Docker from 1.10.0 onwards, so we ignore + # all previous versions: + 'min': '1.10.0', + }, + 'kubernetes': { + 'url': 'https://github.com/kubernetes/kubernetes', + 're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-beta\.\d)*)' % _VERSION, + # Weave Kube requires Kubernetes 1.4.2+, so we ignore all previous + # versions: + 'min': '1.4.2', + } +} + + +class Version(object): + ''' Helper class to parse and manipulate (sort, filter, group) software + versions. 
''' + + def __init__(self, version): + self.version = version + self.digits = [ + int(x) if x else 0 + for x in re.match('(\d*)\.?(\d*)\.?(\d*).*?', version).groups() + ] + self.major, self.minor, self.patch = self.digits + self.__parsed = parse_version(version) + self.is_rc = self.__parsed.is_prerelease + + def __lt__(self, other): + return self.__parsed.__lt__(other.__parsed) + + def __gt__(self, other): + return self.__parsed.__gt__(other.__parsed) + + def __le__(self, other): + return self.__parsed.__le__(other.__parsed) + + def __ge__(self, other): + return self.__parsed.__ge__(other.__parsed) + + def __eq__(self, other): + return self.__parsed.__eq__(other.__parsed) + + def __ne__(self, other): + return self.__parsed.__ne__(other.__parsed) + + def __str__(self): + return self.version + + def __repr__(self): + return self.version + + +def _read_go_version_from_dockerfile(): + # Read Go version from weave/build/Dockerfile + dockerfile_path = path.join( + path.dirname(path.dirname(path.dirname(path.realpath(__file__)))), + 'build', 'Dockerfile') + with open(dockerfile_path, 'r') as f: + for line in f: + m = re.match('^FROM golang:(\S*)$', line) + if m: + return m.group(1) + raise RuntimeError( + "Failed to read Go version from weave/build/Dockerfile." + " You may be running this script from somewhere else than weave/tools." + ) + + +def _try_set_min_go_version(): + ''' Set the current version of Go used to build Weave Net's containers as + the minimum version. ''' + try: + DEPS['go']['min'] = _read_go_version_from_dockerfile() + except IOError as e: + stderr.write('WARNING: No minimum Go version set. Root cause: %s%s' % + (e, linesep)) + + +def _sanitize(out): + return out.decode('ascii').strip().split(linesep) + + +def _parse_tag(tag, version_pattern, debug=False): + ''' Parse Git tag output's line using the provided `version_pattern`, e.g.: + >>> _parse_tag( + '915b77eb4efd68916427caf8c7f0b53218c5ea4a refs/tags/v1.4.6', + 'v(?P<version>\d+\.\d+\.\d+(?:\-beta\.\d)*)') + '1.4.6' + ''' + pattern = _TAG_REGEX % version_pattern + m = re.match(pattern, tag) + if m: + return m.group(_VERSION) + elif debug: + stderr.write( + 'ERROR: Failed to parse version out of tag [%s] using [%s].%s' % + (tag, pattern, linesep)) + + +def get_versions_from(git_repo_url, version_pattern): + ''' Get release and release candidates' versions from the provided Git + repository. ''' + git = Popen( + shlex.split('git ls-remote --tags %s' % git_repo_url), stdout=PIPE) + out, err = git.communicate() + status_code = git.returncode + if status_code != 0: + raise RuntimeError('Failed to retrieve git tags from %s. ' + 'Status code: %s. Output: %s. Error: %s' % + (git_repo_url, status_code, out, err)) + return list( + filter(None, (_parse_tag(line, version_pattern) + for line in _sanitize(out)))) + + +def _tree(versions, level=0): + ''' Group versions by major, minor and patch version digits. ''' + if not versions or level >= len(versions[0].digits): + return # Empty versions or no more digits to group by. + versions_tree = [] + for _, versions_group in groupby(versions, lambda v: v.digits[level]): + subtree = _tree(list(versions_group), level + 1) + if subtree: + versions_tree.append(subtree) + # Return the current subtree if non-empty, or the list of "leaf" versions: + return versions_tree if versions_tree else versions + + +def _is_iterable(obj): + ''' + Check if the provided object is an iterable collection, i.e. not a string, + e.g.
a list, a generator: + >>> _is_iterable('string') + False + >>> _is_iterable([1, 2, 3]) + True + >>> _is_iterable((x for x in [1, 2, 3])) + True + ''' + return hasattr(obj, '__iter__') and not isinstance(obj, str) + + +def _leaf_versions(tree, rc): + ''' + Recursively traverse the versions tree in a depth-first fashion, + and collect the last node of each branch, i.e. leaf versions. + ''' + versions = [] + if _is_iterable(tree): + for subtree in tree: + versions.extend(_leaf_versions(subtree, rc)) + if not versions: + if rc: + last_rc = next(filter(lambda v: v.is_rc, reversed(tree)), None) + last_prod = next( + filter(lambda v: not v.is_rc, reversed(tree)), None) + if last_rc and last_prod and (last_prod < last_rc): + versions.extend([last_prod, last_rc]) + elif not last_prod: + versions.append(last_rc) + else: + # Either there is no RC, or we ignore the RC as older than + # the latest production version: + versions.append(last_prod) + else: + versions.append(tree[-1]) + return versions + + +def filter_versions(versions, min_version=None, rc=False, latest=False): + ''' Filter provided versions + + >>> filter_versions( + ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], + min_version=None, latest=False, rc=False) + [1.0.0, 1.0.1, 1.1.1, 2.0.0] + + >>> filter_versions( + ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], + min_version=None, latest=True, rc=False) + [1.0.1, 1.1.1, 2.0.0] + + >>> filter_versions( + ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], + min_version=None, latest=False, rc=True) + [1.0.0-beta.1, 1.0.0, 1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0] + + >>> filter_versions( + ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], + min_version='1.1.0', latest=False, rc=True) + [1.1.1, 1.1.2-rc1, 2.0.0] + + >>> filter_versions( + ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], + min_version=None, latest=True, rc=True) + [1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0] + + >>> filter_versions( + ['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], + min_version='1.1.0', latest=True, rc=True) + [1.1.1, 1.1.2-rc1, 2.0.0] + ''' + versions = sorted([Version(v) for v in versions]) + if min_version: + min_version = Version(min_version) + versions = [v for v in versions if v >= min_version] + if not rc: + versions = [v for v in versions if not v.is_rc] + if latest: + versions_tree = _tree(versions) + return _leaf_versions(versions_tree, rc) + else: + return versions + + +def _usage(error_message=None): + if error_message: + stderr.write('ERROR: ' + error_message + linesep) + stdout.write( + linesep.join([ + 'Usage:', ' list_versions.py [OPTION]... [DEPENDENCY]', + 'Examples:', ' list_versions.py go', + ' list_versions.py -r docker', + ' list_versions.py --rc docker', + ' list_versions.py -l kubernetes', + ' list_versions.py --latest kubernetes', 'Options:', + '-l/--latest Include only the latest version of each major and' + ' minor versions sub-tree.', + '-r/--rc Include release candidate versions.', + '-h/--help Prints this!', '' + ])) + + +def _validate_input(argv): + try: + config = {'rc': False, 'latest': False} + opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc']) + for opt, value in opts: + if opt in ('-h', '--help'): + _usage() + exit() + if opt in ('-l', '--latest'): + config['latest'] = True + if opt in ('-r', '--rc'): + config['rc'] = True + if len(args) != 1: + raise ValueError('Please provide a dependency to get versions of.' + ' Expected 1 argument but got %s: %s.' 
% + (len(args), args)) + dependency = args[0].lower() + if dependency not in DEPS.keys(): + raise ValueError( + 'Please provide a valid dependency.' + ' Supported one dependency among {%s} but got: %s.' % + (', '.join(DEPS.keys()), dependency)) + return dependency, config + except GetoptError as e: + _usage(str(e)) + exit(_ERROR_ILLEGAL_ARGS) + except ValueError as e: + _usage(str(e)) + exit(_ERROR_ILLEGAL_ARGS) + + +def main(argv): + try: + dependency, config = _validate_input(argv) + if dependency == 'go': + _try_set_min_go_version() + versions = get_versions_from(DEPS[dependency]['url'], + DEPS[dependency]['re']) + versions = filter_versions(versions, DEPS[dependency]['min'], **config) + print(linesep.join(map(str, versions))) + except Exception as e: + print(str(e)) + exit(_ERROR_RUNTIME) + + +if __name__ == '__main__': + main(argv[1:]) diff --git a/image-tag b/image-tag index d1fd2f7..31f023d 100755 --- a/image-tag +++ b/image-tag @@ -4,6 +4,6 @@ set -o errexit set -o nounset set -o pipefail -WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; then echo "-WIP"; else echo ""; fi) +WORKING_SUFFIX=$(if git status --porcelain | grep -qE '^(?:[^?][^ ]|[^ ][^?])\s'; then echo "-WIP"; else echo ""; fi) BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD) echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX" diff --git a/integration/config.sh b/integration/config.sh index 9bf47ea..5419219 100644 --- a/integration/config.sh +++ b/integration/config.sh @@ -81,7 +81,8 @@ run_on() { host=$1 shift 1 [ -z "$DEBUG" ] || greyly echo "Running on $host:" "$@" >&2 - remote "$host" "$SSH" "$host" "$@" + # shellcheck disable=SC2086 + remote "$host" $SSH "$host" "$@" } docker_on() { @@ -114,10 +115,8 @@ rm_containers() { start_suite() { for host in $HOSTS; do [ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave" - PLUGIN_ID=$(docker_on "$host" ps -aq --filter=name=weaveplugin) - PLUGIN_FILTER="cat" - [ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID" - rm_containers "$host" "$(docker_on "$host" ps -aq 2>/dev/null | "$PLUGIN_FILTER")" + # shellcheck disable=SC2046 + rm_containers "$host" $(docker_on "$host" ps -aq 2>/dev/null) run_on "$host" "docker network ls | grep -q ' weave ' && docker network rm weave" || true weave_on "$host" reset 2>/dev/null done @@ -128,4 +127,4 @@ end_suite() { whitely assert_end } -WEAVE=$DIR/../weave +WEAVE=$DIR/../../integration/weave diff --git a/integration/gce.sh b/integration/gce.sh index 5129916..5c39401 100755 --- a/integration/gce.sh +++ b/integration/gce.sh @@ -9,7 +9,9 @@ set -e : "${KEY_FILE:=/tmp/gce_private_key.json}" : "${SSH_KEY_FILE:=$HOME/.ssh/gce_ssh_key}" -: "${IMAGE:=ubuntu-14-04}" +: "${IMAGE_FAMILY:=ubuntu-1404-lts}" +: "${IMAGE_PROJECT:=ubuntu-os-cloud}" +: "${USER_ACCOUNT:=ubuntu}" : "${ZONE:=us-central1-a}" : "${PROJECT:=}" : "${TEMPLATE_NAME:=}" @@ -22,7 +24,9 @@ fi SUFFIX="" if [ -n "$CIRCLECI" ]; then - SUFFIX="-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX" + SUFFIX="-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX" +else + SUFFIX="-${USER}" fi # Setup authentication @@ -40,8 +44,13 @@ function vm_names() { # Delete all vms in this account function destroy() { local names + # shellcheck disable=SC2046 + if [ $(gcloud compute firewall-rules list "test-allow-docker$SUFFIX" 2>/dev/null | wc -l) -gt 0 ]; then + gcloud compute firewall-rules delete "test-allow-docker$SUFFIX" + fi names="$(vm_names)" - if [ "$(gcloud compute instances list --zone 
"$ZONE" -q "$names" | wc -l)" -le 1 ]; then + # shellcheck disable=SC2086 + if [ "$(gcloud compute instances list --zones "$ZONE" -q $names | wc -l)" -le 1 ]; then return 0 fi for i in {0..10}; do @@ -82,12 +91,16 @@ function try_connect() { function install_docker_on() { name=$1 + echo "Installing Docker on $name for user ${USER_ACCOUNT}" + # shellcheck disable=SC2087 ssh -t "$name" sudo bash -x -s <> /etc/default/docker; service docker restart EOF @@ -107,7 +120,10 @@ function setup() { destroy names=($(vm_names)) - gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE" + gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE" --tags "test$SUFFIX" --network=test + my_ip="$(curl -s http://ipinfo.io/ip)" + gcloud compute firewall-rules create "test-allow-docker$SUFFIX" --network=test --allow tcp:2375,tcp:12375,tcp:4040,tcp:80 --target-tags "test$SUFFIX" --source-ranges "$my_ip" + gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE" sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config @@ -136,7 +152,7 @@ function setup() { } function make_template() { - gcloud compute instances create "$TEMPLATE_NAME" --image "$IMAGE" --zone "$ZONE" + gcloud compute instances create "$TEMPLATE_NAME" --image-family "$IMAGE_FAMILY" --image-project "$IMAGE_PROJECT" --zone "$ZONE" gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE" name="$TEMPLATE_NAME.$ZONE.$PROJECT" try_connect "$name" @@ -155,7 +171,7 @@ function hosts() { hosts=($hostname "${hosts[@]}") args=("--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}") done - echo export SSH=\"ssh -l vagrant\" + echo export SSH=\"ssh -l "${USER_ACCOUNT}"\" echo "export HOSTS=\"${hosts[*]}\"" echo "export ADD_HOST_ARGS=\"${args[*]}\"" rm "$json" @@ -178,6 +194,9 @@ case "$1" in # see if template exists if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then make_template + else + echo "Reusing existing template:" + gcloud compute images describe "$TEMPLATE_NAME" | grep "^creationTimestamp" fi ;; esac diff --git a/integration/sanity_check.sh b/integration/sanity_check.sh index c88337f..192112d 100755 --- a/integration/sanity_check.sh +++ b/integration/sanity_check.sh @@ -1,6 +1,6 @@ #! /bin/bash -# shellcheck disable=SC1091 -. ./config.sh +# shellcheck disable=SC1090,SC1091 +. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/config.sh" set -e diff --git a/lint b/lint index 15a97d4..63c5066 100755 --- a/lint +++ b/lint @@ -6,7 +6,7 @@ # # For shell files, it runs shfmt. If you don't have that installed, you can get # it with: -# go get -u github.com/mvdan/sh/cmd/shfmt +# go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt # # With no arguments, it lints the current files staged # for git commit. 
Or you can pass it explicit filenames @@ -17,9 +17,11 @@ set -e +LINT_IGNORE_FILE=${LINT_IGNORE_FILE:-".lintignore"} + IGNORE_LINT_COMMENT= -IGNORE_TEST_PACKAGES= IGNORE_SPELLINGS= +PARALLEL= while true; do case "$1" in -nocomment) @@ -27,13 +29,17 @@ while true; do shift 1 ;; -notestpackage) - IGNORE_TEST_PACKAGES=1 + # NOOP, still accepted for backwards compatibility shift 1 ;; -ignorespelling) IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS" shift 2 ;; + -p) + PARALLEL=1 + shift 1 + ;; *) break ;; @@ -65,30 +71,6 @@ spell_check() { return $lint_result } -test_mismatch() { - local filename="$1" - local package=$(grep '^package ' "$filename" | awk '{print $2}') - local lint_result=0 - - if [[ $package == "main" ]]; then - return # in package main, all bets are off - fi - - if [[ $filename == *"_internal_test.go" ]]; then - if [[ $package == *"_test" ]]; then - lint_result=1 - echo "${filename}: should not be part of a _test package" - fi - else - if [[ ! $package == *"_test" ]]; then - lint_result=1 - echo "${filename}: should be part of a _test package" - fi - fi - - return $lint_result -} - lint_go() { local filename="$1" local lint_result=0 @@ -131,7 +113,7 @@ lint_sh() { local filename="$1" local lint_result=0 - if ! diff <(shfmt -i 4 "${filename}") "${filename}" >/dev/null; then + if ! diff -u "${filename}" <(shfmt -i 4 "${filename}"); then lint_result=1 echo "${filename}: run shfmt -i 4 -w ${filename}" fi @@ -149,7 +131,7 @@ lint_tf() { local filename="$1" local lint_result=0 - if ! diff <(hclfmt "${filename}") "${filename}" >/dev/null; then + if ! diff -u <(hclfmt "${filename}") "${filename}"; then lint_result=1 echo "${filename}: run hclfmt -w ${filename}" fi @@ -157,6 +139,35 @@ lint_tf() { return $lint_result } +lint_md() { + local filename="$1" + local lint_result=0 + + for i in '=======' '>>>>>>>'; do + if grep -q "${i}" "${filename}"; then + lint_result=1 + echo "${filename}: bad merge/rebase!" 
+ fi + done + + return $lint_result +} + +lint_py() { + local filename="$1" + local lint_result=0 + + if yapf --diff "${filename}" | grep -qE '^[+-]'; then + lint_result=1 + echo "${filename}: run yapf --in-place ${filename}" + else + # Only run flake8 if yapf passes, since they pick up a lot of similar issues + flake8 "${filename}" || lint_result=1 + fi + + return $lint_result +} + lint() { filename="$1" ext="${filename##*\.}" @@ -167,13 +178,14 @@ lint() { return fi - # Don't lint static.go + # Don't lint specific files case "$(basename "${filename}")" in static.go) return ;; coverage.html) return ;; + *.pb.go) return ;; esac - if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" = "text/x-shellscript" ]]; then + if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" == "text/x-shellscript" ]]; then ext="sh" fi @@ -181,14 +193,10 @@ lint() { go) lint_go "${filename}" || lint_result=1 ;; sh) lint_sh "${filename}" || lint_result=1 ;; tf) lint_tf "${filename}" || lint_result=1 ;; + md) lint_md "${filename}" || lint_result=1 ;; + py) lint_py "${filename}" || lint_result=1 ;; esac - if [ -z "$IGNORE_TEST_PACKAGES" ]; then - if [[ "$filename" == *"_test.go" ]]; then - test_mismatch "${filename}" || lint_result=1 - fi - fi - spell_check "${filename}" || lint_result=1 return $lint_result @@ -202,12 +210,46 @@ lint_files() { exit $lint_result } -list_files() { - if [ $# -gt 0 ]; then - git ls-files --exclude-standard | grep -vE '(^|/)vendor/' +matches_any() { + local filename="$1" + local patterns="$2" + while read -r pattern; do + # shellcheck disable=SC2053 + # Use the [[ operator without quotes on $pattern + # in order to "glob" the provided filename: + [[ "$filename" == $pattern ]] && return 0 + done <<<"$patterns" + return 1 +} + +filter_out() { + local patterns_file="$1" + if [ -n "$patterns_file" ] && [ -r "$patterns_file" ]; then + local patterns + patterns=$(sed '/^#.*$/d ; /^\s*$/d' "$patterns_file") # Remove blank lines and comments before we start iterating. + [ -n "$DEBUG" ] && echo >&2 "> Filters:" && echo >&2 "$patterns" + local filtered_out=() + while read -r filename; do + matches_any "$filename" "$patterns" && filtered_out+=("$filename") || echo "$filename" + done + [ -n "$DEBUG" ] && echo >&2 "> Files filtered out (i.e. NOT linted):" && printf >&2 '%s\n' "${filtered_out[@]}" else - git diff --cached --name-only + cat # No patterns provided: simply propagate stdin to stdout. fi } -list_files "$@" | lint_files +list_files() { + if [ $# -gt 0 ]; then + find "$@" | grep -vE '(^|/)vendor/' + else + git ls-files --exclude-standard | grep -vE '(^|/)vendor/' + fi +} + +if [ $# = 1 ] && [ -f "$1" ]; then + lint "$1" +elif [ -n "$PARALLEL" ]; then + list_files "$@" | filter_out "$LINT_IGNORE_FILE" | xargs -n1 -P16 "$0" +else + list_files "$@" | filter_out "$LINT_IGNORE_FILE" | lint_files +fi diff --git a/provisioning/README.md b/provisioning/README.md new file mode 100755 index 0000000..627bb42 --- /dev/null +++ b/provisioning/README.md @@ -0,0 +1,55 @@ +# Weaveworks provisioning + +## Introduction + +This project allows you to get hold of some machine either locally or on one of the below cloud providers: + +* Amazon Web Services +* Digital Ocean +* Google Cloud Platform + +You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. 
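
One detail of the `lint` changes above worth spelling out: `matches_any` uses an unquoted `[[ "$filename" == $pattern ]]`, so every line of the `.lintignore` file acts as a shell glob. An equivalent sketch in Python using `fnmatch`, with a made-up ignore list (note one divergence: `fnmatch`'s `*` also matches across `/`, which bash's glob does not):

```
from fnmatch import fnmatch

patterns = ['vendor/*', '*.pb.go', 'coverage.html']  # hypothetical .lintignore

def matches_any(filename, patterns):
    return any(fnmatch(filename, p) for p in patterns)

files = ['lint', 'vendor/x/y.go', 'api.pb.go', 'main.go']
print([f for f in files if not matches_any(f, patterns)])
# -> ['lint', 'main.go']
```
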
+ +## Set up + +* You will need [Vagrant](https://www.vagrantup.com) installed on your machine and added to your `PATH` in order to be able to provision local (virtual) machines automatically. + + * On macOS: `brew install vagrant` + * On Linux (via Aptitude): `sudo apt install vagrant` + * For other platforms or more details, see [here](https://www.vagrantup.com/docs/installation/) + +* You will need [Terraform](https://www.terraform.io) installed on your machine and added to your `PATH` in order to be able to provision cloud-hosted machines automatically. + + * On macOS: `brew install terraform` + * On Linux (via Aptitude): `sudo apt install terraform` + * If you need a specific version: + + curl -fsS https://releases.hashicorp.com/terraform/x.y.z/terraform_x.y.z_linux_amd64.zip | gunzip > terraform && chmod +x terraform && sudo mv terraform /usr/bin + + * For other platforms or more details, see [here](https://www.terraform.io/intro/getting-started/install.html) + +* Depending on the cloud provider, you may have to create an account, manually onboard, create and register SSH keys, etc. + Please refer to the `README.md` in each sub-folder for more details. + +## Usage in scripts + +Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either: + +* `gcp_on` / `gcp_off` +* `do_on` / `do_off` +* `aws_on` / `aws_off` + +## Usage in shell + +Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either: + +* `gcp_on` / `gcp_off` +* `do_on` / `do_off` +* `aws_on` / `aws_off` + +Indeed, the functions defined in `setup.sh` are also exported as aliases, so you can call them from your shell directly. + +Other aliases are also defined, in order to make your life easier: + +* `tf_ssh`: to ease SSH-ing into the virtual machines, reading the username and IP address to use from Terraform, as well as setting default SSH options. +* `tf_ansi`: to ease applying an Ansible playbook to a set of virtual machines, dynamically creating the inventory, as well as setting default SSH options. diff --git a/provisioning/aws/README.md b/provisioning/aws/README.md new file mode 100644 index 0000000..f4f018f --- /dev/null +++ b/provisioning/aws/README.md @@ -0,0 +1,90 @@ +# Amazon Web Services + +## Introduction + +This project allows you to get hold of some machine on Amazon Web Services. +You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. + +## Setup + +* Log in [weaveworks.signin.aws.amazon.com/console](https://weaveworks.signin.aws.amazon.com/console/) with your account. + +* Go to `Services` > `IAM` > `Users` > Click on your username > `Security credentials` > `Create access key`. + Your access key and secret key will appear on the screen. Set these as environment variables: + +``` +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +``` + +* Go to `Services` > `EC2` > Select the availability zone you want to use (see top right corner, e.g. `us-east-1`) > `Import Key Pair`. + Enter your SSH public key and the name for it, and click `Import`.
+ Set the key pair's name and the path to your private key as environment variables: + +``` +export TF_VAR_aws_public_key_name= +export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa" +``` + +* Set your current IP address as an environment variable: + +``` +export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) +``` + + or pass it as a Terraform variable: + +``` +$ terraform apply -var "client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)" +``` + +### Bash aliases + +You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file: + +``` +function _aws_on() { + export AWS_ACCESS_KEY_ID="" # Replace with appropriate value. + export AWS_SECRET_ACCESS_KEY="" # Replace with appropriate value. + export TF_VAR_aws_public_key_name="" # Replace with appropriate value. + export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value. +} +alias _aws_on='_aws_on' +function _aws_off() { + unset AWS_ACCESS_KEY_ID + unset AWS_SECRET_ACCESS_KEY + unset TF_VAR_aws_public_key_name + unset TF_VAR_aws_private_key_path +} +alias _aws_off='_aws_off' +``` + +N.B.: + +* sourcing `../setup.sh` defines aliases called `aws_on` and `aws_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above); +* `../setup.sh`'s `aws_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information. + +## Usage + +* Create the machine: `terraform apply` +* Show the machine's status: `terraform show` +* Stop and destroy the machine: `terraform destroy` +* SSH into the newly-created machine: + +``` +$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips` +# N.B.: the default username will differ depending on the AMI/OS you installed, e.g. ubuntu for Ubuntu, ec2-user for Red Hat, etc. +``` + +or + +``` +source ../setup.sh +tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned. +``` + +## Resources + +* [https://www.terraform.io/docs/providers/aws/](https://www.terraform.io/docs/providers/aws/) +* [https://www.terraform.io/docs/providers/aws/r/instance.html](https://www.terraform.io/docs/providers/aws/r/instance.html) +* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html) diff --git a/provisioning/aws/main.tf b/provisioning/aws/main.tf new file mode 100755 index 0000000..f4be8c3 --- /dev/null +++ b/provisioning/aws/main.tf @@ -0,0 +1,137 @@ +# Specify the provider and access details +provider "aws" { + # Access key, secret key and region are sourced from environment variables or input arguments -- see README.md + region = "${var.aws_dc}" +} + +resource "aws_security_group" "allow_ssh" { + name = "${var.name}_allow_ssh" + description = "AWS security group to allow SSH-ing onto AWS EC2 instances (created using Terraform)." + + # Open TCP port for SSH: + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["${var.client_ip}/32"] + } + + tags { + Name = "${var.name}_allow_ssh" + App = "${var.app}" + CreatedBy = "terraform" + } +} + +resource "aws_security_group" "allow_docker" { + name = "${var.name}_allow_docker" + description = "AWS security group to allow communication with Docker on AWS EC2 instances (created using Terraform)."
+ + # Open TCP port for Docker: + ingress { + from_port = 2375 + to_port = 2375 + protocol = "tcp" + cidr_blocks = ["${var.client_ip}/32"] + } + + tags { + Name = "${var.name}_allow_docker" + App = "${var.app}" + CreatedBy = "terraform" + } +} + +resource "aws_security_group" "allow_weave" { + name = "${var.name}_allow_weave" + description = "AWS security group to allow communication with Weave on AWS EC2 instances (created using Terraform)." + + # Open TCP port for Weave: + ingress { + from_port = 12375 + to_port = 12375 + protocol = "tcp" + cidr_blocks = ["${var.client_ip}/32"] + } + + tags { + Name = "${var.name}_allow_weave" + App = "${var.app}" + CreatedBy = "terraform" + } +} + +resource "aws_security_group" "allow_private_ingress" { + name = "${var.name}_allow_private_ingress" + description = "AWS security group to allow all private ingress traffic on AWS EC2 instances (created using Terraform)." + + # Full inbound local network access on both TCP and UDP + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["${var.aws_vpc_cidr_block}"] + } + + tags { + Name = "${var.name}_allow_private_ingress" + App = "${var.app}" + CreatedBy = "terraform" + } +} + +resource "aws_security_group" "allow_all_egress" { + name = "${var.name}_allow_all_egress" + description = "AWS security group to allow all egress traffic on AWS EC2 instances (created using Terraform)." + + # Full outbound internet access on both TCP and UDP + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags { + Name = "${var.name}_allow_all_egress" + App = "${var.app}" + CreatedBy = "terraform" + } +} + +resource "aws_instance" "tf_test_vm" { + instance_type = "${var.aws_size}" + count = "${var.num_hosts}" + + # Lookup the correct AMI based on the region we specified + ami = "${lookup(var.aws_amis, var.aws_dc)}" + + key_name = "${var.aws_public_key_name}" + + security_groups = [ + "${aws_security_group.allow_ssh.name}", + "${aws_security_group.allow_docker.name}", + "${aws_security_group.allow_weave.name}", + "${aws_security_group.allow_private_ingress.name}", + "${aws_security_group.allow_all_egress.name}", + ] + + # Wait for machine to be SSH-able: + provisioner "remote-exec" { + inline = ["exit"] + + connection { + type = "ssh" + + # Lookup the correct username based on the AMI we specified + user = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}" + private_key = "${file("${var.aws_private_key_path}")}" + } + } + + tags { + Name = "${var.name}-${count.index}" + App = "${var.app}" + CreatedBy = "terraform" + } +} diff --git a/provisioning/aws/outputs.tf b/provisioning/aws/outputs.tf new file mode 100755 index 0000000..587986b --- /dev/null +++ b/provisioning/aws/outputs.tf @@ -0,0 +1,54 @@ +output "username" { + value = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}" +} + +output "public_ips" { + value = ["${aws_instance.tf_test_vm.*.public_ip}"] +} + +output "hostnames" { + value = "${join("\n", + "${formatlist("%v.%v.%v", + aws_instance.tf_test_vm.*.tags.Name, + aws_instance.tf_test_vm.*.availability_zone, + var.app + )}" + )}" +} + +# /etc/hosts file for the Droplets: +output "private_etc_hosts" { + value = "${join("\n", + "${formatlist("%v %v.%v.%v", + aws_instance.tf_test_vm.*.private_ip, + aws_instance.tf_test_vm.*.tags.Name, + aws_instance.tf_test_vm.*.availability_zone, + var.app + )}" + )}" +} + +# /etc/hosts file for the client: +output "public_etc_hosts" { + value = "${join("\n", + "${formatlist("%v 
%v.%v.%v", + aws_instance.tf_test_vm.*.public_ip, + aws_instance.tf_test_vm.*.tags.Name, + aws_instance.tf_test_vm.*.availability_zone, + var.app + )}" + )}" +} + +output "ansible_inventory" { + value = "${format("[all]\n%s", join("\n", + "${formatlist("%v private_ip=%v", + aws_instance.tf_test_vm.*.public_ip, + aws_instance.tf_test_vm.*.private_ip, + )}" + ))}" +} + +output "private_key_path" { + value = "${var.aws_private_key_path}" +} diff --git a/provisioning/aws/variables.tf b/provisioning/aws/variables.tf new file mode 100755 index 0000000..5f4b462 --- /dev/null +++ b/provisioning/aws/variables.tf @@ -0,0 +1,68 @@ +variable "client_ip" { + description = "IP address of the client machine" +} + +variable "app" { + description = "Name of the application using the created EC2 instance(s)." + default = "default" +} + +variable "name" { + description = "Name of the EC2 instance(s)." + default = "test" +} + +variable "num_hosts" { + description = "Number of EC2 instance(s)." + default = 1 +} + +variable "aws_vpc_cidr_block" { + description = "AWS VPC CIDR block to use to attribute private IP addresses." + default = "172.31.0.0/16" +} + +variable "aws_public_key_name" { + description = "Name of the SSH keypair to use in AWS." +} + +variable "aws_private_key_path" { + description = "Path to file containing private key" + default = "~/.ssh/id_rsa" +} + +variable "aws_dc" { + description = "The AWS region to create things in." + default = "us-east-1" +} + +variable "aws_amis" { + default = { + # Ubuntu Server 16.04 LTS (HVM), SSD Volume Type: + "us-east-1" = "ami-40d28157" + "eu-west-2" = "ami-23d0da47" + + # Red Hat Enterprise Linux 7.3 (HVM), SSD Volume Type: + + #"us-east-1" = "ami-b63769a1" + + # CentOS 7 (x86_64) - with Updates HVM + + #"us-east-1" = "ami-6d1c2007" + } +} + +variable "aws_usernames" { + description = "User to SSH as into the AWS instance." + + default = { + "ami-40d28157" = "ubuntu" # Ubuntu Server 16.04 LTS (HVM) + "ami-b63769a1" = "ec2-user" # Red Hat Enterprise Linux 7.3 (HVM) + "ami-6d1c2007" = "centos" # CentOS 7 (x86_64) - with Updates HVM + } +} + +variable "aws_size" { + description = "AWS' selected machine size" + default = "t2.medium" # Instance with 2 cores & 4 GB memory +} diff --git a/provisioning/do/README.md b/provisioning/do/README.md new file mode 100755 index 0000000..d958f18 --- /dev/null +++ b/provisioning/do/README.md @@ -0,0 +1,98 @@ +# Digital Ocean + +## Introduction + +This project allows you to get hold of some machine on Digital Ocean. +You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc. + +## Setup + +* Log in [cloud.digitalocean.com](https://cloud.digitalocean.com) with your account. + +* Go to `Settings` > `Security` > `SSH keys` > `Add SSH Key`. + Enter your SSH public key and the name for it, and click `Add SSH Key`. + Set the path to your private key as an environment variable: + +``` +export DIGITALOCEAN_SSH_KEY_NAME= +export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa" +``` + +* Go to `API` > `Tokens` > `Personal access tokens` > `Generate New Token` + Enter your token name and click `Generate Token` to get your 64-characters-long API token. + Set these as environment variables: + +``` +export DIGITALOCEAN_TOKEN_NAME="" +export DIGITALOCEAN_TOKEN= +``` + +* Run the following command to get the Digital Ocean ID for your SSH public key (e.g. 
`1234567`) and set it as an environment variable: + +``` +$ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" \ +-H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" \ +| jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id') +``` + + or pass it as a Terraform variable: + +``` +$ terraform \ +-var 'do_private_key_path=' \ +-var 'do_public_key_id=' +``` + +### Bash aliases + +You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file: + +``` +function _do_on() { + export DIGITALOCEAN_TOKEN_NAME="" # Replace with appropriate value. + export DIGITALOCEAN_TOKEN= # Replace with appropriate value. + export DIGITALOCEAN_SSH_KEY_NAME="" # Replace with appropriate value. + export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value. + export TF_VAR_do_public_key_path="$HOME/.ssh/id_rsa.pub" # Replace with appropriate value. + export TF_VAR_do_public_key_id= # Replace with appropriate value. +} +alias _do_on='_do_on' +function _do_off() { + unset DIGITALOCEAN_TOKEN_NAME + unset DIGITALOCEAN_TOKEN + unset DIGITALOCEAN_SSH_KEY_NAME + unset TF_VAR_do_private_key_path + unset TF_VAR_do_public_key_path + unset TF_VAR_do_public_key_id +} +alias _do_off='_do_off' +``` + +N.B.: + +* sourcing `../setup.sh` defines aliases called `do_on` and `do_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above); +* `../setup.sh`'s `do_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information. + +## Usage + +* Create the machine: `terraform apply` +* Show the machine's status: `terraform show` +* Stop and destroy the machine: `terraform destroy` +* SSH into the newly-created machine: + +``` +$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips` +``` + +or + +``` +source ../setup.sh +tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned. +``` + +## Resources + +* [https://www.terraform.io/docs/providers/do/](https://www.terraform.io/docs/providers/do/) +* [https://www.terraform.io/docs/providers/do/r/droplet.html](https://www.terraform.io/docs/providers/do/r/droplet.html) +* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html) diff --git a/provisioning/do/main.tf b/provisioning/do/main.tf new file mode 100755 index 0000000..834cdf7 --- /dev/null +++ b/provisioning/do/main.tf @@ -0,0 +1,42 @@ +provider "digitalocean" { + # See README.md for setup instructions. 
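+  # The token is sourced from the DIGITALOCEAN_TOKEN environment variable
+  # (see README.md), which is why this block can stay empty, e.g.:
+  #   export DIGITALOCEAN_TOKEN="<your API token>"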
+} + +# Tags to label and organize droplets: +resource "digitalocean_tag" "name" { + name = "${var.name}" +} + +resource "digitalocean_tag" "app" { + name = "${var.app}" +} + +resource "digitalocean_tag" "terraform" { + name = "terraform" +} + +resource "digitalocean_droplet" "tf_test_vm" { + ssh_keys = ["${var.do_public_key_id}"] + image = "${var.do_os}" + region = "${var.do_dc}" + size = "${var.do_size}" + name = "${var.name}-${count.index}" + count = "${var.num_hosts}" + + tags = [ + "${var.app}", + "${var.name}", + "terraform", + ] + + # Wait for machine to be SSH-able: + provisioner "remote-exec" { + inline = ["exit"] + + connection { + type = "ssh" + user = "${var.do_username}" + private_key = "${file("${var.do_private_key_path}")}" + } + } +} diff --git a/provisioning/do/outputs.tf b/provisioning/do/outputs.tf new file mode 100755 index 0000000..5f0ff45 --- /dev/null +++ b/provisioning/do/outputs.tf @@ -0,0 +1,57 @@ +output "username" { + value = "${var.do_username}" +} + +output "public_ips" { + value = ["${digitalocean_droplet.tf_test_vm.*.ipv4_address}"] +} + +output "hostnames" { + value = "${join("\n", + "${formatlist("%v.%v.%v", + digitalocean_droplet.tf_test_vm.*.name, + digitalocean_droplet.tf_test_vm.*.region, + var.app + )}" + )}" +} + +# /etc/hosts file for the Droplets: +# N.B.: by default Digital Ocean droplets only have public IPs, but in order to +# be consistent with other providers' recipes, we provide an output to generate +# an /etc/hosts file on the Droplets, even though it is using public IPs only. +output "private_etc_hosts" { + value = "${join("\n", + "${formatlist("%v %v.%v.%v", + digitalocean_droplet.tf_test_vm.*.ipv4_address, + digitalocean_droplet.tf_test_vm.*.name, + digitalocean_droplet.tf_test_vm.*.region, + var.app + )}" + )}" +} + +# /etc/hosts file for the client: +output "public_etc_hosts" { + value = "${join("\n", + "${formatlist("%v %v.%v.%v", + digitalocean_droplet.tf_test_vm.*.ipv4_address, + digitalocean_droplet.tf_test_vm.*.name, + digitalocean_droplet.tf_test_vm.*.region, + var.app + )}" + )}" +} + +output "ansible_inventory" { + value = "${format("[all]\n%s", join("\n", + "${formatlist("%v private_ip=%v", + digitalocean_droplet.tf_test_vm.*.ipv4_address, + digitalocean_droplet.tf_test_vm.*.ipv4_address + )}" + ))}" +} + +output "private_key_path" { + value = "${var.do_private_key_path}" +} diff --git a/provisioning/do/variables.tf b/provisioning/do/variables.tf new file mode 100755 index 0000000..6f7f40e --- /dev/null +++ b/provisioning/do/variables.tf @@ -0,0 +1,185 @@ +variable "client_ip" { + description = "IP address of the client machine" +} + +variable "app" { + description = "Name of the application using the created droplet(s)." + default = "default" +} + +variable "name" { + description = "Name of the droplet(s)." + default = "test" +} + +variable "num_hosts" { + description = "Number of droplet(s)." 
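+  # Can be overridden at apply time, e.g.: terraform apply -var 'num_hosts=3'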
+ default = 1 +} + +variable "do_private_key_path" { + description = "Digital Ocean SSH private key path" + default = "~/.ssh/id_rsa" +} + +variable "do_public_key_id" { + description = "Digital Ocean ID for your SSH public key" + + # You can retrieve it and set it as an environment variable this way: + + # $ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" | jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id') +} + +variable "do_username" { + description = "Digital Ocean SSH username" + default = "root" +} + +variable "do_os" { + description = "Digital Ocean OS" + default = "ubuntu-16-04-x64" +} + +# curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/images?page=1&per_page=999999" | jq ".images | .[] | .slug" | grep -P "ubuntu|coreos|centos" | grep -v alpha | grep -v beta +# "ubuntu-16-04-x32" +# "ubuntu-16-04-x64" +# "ubuntu-16-10-x32" +# "ubuntu-16-10-x64" +# "ubuntu-14-04-x32" +# "ubuntu-14-04-x64" +# "ubuntu-12-04-x64" +# "ubuntu-12-04-x32" +# "coreos-stable" +# "centos-6-5-x32" +# "centos-6-5-x64" +# "centos-7-0-x64" +# "centos-7-x64" +# "centos-6-x64" +# "centos-6-x32" +# "centos-5-x64" +# "centos-5-x32" + +# Digital Ocean datacenters +# See also: +# $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/regions" | jq -c ".regions | .[] | .slug" | sort -u + +variable "do_dc_ams2" { + description = "Digital Ocean Amsterdam Datacenter 2" + default = "ams2" +} + +variable "do_dc_ams3" { + description = "Digital Ocean Amsterdam Datacenter 3" + default = "ams3" +} + +variable "do_dc_blr1" { + description = "Digital Ocean Bangalore Datacenter 1" + default = "blr1" +} + +variable "do_dc_fra1" { + description = "Digital Ocean Frankfurt Datacenter 1" + default = "fra1" +} + +variable "do_dc_lon1" { + description = "Digital Ocean London Datacenter 1" + default = "lon1" +} + +variable "do_dc_nyc1" { + description = "Digital Ocean New York Datacenter 1" + default = "nyc1" +} + +variable "do_dc_nyc2" { + description = "Digital Ocean New York Datacenter 2" + default = "nyc2" +} + +variable "do_dc_nyc3" { + description = "Digital Ocean New York Datacenter 3" + default = "nyc3" +} + +variable "do_dc_sfo1" { + description = "Digital Ocean San Francisco Datacenter 1" + default = "sfo1" +} + +variable "do_dc_sfo2" { + description = "Digital Ocean San Francisco Datacenter 2" + default = "sfo2" +} + +variable "do_dc_sgp1" { + description = "Digital Ocean Singapore Datacenter 1" + default = "sgp1" +} + +variable "do_dc_tor1" { + description = "Digital Ocean Toronto Datacenter 1" + default = "tor1" +} + +variable "do_dc" { + description = "Digital Ocean's selected datacenter" + default = "lon1" +} + +variable "do_size" { + description = "Digital Ocean's selected machine size" + default = "4gb" +} + +# Digital Ocean sizes + + +# See also: + + +# $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/sizes" | jq -c ".sizes | .[] | .slug" + + +# "512mb" + + +# "1gb" + + +# "2gb" + + +# "4gb" + + +# "8gb" + + +# "16gb" + + +# "m-16gb" + + +# "32gb" + + +# "m-32gb" + + +# "48gb" + + +# "m-64gb" + + +# "64gb" + + +# "m-128gb" + + +# "m-224gb" + diff --git a/provisioning/gcp/README.md 
b/provisioning/gcp/README.md
new file mode 100755
index 0000000..b2d6622
--- /dev/null
+++ b/provisioning/gcp/README.md
@@ -0,0 +1,126 @@
+# Google Cloud Platform
+
+## Introduction
+
+This project allows you to get hold of some machines on Google Cloud Platform.
+You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
+
+## Setup
+
+* Log in to [console.cloud.google.com](https://console.cloud.google.com) with your Google account.
+
+* Go to `API Manager` > `Credentials` > `Create credentials` > `Service account key`,
+  in `Service account`, select `Compute Engine default service account`,
+  in `Key type`, select `JSON`, and then click `Create`.
+
+* This will download a JSON file to your machine. Place this file wherever you want and then create the following environment variables:
+
+```
+$ export GOOGLE_CREDENTIALS_FILE="path/to/your.json"
+$ export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
+```
+
+* Go to `Compute Engine` > `Metadata` > `SSH keys` and add your username and SSH public key;
+  or set it up using `gcloud compute project-info add-metadata --metadata-from-file sshKeys=~/.ssh/id_rsa.pub`.
+  If you used your default SSH key (i.e. `~/.ssh/id_rsa.pub`), then you do not have anything to do.
+  Otherwise, you will have to either define the below environment variables:
+
+```
+$ export TF_VAR_gcp_public_key_path=
+$ export TF_VAR_gcp_private_key_path=
+```
+
+ or pass these as Terraform variables:
+
+```
+$ terraform apply \
+  -var 'gcp_public_key_path=' \
+  -var 'gcp_private_key_path='
+```
+
+* Set the username in your public key as an environment variable.
+  This will be used as the username of the Linux account created on the machine, which you will need in order to SSH into it later on.
+
+  N.B.:
+  * GCP already has the username set from the SSH public key you uploaded in the previous step.
+  * If your username is an email address, e.g. `name@domain.com`, then GCP uses `name` as the username.
+
+```
+export TF_VAR_gcp_username=
+```
+
+* Set your current IP address as an environment variable:
+
+```
+export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
+```
+
+ or pass it as a Terraform variable (double quotes so the command substitution is evaluated):
+
+```
+$ terraform apply -var "client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)"
+```
+
+* Set your project as an environment variable:
+
+```
+export TF_VAR_gcp_project=weave-net-tests
+```
+
+ or pass it as a Terraform variable:
+
+```
+$ terraform apply -var 'gcp_project=weave-net-tests'
+```
+
+### Bash aliases
+
+You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with a single command by adding the below to your `~/.bashrc` file:
+
+```
+function _gcp_on() {
+    export GOOGLE_CREDENTIALS_FILE=""                            # Replace with appropriate value.
+    export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
+    export TF_VAR_gcp_public_key_path="$HOME/.ssh/id_rsa.pub"    # Replace with appropriate value.
+    export TF_VAR_gcp_private_key_path="$HOME/.ssh/id_rsa"       # Replace with appropriate value.
+    export TF_VAR_gcp_username=""                                # Replace with appropriate value.
+    export TF_VAR_gcp_project=""                                 # Replace with appropriate value.
+}
+alias _gcp_on='_gcp_on'
+function _gcp_off() {
+    unset GOOGLE_CREDENTIALS_FILE
+    unset GOOGLE_CREDENTIALS
+    unset TF_VAR_gcp_public_key_path
+    unset TF_VAR_gcp_private_key_path
+    unset TF_VAR_gcp_username
+    unset TF_VAR_gcp_project
+}
+alias _gcp_off='_gcp_off'
+```
+
+N.B.:
+
+* sourcing `../setup.sh` defines aliases called `gcp_on` and `gcp_off`, similar to the above, but without the leading `_` in their names;
+* `../setup.sh`'s `gcp_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
+
+## Usage
+
+* Create the machine: `terraform apply`
+* Show the machine's status: `terraform show`
+* Stop and destroy the machine: `terraform destroy`
+* SSH into the newly-created machine:
+
+```
+$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
+```
+
+or
+
+```
+source ../setup.sh
+tf_ssh 1  # Or the nth machine, if multiple VMs are provisioned.
+```
+
+## Resources
+
+* [https://www.terraform.io/docs/providers/google/](https://www.terraform.io/docs/providers/google/)
+* [https://www.terraform.io/docs/providers/google/r/compute_instance.html](https://www.terraform.io/docs/providers/google/r/compute_instance.html)
+* [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)
diff --git a/provisioning/gcp/main.tf b/provisioning/gcp/main.tf
new file mode 100755
--- /dev/null
+++ b/provisioning/gcp/main.tf
+provider "google" {
+  # Credentials are sourced from the GOOGLE_CREDENTIALS environment variable -- see README.md
+  region  = "${var.gcp_region}"
+
+  project = "${var.gcp_project}"
+}
+
+resource "google_compute_instance" "tf_test_vm" {
+  name         = "${var.name}-${count.index}"
+  machine_type = "${var.gcp_size}"
+  zone         = "${var.gcp_zone}"
+  count        = "${var.num_hosts}"
+
+  disk {
+    image = "${var.gcp_image}"
+  }
+
+  tags = [
+    "${var.app}",
+    "${var.name}",
+    "terraform",
+  ]
+
+  network_interface {
+    network = "${var.gcp_network}"
+
+    access_config {
+      // Ephemeral IP
+    }
+  }
+
+  metadata {
+    ssh-keys = "${var.gcp_username}:${file("${var.gcp_public_key_path}")}"
+  }
+
+  # Wait for machine to be SSH-able:
+  provisioner "remote-exec" {
+    inline = ["exit"]
+
+    connection {
+      type        = "ssh"
+      user        = "${var.gcp_username}"
+      private_key = "${file("${var.gcp_private_key_path}")}"
+    }
+  }
+}
+
+resource "google_compute_firewall" "fw-allow-docker-and-weave" {
+  name        = "${var.name}-allow-docker-and-weave"
+  network     = "${var.gcp_network}"
+  target_tags = ["${var.name}"]
+
+  allow {
+    protocol = "tcp"
+    ports    = ["2375", "12375"]
+  }
+
+  source_ranges = ["${var.client_ip}"]
+}
+
+# Required for FastDP crypto in Weave Net:
+resource "google_compute_firewall" "fw-allow-esp" {
+  name        = "${var.name}-allow-esp"
+  network     = "${var.gcp_network}"
+  target_tags = ["${var.name}"]
+
+  allow {
+    protocol = "esp"
+  }
+
+  source_ranges = ["${var.gcp_network_global_cidr}"]
+}
diff --git a/provisioning/gcp/outputs.tf b/provisioning/gcp/outputs.tf
new file mode 100755
index 0000000..9aa1e33
--- /dev/null
+++ b/provisioning/gcp/outputs.tf
@@ -0,0 +1,66 @@
+output "username" {
+  value = "${var.gcp_username}"
+}
+
+output "public_ips" {
+  value = ["${google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip}"]
+}
+
+output "hostnames" {
+  value = "${join("\n",
+    "${formatlist("%v.%v.%v",
+      google_compute_instance.tf_test_vm.*.name,
+      google_compute_instance.tf_test_vm.*.zone,
+      var.app
+    )}"
+  )}"
+}
+
+# /etc/hosts file for the Compute Engine instances:
+output "private_etc_hosts" {
+  value = "${join("\n",
+    "${formatlist("%v %v.%v.%v",
+      google_compute_instance.tf_test_vm.*.network_interface.0.address,
+      google_compute_instance.tf_test_vm.*.name,
+      google_compute_instance.tf_test_vm.*.zone,
+      var.app
+    )}"
+  )}"
+}
+
+# /etc/hosts file for the client:
+output "public_etc_hosts" {
+  value = "${join("\n",
+    "${formatlist("%v %v.%v.%v",
+      google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip,
+      google_compute_instance.tf_test_vm.*.name,
+      google_compute_instance.tf_test_vm.*.zone,
+      var.app
+    )}"
+  )}"
+}
+
+output "ansible_inventory" {
+  value = "${format("[all]\n%s", join("\n",
+    "${formatlist("%v private_ip=%v",
google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip, + google_compute_instance.tf_test_vm.*.network_interface.0.address + )}" + ))}" +} + +output "private_key_path" { + value = "${var.gcp_private_key_path}" +} + +output "instances_names" { + value = ["${google_compute_instance.tf_test_vm.*.name}"] +} + +output "image" { + value = "${var.gcp_image}" +} + +output "zone" { + value = "${var.gcp_zone}" +} diff --git a/provisioning/gcp/variables.tf b/provisioning/gcp/variables.tf new file mode 100755 index 0000000..6b2027b --- /dev/null +++ b/provisioning/gcp/variables.tf @@ -0,0 +1,77 @@ +variable "gcp_username" { + description = "Google Cloud Platform SSH username" +} + +variable "app" { + description = "Name of the application using the created Compute Engine instance(s)." + default = "default" +} + +variable "name" { + description = "Name of the Compute Engine instance(s)." + default = "test" +} + +variable "num_hosts" { + description = "Number of Compute Engine instance(s)." + default = 1 +} + +variable "client_ip" { + description = "IP address of the client machine" +} + +variable "gcp_public_key_path" { + description = "Path to file containing public key" + default = "~/.ssh/id_rsa.pub" +} + +variable "gcp_private_key_path" { + description = "Path to file containing private key" + default = "~/.ssh/id_rsa" +} + +variable "gcp_project" { + description = "Google Cloud Platform project" + default = "weave-net-tests" +} + +variable "gcp_image" { + # See also: https://cloud.google.com/compute/docs/images + # For example: + # - "ubuntu-os-cloud/ubuntu-1604-lts" + # - "debian-cloud/debian-8" + # - "centos-cloud/centos-7" + # - "rhel-cloud/rhel7" + description = "Google Cloud Platform OS" + + default = "ubuntu-os-cloud/ubuntu-1604-lts" +} + +variable "gcp_size" { + # See also: + # $ gcloud compute machine-types list + description = "Google Cloud Platform's selected machine size" + + default = "n1-standard-1" +} + +variable "gcp_region" { + description = "Google Cloud Platform's selected region" + default = "us-central1" +} + +variable "gcp_zone" { + description = "Google Cloud Platform's selected zone" + default = "us-central1-a" +} + +variable "gcp_network" { + description = "Google Cloud Platform's selected network" + default = "test" +} + +variable "gcp_network_global_cidr" { + description = "CIDR covering all regions for the selected Google Cloud Platform network" + default = "10.128.0.0/9" +} diff --git a/provisioning/setup.sh b/provisioning/setup.sh new file mode 100755 index 0000000..456878e --- /dev/null +++ b/provisioning/setup.sh @@ -0,0 +1,361 @@ +#!/bin/bash +# +# Description: +# Helper functions to programmatically provision (e.g. for CIT). +# Aliases on these functions are also created so that this script can be +# sourced in your shell, in your ~/.bashrc file, etc. and directly called. +# +# Usage: +# Source this file and call the relevant functions. 
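+#
+# Example (assuming SECRET_KEY has already been exported):
+#   $ source ./setup.sh
+#   $ gcp_on          # set GCP credentials & TF_VAR_* variables
+#   $ terraform plan  # run from the relevant provisioning directory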
+# + +function ssh_public_key() { + echo -e "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZBgLQts30PYXEMJnCU21QC+1ZE0Sv/Ry48Au3nYXn1KNoW/7C2qQ3KO2ZnpZRHCstFiU8QIlB9edi0cgcAoDWBkCiFBZEORxMvohWtrRQzf+x59o48lVjA/Fn7G+9hmavhLaDf6Qe7OhH8XUshNtnIQIUvNEWXKE75k32wUbuF8ibhJNpOOYKL4tVXK6IIKg6jR88BwGKPY/NZCl/HbhjnDJY0zCU1pZSprN6o/S953y/XXVozkh1772fCNeu4USfbt0oZOEJ57j6EWwEYIJhoeAEMAoD8ELt/bc/5iex8cuarM4Uib2JHO6WPWbBQ0NlrARIOKLrxkjjfGWarOLWBAgvwQn5zLg1pKb7aI4+jbA+ZSrII5B2HuYE9MDlU8NPL4pHrRfapGLkG/Fe9zNPvScXh+9iSWfD6G5ZoISutjiJO/iVYN0QSuj9QEIj9tl20czFz3Dhnq4sPPl5hoLunyQfajY7C/ipv6ilJyrEc0V6Z9FdPhpEI+HOgJr2vDQTFscQuyfWuzGJDZf6zPdZWo2pBql9E7piARuNAjakylGar/ebkCgfy28XQoDbDT0P0VYp+E8W5EYacx+zc5MuNhRTvbsO12fydT8V61MtA78wM/b0059feph+0zTykEHk670mYVoE3erZX+U1/BVBLSV9QzopO6/Pgx2ryriJfQ== weaveworks-cit" +} + +function decrypt() { + if [ -z "$1" ]; then + echo >&2 "Failed to decode and decrypt $2: no secret key was provided." + return 1 + fi + echo "$3" | openssl base64 -d | openssl enc -d -aes256 -pass "pass:$1" +} + +function ssh_private_key() { + # The private key has been AES256-encrypted and then Base64-encoded using the following command: + # $ openssl enc -in /tmp/weaveworks_cit_id_rsa -e -aes256 -pass stdin | openssl base64 > /tmp/weaveworks_cit_id_rsa.aes.b64 + # The below command does the reverse, i.e. base64-decode and AES-decrypt the file, and prints it to stdout. + # N.B.: Ask the password to Marc, or otherwise re-generate the SSH key using: + # $ ssh-keygen -t rsa -b 4096 -C "weaveworks-cit" + decrypt "$1" "SSH private key" "$( + cat <&2 "Failed to decode and decrypt SSH private key: no secret key was provided." + return 1 + fi + local ssh_private_key_path="$HOME/.ssh/weaveworks_cit_id_rsa" + [ -e "$ssh_private_key_path" ] && rm -f "$ssh_private_key_path" + ssh_private_key "$1" >"$ssh_private_key_path" + chmod 400 "$ssh_private_key_path" + echo "$ssh_private_key_path" +} + +function gcp_credentials() { + # The below GCP service account JSON credentials have been AES256-encrypted and then Base64-encoded using the following command: + # $ openssl enc -in ~/.ssh/weaveworks-cit.json -e -aes256 -pass stdin | openssl base64 > /tmp/weaveworks-cit.json.aes.b64 + # The below command does the reverse, i.e. base64-decode and AES-decrypt the file, and prints it to stdout. + # N.B.: Ask the password to Marc, or otherwise re-generate the credentials for GCP, as per ../tools/provisioning/gcp/README.md. + decrypt "$1" "JSON credentials" "$( + cat <&2 "Failed to configure for Digital Ocean: no value for the SECRET_KEY environment variable." + return 1 + fi + + # SSH public key: + export TF_VAR_do_public_key_path="$HOME/.ssh/weaveworks_cit_id_rsa.pub" + ssh_public_key >"$TF_VAR_do_public_key_path" + export DIGITALOCEAN_SSH_KEY_NAME="weaveworks-cit" + export TF_VAR_do_public_key_id=5228799 + + # SSH private key: + export TF_VAR_do_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY") + + # API token: + # The below Digital Ocean token has been AES256-encrypted and then Base64-encoded using the following command: + # $ openssl enc -in /tmp/digital_ocean_token.txt -e -aes256 -pass stdin | openssl base64 > /tmp/digital_ocean_token.txt.aes.b64 + # The below command does the reverse, i.e. base64-decode and AES-decrypt the file, and prints it to stdout. + # N.B.: Ask the password to Marc, or otherwise re-generate the token for Digital Ocean, as per ../tools/provisioning/do/README.md. 
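+    # Sanity check (sketch): decrypt() above is the inverse of
+    #   openssl enc -e -aes256 -pass "pass:$SECRET_KEY" | openssl base64
+    # so a token re-encrypted with that pipeline can be pasted below unchanged.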
+ export DIGITALOCEAN_TOKEN=$(decrypt "$SECRET_KEY" "Digital Ocean token" "U2FsdGVkX1/Gq5Rj9dDDraME8xK30JOyJ9dhfQzPBaaePJHqDPIG6of71DdJW0UyFUyRtbRflCPaZ8Um1pDJpU5LoNWQk4uCApC8+xciltT73uQtttLBG8FqgFBvYIHS") + export DIGITALOCEAN_TOKEN_NAME="weaveworks-cit" + export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) +} +alias do_on='do_on' + +function do_off() { + unset TF_VAR_do_public_key_path + unset DIGITALOCEAN_SSH_KEY_NAME + unset TF_VAR_do_public_key_id + unset TF_VAR_do_private_key_path + unset DIGITALOCEAN_TOKEN + unset DIGITALOCEAN_TOKEN_NAME + unset TF_VAR_client_ip +} +alias do_off='do_off' + +# shellcheck disable=2155 +function gcp_on() { + # Set up everything required to run tests on GCP. + # Steps from ../tools/provisioning/gcp/README.md have been followed. + # All sensitive files have been encrypted, see respective functions. + if [ -z "$SECRET_KEY" ]; then + echo >&2 "Failed to configure for Google Cloud Platform: no value for the SECRET_KEY environment variable." + return 1 + fi + + # SSH public key and SSH username: + export TF_VAR_gcp_public_key_path="$HOME/.ssh/weaveworks_cit_id_rsa.pub" + ssh_public_key >"$TF_VAR_gcp_public_key_path" + export TF_VAR_gcp_username=$(cut -d' ' -f3 "$TF_VAR_gcp_public_key_path" | cut -d'@' -f1) + + # SSH private key: + export TF_VAR_gcp_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY") + + # JSON credentials: + export GOOGLE_CREDENTIALS_FILE="$HOME/.ssh/weaveworks-cit.json" + [ -e "$GOOGLE_CREDENTIALS_FILE" ] && rm -f "$GOOGLE_CREDENTIALS_FILE" + gcp_credentials "$SECRET_KEY" >"$GOOGLE_CREDENTIALS_FILE" + chmod 400 "$GOOGLE_CREDENTIALS_FILE" + export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE") + + export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/) + export TF_VAR_gcp_project="${PROJECT:-"weave-net-tests"}" + # shellcheck disable=2015 + [ -z "$PROJECT" ] && echo >&2 "WARNING: no value provided for PROJECT environment variable: defaulted it to $TF_VAR_gcp_project." || true +} +alias gcp_on='gcp_on' + +function gcp_off() { + unset TF_VAR_gcp_public_key_path + unset TF_VAR_gcp_username + unset TF_VAR_gcp_private_key_path + unset GOOGLE_CREDENTIALS_FILE + unset GOOGLE_CREDENTIALS + unset TF_VAR_client_ip + unset TF_VAR_gcp_project +} +alias gcp_off='gcp_off' + +# shellcheck disable=2155 +function aws_on() { + # Set up everything required to run tests on Amazon Web Services. + # Steps from ../tools/provisioning/aws/README.md have been followed. + # All sensitive files have been encrypted, see respective functions. + if [ -z "$SECRET_KEY" ]; then + echo >&2 "Failed to configure for Amazon Web Services: no value for the SECRET_KEY environment variable." + return 1 + fi + + # SSH public key: + export TF_VAR_aws_public_key_name="weaveworks_cit_id_rsa" + + # SSH private key: + export TF_VAR_aws_private_key_path=$(set_up_ssh_private_key "$SECRET_KEY") + + # The below AWS access key ID and secret access key have been AES256-encrypted and then Base64-encoded using the following commands: + # $ openssl enc -in /tmp/aws_access_key_id.txt -e -aes256 -pass stdin | openssl base64 > /tmp/aws_access_key_id.txt.aes.b64 + # $ openssl enc -in /tmp/aws_secret_access_key.txt -e -aes256 -pass stdin | openssl base64 > /tmp/aws_secret_access_key.txt.aes.b64 + # The below commands do the reverse, i.e. base64-decode and AES-decrypt the encrypted and encoded strings, and print it to stdout. 
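+    # Once exported, AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are read both
+    # by Terraform's AWS provider and by the AWS CLI, if the latter is installed.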
+ # N.B.: Ask the password to Marc, or otherwise re-generate the AWS access key ID and secret access key, as per ../tools/provisioning/aws/README.md. + export AWS_ACCESS_KEY_ID="$(decrypt "$SECRET_KEY" "AWS access key ID" "U2FsdGVkX18Txjm2PWSlJsToYm1vv4dMTtVLkRNiQbrC6Y6GuIHb1ao5MmGPJ1wf")" + export AWS_SECRET_ACCESS_KEY="$(decrypt "$SECRET_KEY" "AWS secret access key" "$( + cat <&2 <<-EOF +ERROR: $1 + +Usage: + \$ tf_ssh [OPTION]... +Examples: + \$ tf_ssh 1 + \$ tf_ssh 1 -o LogLevel VERBOSE + \$ tf_ssh 1 -i ~/.ssh/custom_private_key_id_rsa +Available machines: +EOF + cat -n >&2 <<<"$(terraform output public_etc_hosts)" +} + +# shellcheck disable=SC2155 +function tf_ssh() { + [ -z "$1" ] && tf_ssh_usage "No host ID provided." && return 1 + local ip="$(sed "$1q;d" <<<"$(terraform output public_etc_hosts)" | cut -d ' ' -f 1)" + shift # Drop the first argument, corresponding to the machine ID, to allow passing other arguments to SSH using "$@" -- see below. + [ -z "$ip" ] && tf_ssh_usage "Invalid host ID provided." && return 1 + # shellcheck disable=SC2029 + ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" "$(terraform output username)@$ip" +} +alias tf_ssh='tf_ssh' + +function tf_ansi_usage() { + cat >&2 <<-EOF +ERROR: $1 + +Usage: + \$ tf_ansi [OPTION]... +Examples: + \$ tf_ansi setup_weave-net_dev + \$ tf_ansi 1 + \$ tf_ansi 1 -vvv --private-key=~/.ssh/custom_private_key_id_rsa + \$ tf_ansi setup_weave-kube --extra-vars "docker_version=1.12.6 kubernetes_version=1.5.6" +Available playbooks: +EOF + cat -n >&2 <<<"$(for file in "$(dirname "${BASH_SOURCE[0]}")"/../../config_management/*.yml; do basename "$file" | sed 's/.yml//'; done)" +} + +# shellcheck disable=SC2155,SC2064 +function tf_ansi() { + [ -z "$1" ] && tf_ansi_usage "No Ansible playbook provided." && return 1 + local id="$1" + shift # Drop the first argument to allow passing other arguments to Ansible using "$@" -- see below. + if [[ "$id" =~ ^[0-9]+$ ]]; then + local playbooks=(../../config_management/*.yml) + local path="${playbooks[(($id - 1))]}" # Select the ith entry in the list of playbooks (0-based). + else + local path="$(dirname "${BASH_SOURCE[0]}")/../../config_management/$id.yml" + fi + local inventory="$(mktemp /tmp/ansible_inventory_XXX)" + trap 'rm -f $inventory' SIGINT SIGTERM RETURN + echo -e "$(terraform output ansible_inventory)" >"$inventory" + [ ! -r "$path" ] && tf_ansi_usage "Ansible playbook not found: $path" && return 1 + ansible-playbook "$@" -u "$(terraform output username)" -i "$inventory" --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" "$path" +} +alias tf_ansi='tf_ansi' diff --git a/push-images b/push-images new file mode 100755 index 0000000..60edac7 --- /dev/null +++ b/push-images @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +QUAY_PREFIX=quay.io/ +IMAGES=$(make images) +IMAGE_TAG=$(./tools/image-tag) + +usage() { + echo "$0 [-no-docker-hub]" +} + +NO_DOCKER_HUB= +while [ $# -gt 0 ]; do + case "$1" in + -no-docker-hub) + NO_DOCKER_HUB=1 + shift 1 + ;; + *) + usage + exit 2 + ;; + esac +done + +pids="" +for image in ${IMAGES}; do + if [[ "$image" == *"build"* ]]; then + continue + fi + echo "Will push ${image}:${IMAGE_TAG}" + docker push "${image}:${IMAGE_TAG}" & + pids="$pids $!" 
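+    # $! is the PID of the docker push just started in the background; the
+    # wait loop at the bottom of the script propagates any push failure.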
+ + if [ -z "$NO_DOCKER_HUB" ]; then + # remove the quey prefix and push to docker hub + docker_hub_image=${image#$QUAY_PREFIX} + docker tag "${image}:${IMAGE_TAG}" "${docker_hub_image}:${IMAGE_TAG}" + echo "Will push ${docker_hub_image}:${IMAGE_TAG}" + docker push "${docker_hub_image}:${IMAGE_TAG}" & + pids="$pids $!" + fi +done + +# Wait individually for tasks so we fail-exit on any non-zero return code +for p in $pids; do + wait $p +done + +wait diff --git a/runner/runner.go b/runner/runner.go index c92ac6b..38e5a62 100644 --- a/runner/runner.go +++ b/runner/runner.go @@ -15,7 +15,7 @@ import ( "time" "github.com/mgutz/ansi" - "github.com/weaveworks/docker/pkg/mflag" + "github.com/weaveworks/common/mflag" ) const ( @@ -148,9 +148,10 @@ func updateScheduler(test string, duration float64) { func getSchedule(tests []string) ([]string, error) { var ( + userName = os.Getenv("CIRCLE_PROJECT_USERNAME") project = os.Getenv("CIRCLE_PROJECT_REPONAME") buildNum = os.Getenv("CIRCLE_BUILD_NUM") - testRun = project + "-integration-" + buildNum + testRun = userName + "-" + project + "-integration-" + buildNum shardCount = os.Getenv("CIRCLE_NODE_TOTAL") shardID = os.Getenv("CIRCLE_NODE_INDEX") requestBody = &bytes.Buffer{} diff --git a/sched b/sched index cf47773..a282558 100755 --- a/sched +++ b/sched @@ -1,20 +1,20 @@ -#!/usr/bin/python -import sys, string, json, urllib +#!/usr/bin/env python +import sys, string, urllib import requests import optparse def test_time(target, test_name, runtime): r = requests.post(target + "/record/%s/%f" % (urllib.quote(test_name, safe=""), runtime)) - print r.text + print r.text.encode('utf-8') assert r.status_code == 204 def test_sched(target, test_run, shard_count, shard_id): - tests = json.dumps({'tests': string.split(sys.stdin.read())}) - r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), data=tests) + tests = {'tests': string.split(sys.stdin.read())} + r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), json=tests) assert r.status_code == 200 result = r.json() for test in sorted(result['tests']): - print test + print test.encode('utf-8') def usage(): print "%s (--target=...) " % sys.argv[0] diff --git a/scheduler/main.py b/scheduler/main.py index 8907e20..3b540b5 100644 --- a/scheduler/main.py +++ b/scheduler/main.py @@ -19,120 +19,188 @@ app.debug = True # observations faster. alpha = 0.3 + class Test(ndb.Model): - total_run_time = ndb.FloatProperty(default=0.) # Not total, but a EWMA - total_runs = ndb.IntegerProperty(default=0) + total_run_time = ndb.FloatProperty(default=0.) 
# Not total, but a EWMA
+    total_runs = ndb.IntegerProperty(default=0)
 
-  def parallelism(self):
-    name = self.key.string_id()
-    m = re.search('(\d+)_test.sh$', name)
-    if m is None:
-      return 1
-    else:
-      return int(m.group(1))
+    def parallelism(self):
+        name = self.key.string_id()
+        m = re.search('(\d+)_test.sh$', name)
+        if m is None:
+            return 1
+        else:
+            return int(m.group(1))
+
+    def cost(self):
+        p = self.parallelism()
+        logging.info("Test %s has parallelism %d and avg run time %s",
+                     self.key.string_id(), p, self.total_run_time)
+        return self.parallelism() * self.total_run_time
 
-  def cost(self):
-    p = self.parallelism()
-    logging.info("Test %s has parallelism %d and avg run time %s", self.key.string_id(), p, self.total_run_time)
-    return self.parallelism() * self.total_run_time
 
 class Schedule(ndb.Model):
-  shards = ndb.JsonProperty()
+    shards = ndb.JsonProperty()
+
 
 @app.route('/record/<test_name>/<runtime>', methods=['POST'])
 @ndb.transactional
 def record(test_name, runtime):
-  test = Test.get_by_id(test_name)
-  if test is None:
-    test = Test(id=test_name)
-  test.total_run_time = (test.total_run_time * (1-alpha)) + (float(runtime) * alpha)
-  test.total_runs += 1
-  test.put()
-  return ('', 204)
+    test = Test.get_by_id(test_name)
+    if test is None:
+        test = Test(id=test_name)
+    test.total_run_time = (test.total_run_time *
+                           (1 - alpha)) + (float(runtime) * alpha)
+    test.total_runs += 1
+    test.put()
+    return ('', 204)
 
-@app.route('/schedule/<test_run>/<shard_count>/<shard>', methods=['POST'])
+
+@app.route(
+    '/schedule/<test_run>/<shard_count>/<shard>', methods=['POST'])
 def schedule(test_run, shard_count, shard):
-  # read tests from body
-  test_names = flask.request.get_json(force=True)['tests']
+    # read tests from body
+    test_names = flask.request.get_json(force=True)['tests']
 
-  # first see if we have a scedule already
-  schedule_id = "%s-%d" % (test_run, shard_count)
-  schedule = Schedule.get_by_id(schedule_id)
-  if schedule is not None:
+    # first see if we have a schedule already
+    schedule_id = "%s-%d" % (test_run, shard_count)
+    schedule = Schedule.get_by_id(schedule_id)
+    if schedule is not None:
+        return flask.json.jsonify(tests=schedule.shards[str(shard)])
+
+    # if not, do simple greedy algorithm
+    test_times = ndb.get_multi(
+        ndb.Key(Test, test_name) for test_name in test_names)
+
+    def avg(test):
+        if test is not None:
+            return test.cost()
+        return 1
+
+    test_times = [(test_name, avg(test))
+                  for test_name, test in zip(test_names, test_times)]
+    test_times_dict = dict(test_times)
+    test_times.sort(key=operator.itemgetter(1))
+
+    shards = {i: [] for i in xrange(shard_count)}
+    while test_times:
+        test_name, time = test_times.pop()
+
+        # find shortest shard and put it in that
+        s, _ = min(
+            ((i, sum(test_times_dict[t] for t in shards[i]))
+             for i in xrange(shard_count)),
+            key=operator.itemgetter(1))
+
+        shards[s].append(test_name)
+
+    # atomically insert or retrieve existing schedule
+    schedule = Schedule.get_or_insert(schedule_id, shards=shards)
+    return flask.json.jsonify(tests=schedule.shards[str(shard)])
 
-  # if not, do simple greedy algorithm
-  test_times = ndb.get_multi(ndb.Key(Test, test_name) for test_name in test_names)
-  def avg(test):
-    if test is not None:
-      return test.cost()
-    return 1
-  test_times = [(test_name, avg(test)) for test_name, test in zip(test_names, test_times)]
-  test_times_dict = dict(test_times)
-  test_times.sort(key=operator.itemgetter(1))
 
-  shards = {i: [] for i in xrange(shard_count)}
-  while test_times:
-    test_name, time = test_times.pop()
 
+# N.B.: only the 'build' group is used by the GC code below; the other group
+# names are best-effort descriptive.
+FIREWALL_REGEXES = [
+    re.compile(
+        r'^(?P<name>\w+)-allow-(?P<type>\w+)-(?P<build>\d+)-(?P<index>\d+)$'
+    ),
+    re.compile(r'^(?P<name>\w+)-(?P<build>\d+)-(?P<index>\d+)-allow-'
+               r'(?P<type>[\w\-]+)$'),
+]
+NAME_REGEXES = [
+    re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$'),
+    re.compile(r'^test-(?P<build>\d+)-(?P<shard>\d+)-(?P<index>\d+)$'),
+]
 
-    # find shortest shard and put it in that
-    s, _ = min(((i, sum(test_times_dict[t] for t in shards[i]))
-                for i in xrange(shard_count)), key=operator.itemgetter(1))
-    shards[s].append(test_name)
 
+def _matches_any_regex(name, regexes):
+    for regex in regexes:
+        matches = regex.match(name)
+        if matches:
+            return matches
 
-  # atomically insert or retrieve existing schedule
-  schedule = Schedule.get_or_insert(schedule_id, shards=shards)
-  return flask.json.jsonify(tests=schedule.shards[str(shard)])
 
-NAME_RE = re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$')
 PROJECTS = [
-    ('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a'),
-    ('weaveworks/scope', 'scope-integration-tests', 'us-central1-a'),
+    ('weaveworks/weave', 'weave-net-tests', 'us-central1-a', True),
+    ('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a', True),
+    ('weaveworks/scope', 'scope-integration-tests', 'us-central1-a', False),
 ]
 
+
 @app.route('/tasks/gc')
 def gc():
-  # Get list of running VMs, pick build id out of VM name
-  credentials = GoogleCredentials.get_application_default()
-  compute = discovery.build('compute', 'v1', credentials=credentials)
+    # Get list of running VMs, pick build id out of VM name
+    credentials = GoogleCredentials.get_application_default()
+    compute = discovery.build('compute', 'v1', credentials=credentials)
 
-  for repo, project, zone in PROJECTS:
-    gc_project(compute, repo, project, zone)
+    for repo, project, zone, gc_fw in PROJECTS:
+        gc_project(compute, repo, project, zone, gc_fw)
 
-  return "Done"
+    return "Done"
 
-def gc_project(compute, repo, project, zone):
-  logging.info("GCing %s, %s, %s", repo, project, zone)
-  instances = compute.instances().list(project=project, zone=zone).execute()
-  if 'items' not in instances:
-    return
-  host_by_build = collections.defaultdict(list)
-  for instance in instances['items']:
-    matches = NAME_RE.match(instance['name'])
-    if matches is None:
-      continue
-    host_by_build[int(matches.group('build'))].append(instance['name'])
-  logging.info("Running VMs by build: %r", host_by_build)
 
+def gc_project(compute, repo, project, zone, gc_fw):
+    logging.info("GCing %s, %s, %s", repo, project, zone)
+    # Get list of builds, filter down to running builds:
+    running = _get_running_builds(repo)
+    # Stop VMs for builds that aren't running:
+    _gc_compute_engine_instances(compute, project, zone, running)
+    # Remove firewall rules for builds that aren't running:
+    if gc_fw:
+        _gc_firewall_rules(compute, project, running)
 
-  # Get list of builds, filter down to runnning builds
-  result = urlfetch.fetch('https://circleci.com/api/v1/project/%s' % repo,
-                          headers={'Accept': 'application/json'})
-  assert result.status_code == 200
-  builds = json.loads(result.content)
-  running = {build['build_num'] for build in builds if not build.get('stop_time')}
-  logging.info("Runnings builds: %r", running)
-  # Stop VMs for builds that aren't running
-  stopped = []
-  for build, names in host_by_build.iteritems():
-    if build in running:
-      continue
-    for name in names:
-      stopped.append(name)
-      logging.info("Stopping VM %s", name)
-      compute.instances().delete(project=project, zone=zone, instance=name).execute()
 
+def _get_running_builds(repo):
+    result = urlfetch.fetch(
+        'https://circleci.com/api/v1/project/%s' % repo,
+        headers={'Accept': 'application/json'})
+    assert result.status_code == 200
+    builds = 
json.loads(result.content) + running = { + build['build_num'] + for build in builds if not build.get('stop_time') + } + logging.info("Runnings builds: %r", running) + return running - return + +def _get_hosts_by_build(instances): + host_by_build = collections.defaultdict(list) + for instance in instances['items']: + matches = _matches_any_regex(instance['name'], NAME_REGEXES) + if not matches: + continue + host_by_build[int(matches.group('build'))].append(instance['name']) + logging.info("Running VMs by build: %r", host_by_build) + return host_by_build + + +def _gc_compute_engine_instances(compute, project, zone, running): + instances = compute.instances().list(project=project, zone=zone).execute() + if 'items' not in instances: + return + host_by_build = _get_hosts_by_build(instances) + stopped = [] + for build, names in host_by_build.iteritems(): + if build in running: + continue + for name in names: + stopped.append(name) + logging.info("Stopping VM %s", name) + compute.instances().delete( + project=project, zone=zone, instance=name).execute() + return stopped + + +def _gc_firewall_rules(compute, project, running): + firewalls = compute.firewalls().list(project=project).execute() + if 'items' not in firewalls: + return + for firewall in firewalls['items']: + matches = _matches_any_regex(firewall['name'], FIREWALL_REGEXES) + if not matches: + continue + if int(matches.group('build')) in running: + continue + logging.info("Deleting firewall rule %s", firewall['name']) + compute.firewalls().delete( + project=project, firewall=firewall['name']).execute() diff --git a/socks/main.go b/socks/main.go index 83a2149..7cd8c70 100644 --- a/socks/main.go +++ b/socks/main.go @@ -9,8 +9,8 @@ import ( "text/template" socks5 "github.com/armon/go-socks5" - "github.com/weaveworks/docker/pkg/mflag" - "github.com/weaveworks/weave/common/mflagext" + "github.com/weaveworks/common/mflag" + "github.com/weaveworks/common/mflagext" "golang.org/x/net/context" ) diff --git a/test b/test index 6a97088..c87bdd0 100755 --- a/test +++ b/test @@ -3,12 +3,15 @@ set -e DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -GO_TEST_ARGS=(-tags netgo -cpu 4 -timeout 8m) SLOW= -NO_GO_GET= +NO_GO_GET=true +TAGS= +PARALLEL= +RACE="-race -covermode=atomic" +TIMEOUT=1m usage() { - echo "$0 [-slow] [-in-container foo]" + echo "$0 [-slow] [-in-container foo] [-netgo] [-(no-)go-get] [-timeout 1m]" } while [ $# -gt 0 ]; do @@ -17,10 +20,34 @@ while [ $# -gt 0 ]; do SLOW=true shift 1 ;; + "-no-race") + RACE= + shift 1 + ;; "-no-go-get") NO_GO_GET=true shift 1 ;; + "-go-get") + NO_GO_GET= + shift 1 + ;; + "-netgo") + TAGS="netgo" + shift 1 + ;; + "-tags") + TAGS="$2" + shift 2 + ;; + "-p") + PARALLEL=true + shift 1 + ;; + "-timeout") + TIMEOUT=$2 + shift 2 + ;; *) usage exit 2 @@ -28,12 +55,14 @@ while [ $# -gt 0 ]; do esac done +GO_TEST_ARGS=(-tags "${TAGS[@]}" -cpu 4 -timeout $TIMEOUT) + if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then SLOW=true fi if [ -n "$SLOW" ]; then - GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" -race -covermode=atomic) + GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" ${RACE}) # shellcheck disable=SC2153 if [ -n "$COVERDIR" ]; then @@ -49,7 +78,7 @@ fail=0 if [ -z "$TESTDIRS" ]; then # NB: Relies on paths being prefixed with './'. 
- TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|prog|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|')) + TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|')) else # TESTDIRS on the right side is not really an array variable, it # is just a string with spaces, but it is written like that to @@ -60,7 +89,7 @@ fi # If running on circle, use the scheduler to work out what tests to run on what shard if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then PREFIX=$(go list -e ./ | sed -e 's/\//-/g') - TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX")) + TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_PROJECT_USERNAME-$CIRCLE_PROJECT_REPONAME-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX")) echo "${TESTDIRS[@]}" fi @@ -69,33 +98,51 @@ PACKAGE_BASE=$(go list -e ./) # Speed up the tests by compiling and installing their dependencies first. go test -i "${GO_TEST_ARGS[@]}" "${TESTDIRS[@]}" -for dir in "${TESTDIRS[@]}"; do +run_test() { + local dir=$1 if [ -z "$NO_GO_GET" ]; then - go get -t -tags netgo "$dir" + go get -t -tags "${TAGS[@]}" "$dir" fi - GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}") + local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}") if [ -n "$SLOW" ]; then + local COVERPKGS COVERPKGS=$( ( go list "$dir" go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/" ) | paste -s -d, -) + local output output=$(mktemp "$coverdir/unit.XXXXXXXXXX") - GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS) + local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS) fi + local START START=$(date +%s) if ! go test "${GO_TEST_ARGS_RUN[@]}" "$dir"; then fail=1 fi - RUNTIME=$(($(date +%s) - START)) + local END + END=$(date +%s) + local RUNTIME=$((END - START)) # Report test runtime when running on circle, to help scheduler if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then "$DIR/sched" time "$dir" "$RUNTIME" fi +} + +for dir in "${TESTDIRS[@]}"; do + if [ -n "$PARALLEL" ]; then + run_test "$dir" & + else + run_test "$dir" + fi done +if [ -n "$PARALLEL" ]; then + wait +fi + if [ -n "$SLOW" ] && [ -z "$COVERDIR" ]; then go get github.com/weaveworks/tools/cover cover "$coverdir"/* >profile.cov diff --git a/vendor/github.com/mvdan/sh/LICENSE b/vendor/github.com/mvdan/sh/LICENSE deleted file mode 100644 index 7d71d51..0000000 --- a/vendor/github.com/mvdan/sh/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Daniel Martí. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mvdan/sh/cmd/shfmt/main.go b/vendor/github.com/mvdan/sh/cmd/shfmt/main.go deleted file mode 100644 index 08f99e0..0000000 --- a/vendor/github.com/mvdan/sh/cmd/shfmt/main.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "log" - "os" - "path/filepath" - "regexp" - "runtime/pprof" - "strings" - - "github.com/mvdan/sh/syntax" -) - -var ( - write = flag.Bool("w", false, "write result to file instead of stdout") - list = flag.Bool("l", false, "list files whose formatting differs from shfmt's") - indent = flag.Int("i", 0, "indent: 0 for tabs (default), >0 for number of spaces") - posix = flag.Bool("p", false, "parse POSIX shell code instead of bash") - cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") - - parseMode syntax.ParseMode - printConfig syntax.PrintConfig - readBuf, writeBuf bytes.Buffer - - out io.Writer -) - -func main() { - flag.Parse() - if *cpuprofile != "" { - f, err := os.Create(*cpuprofile) - if err != nil { - log.Fatal(err) - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } - - out = os.Stdout - printConfig.Spaces = *indent - parseMode |= syntax.ParseComments - if *posix { - parseMode |= syntax.PosixConformant - } - if flag.NArg() == 0 { - if err := formatStdin(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - return - } - anyErr := false - onError := func(err error) { - anyErr = true - fmt.Fprintln(os.Stderr, err) - } - for _, path := range flag.Args() { - walk(path, onError) - } - if anyErr { - os.Exit(1) - } -} - -func formatStdin() error { - if *write || *list { - return fmt.Errorf("-w and -l can only be used on files") - } - readBuf.Reset() - if _, err := io.Copy(&readBuf, os.Stdin); err != nil { - return err - } - src := readBuf.Bytes() - prog, err := syntax.Parse(src, "", parseMode) - if err != nil { - return err - } - return printConfig.Fprint(out, prog) -} - -var ( - shellFile = regexp.MustCompile(`\.(sh|bash)$`) - validShebang = regexp.MustCompile(`^#!\s?/(usr/)?bin/(env *)?(sh|bash)`) - vcsDir = regexp.MustCompile(`^\.(git|svn|hg)$`) -) - -type shellConfidence int - -const ( - notShellFile shellConfidence = iota - ifValidShebang - isShellFile -) - -func getConfidence(info os.FileInfo) shellConfidence { - name := info.Name() - switch { - case info.IsDir(), name[0] == '.', !info.Mode().IsRegular(): - return notShellFile - case shellFile.MatchString(name): - return isShellFile - case strings.Contains(name, "."): - return notShellFile // different extension - case info.Size() < 8: - return notShellFile // cannot possibly hold valid shebang - default: - return ifValidShebang - } -} - -func walk(path string, 
onError func(error)) { - info, err := os.Stat(path) - if err != nil { - onError(err) - return - } - if !info.IsDir() { - if err := formatPath(path, false); err != nil { - onError(err) - } - return - } - filepath.Walk(path, func(path string, info os.FileInfo, err error) error { - if info.IsDir() && vcsDir.MatchString(info.Name()) { - return filepath.SkipDir - } - if err != nil { - onError(err) - return nil - } - conf := getConfidence(info) - if conf == notShellFile { - return nil - } - err = formatPath(path, conf == ifValidShebang) - if err != nil && !os.IsNotExist(err) { - onError(err) - } - return nil - }) -} - -func empty(f *os.File) error { - if err := f.Truncate(0); err != nil { - return err - } - _, err := f.Seek(0, 0) - return err -} - -func formatPath(path string, checkShebang bool) error { - openMode := os.O_RDONLY - if *write { - openMode = os.O_RDWR - } - f, err := os.OpenFile(path, openMode, 0) - if err != nil { - return err - } - defer f.Close() - readBuf.Reset() - if _, err := io.Copy(&readBuf, f); err != nil { - return err - } - src := readBuf.Bytes() - if checkShebang && !validShebang.Match(src[:32]) { - return nil - } - prog, err := syntax.Parse(src, path, parseMode) - if err != nil { - return err - } - writeBuf.Reset() - printConfig.Fprint(&writeBuf, prog) - res := writeBuf.Bytes() - if !bytes.Equal(src, res) { - if *list { - fmt.Fprintln(out, path) - } - if *write { - if err := empty(f); err != nil { - return err - } - if _, err := f.Write(res); err != nil { - return err - } - } - } - if !*list && !*write { - if _, err := out.Write(res); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/mvdan/sh/syntax/doc.go b/vendor/github.com/mvdan/sh/syntax/doc.go deleted file mode 100644 index eff8c2f..0000000 --- a/vendor/github.com/mvdan/sh/syntax/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -// Package syntax implements parsing and formatting of shell programs. -// It supports both POSIX Shell and Bash. -package syntax diff --git a/vendor/github.com/mvdan/sh/syntax/lexer.go b/vendor/github.com/mvdan/sh/syntax/lexer.go deleted file mode 100644 index 8b5cbd3..0000000 --- a/vendor/github.com/mvdan/sh/syntax/lexer.go +++ /dev/null @@ -1,962 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package syntax - -import ( - "bytes" -) - -// bytes that form or start a token -func regOps(b byte) bool { - return b == ';' || b == '"' || b == '\'' || b == '(' || - b == ')' || b == '$' || b == '|' || b == '&' || - b == '>' || b == '<' || b == '`' -} - -// tokenize these inside parameter expansions -func paramOps(b byte) bool { - return b == '}' || b == '#' || b == ':' || b == '-' || b == '+' || - b == '=' || b == '?' || b == '%' || b == '[' || b == '/' || - b == '^' || b == ',' -} - -// tokenize these inside arithmetic expansions -func arithmOps(b byte) bool { - return b == '+' || b == '-' || b == '!' || b == '*' || - b == '/' || b == '%' || b == '(' || b == ')' || - b == '^' || b == '<' || b == '>' || b == ':' || - b == '=' || b == ',' || b == '?' 
|| b == '|' || - b == '&' -} - -func wordBreak(b byte) bool { - return b == ' ' || b == '\t' || b == '\r' || b == '\n' || - b == '&' || b == '>' || b == '<' || b == '|' || - b == ';' || b == '(' || b == ')' -} - -func (p *parser) next() { - if p.tok == _EOF || p.npos >= len(p.src) { - p.tok = _EOF - return - } - p.spaced, p.newLine = false, false - b, q := p.src[p.npos], p.quote - p.pos = Pos(p.npos + 1) - switch q { - case hdocWord: - if wordBreak(b) { - p.tok = illegalTok - p.spaced = true - return - } - case paramExpRepl: - switch b { - case '}': - p.npos++ - p.tok = rightBrace - case '/': - p.npos++ - p.tok = Quo - case '`', '"', '$': - p.tok = p.dqToken(b) - default: - p.advanceLitOther(q) - } - return - case dblQuotes: - if b == '`' || b == '"' || b == '$' { - p.tok = p.dqToken(b) - } else { - p.advanceLitDquote() - } - return - case hdocBody, hdocBodyTabs: - if b == '`' || b == '$' { - p.tok = p.dqToken(b) - } else if p.hdocStop == nil { - p.tok = illegalTok - } else { - p.advanceLitHdoc() - } - return - case paramExpExp: - switch b { - case '}': - p.npos++ - p.tok = rightBrace - case '`', '"', '$': - p.tok = p.dqToken(b) - default: - p.advanceLitOther(q) - } - return - case sglQuotes: - if b == '\'' { - p.npos++ - p.tok = sglQuote - } else { - p.advanceLitOther(q) - } - return - } -skipSpace: - for { - switch b { - case ' ', '\t', '\r': - p.spaced = true - p.npos++ - case '\n': - if p.quote == arithmExprLet { - p.tok = illegalTok - p.newLine, p.spaced = true, true - return - } - p.spaced = true - if p.npos < len(p.src) { - p.npos++ - } - p.f.Lines = append(p.f.Lines, p.npos) - p.newLine = true - if len(p.heredocs) > p.buriedHdocs { - p.doHeredocs() - if p.tok == _EOF { - return - } - } - case '\\': - if p.npos < len(p.src)-1 && p.src[p.npos+1] == '\n' { - p.npos += 2 - p.f.Lines = append(p.f.Lines, p.npos) - } else { - break skipSpace - } - default: - break skipSpace - } - if p.npos >= len(p.src) { - p.tok = _EOF - return - } - b = p.src[p.npos] - } - p.pos = Pos(p.npos + 1) - switch { - case q&allRegTokens != 0: - switch b { - case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`': - p.tok = p.regToken(b) - case '#': - p.npos++ - bs, _ := p.readUntil('\n') - p.npos += len(bs) - if p.mode&ParseComments > 0 { - p.f.Comments = append(p.f.Comments, &Comment{ - Hash: p.pos, - Text: string(bs), - }) - } - p.next() - case '?', '*', '+', '@', '!': - if p.bash() && p.npos+1 < len(p.src) && p.src[p.npos+1] == '(' { - switch b { - case '?': - p.tok = globQuest - case '*': - p.tok = globMul - case '+': - p.tok = globAdd - case '@': - p.tok = globAt - default: // '!' 
- p.tok = globNot - } - p.npos += 2 - } else { - p.advanceLitNone() - } - default: - p.advanceLitNone() - } - case q == paramExpName && paramOps(b): - p.tok = p.paramToken(b) - case q&allArithmExpr != 0 && arithmOps(b): - p.tok = p.arithmToken(b) - case q&allRbrack != 0 && b == ']': - p.npos++ - p.tok = rightBrack - case q == testRegexp: - p.advanceLitRe() - case regOps(b): - p.tok = p.regToken(b) - default: - p.advanceLitOther(q) - } -} - -func byteAt(src []byte, i int) byte { - if i >= len(src) { - return 0 - } - return src[i] -} - -func (p *parser) regToken(b byte) Token { - switch b { - case '\'': - p.npos++ - return sglQuote - case '"': - p.npos++ - return dblQuote - case '`': - p.npos++ - return bckQuote - case '&': - switch byteAt(p.src, p.npos+1) { - case '&': - p.npos += 2 - return AndExpr - case '>': - if !p.bash() { - break - } - if byteAt(p.src, p.npos+2) == '>' { - p.npos += 3 - return appAll - } - p.npos += 2 - return rdrAll - } - p.npos++ - return And - case '|': - switch byteAt(p.src, p.npos+1) { - case '|': - p.npos += 2 - return OrExpr - case '&': - if !p.bash() { - break - } - p.npos += 2 - return pipeAll - } - p.npos++ - return Or - case '$': - switch byteAt(p.src, p.npos+1) { - case '\'': - if !p.bash() { - break - } - p.npos += 2 - return dollSglQuote - case '"': - if !p.bash() { - break - } - p.npos += 2 - return dollDblQuote - case '{': - p.npos += 2 - return dollBrace - case '[': - if !p.bash() { - break - } - p.npos += 2 - return dollBrack - case '(': - if byteAt(p.src, p.npos+2) == '(' { - p.npos += 3 - return dollDblParen - } - p.npos += 2 - return dollParen - } - p.npos++ - return dollar - case '(': - if p.bash() && byteAt(p.src, p.npos+1) == '(' { - p.npos += 2 - return dblLeftParen - } - p.npos++ - return leftParen - case ')': - p.npos++ - return rightParen - case ';': - switch byteAt(p.src, p.npos+1) { - case ';': - if p.bash() && byteAt(p.src, p.npos+2) == '&' { - p.npos += 3 - return dblSemiFall - } - p.npos += 2 - return dblSemicolon - case '&': - if !p.bash() { - break - } - p.npos += 2 - return semiFall - } - p.npos++ - return semicolon - case '<': - switch byteAt(p.src, p.npos+1) { - case '<': - if b := byteAt(p.src, p.npos+2); b == '-' { - p.npos += 3 - return dashHdoc - } else if p.bash() && b == '<' { - p.npos += 3 - return wordHdoc - } - p.npos += 2 - return Shl - case '>': - p.npos += 2 - return rdrInOut - case '&': - p.npos += 2 - return dplIn - case '(': - if !p.bash() { - break - } - p.npos += 2 - return cmdIn - } - p.npos++ - return Lss - default: // '>' - switch byteAt(p.src, p.npos+1) { - case '>': - p.npos += 2 - return Shr - case '&': - p.npos += 2 - return dplOut - case '|': - p.npos += 2 - return clbOut - case '(': - if !p.bash() { - break - } - p.npos += 2 - return cmdOut - } - p.npos++ - return Gtr - } -} - -func (p *parser) dqToken(b byte) Token { - switch b { - case '"': - p.npos++ - return dblQuote - case '`': - p.npos++ - return bckQuote - default: // '$' - switch byteAt(p.src, p.npos+1) { - case '{': - p.npos += 2 - return dollBrace - case '[': - if !p.bash() { - break - } - p.npos += 2 - return dollBrack - case '(': - if byteAt(p.src, p.npos+2) == '(' { - p.npos += 3 - return dollDblParen - } - p.npos += 2 - return dollParen - } - p.npos++ - return dollar - } -} - -func (p *parser) paramToken(b byte) Token { - switch b { - case '}': - p.npos++ - return rightBrace - case ':': - switch byteAt(p.src, p.npos+1) { - case '+': - p.npos += 2 - return ColAdd - case '-': - p.npos += 2 - return ColSub - case '?': - p.npos += 2 - return 
ColQuest - case '=': - p.npos += 2 - return ColAssgn - } - p.npos++ - return Colon - case '+': - p.npos++ - return Add - case '-': - p.npos++ - return Sub - case '?': - p.npos++ - return Quest - case '=': - p.npos++ - return Assgn - case '%': - if byteAt(p.src, p.npos+1) == '%' { - p.npos += 2 - return dblRem - } - p.npos++ - return Rem - case '#': - if byteAt(p.src, p.npos+1) == '#' { - p.npos += 2 - return dblHash - } - p.npos++ - return Hash - case '[': - p.npos++ - return leftBrack - case '^': - if byteAt(p.src, p.npos+1) == '^' { - p.npos += 2 - return dblXor - } - p.npos++ - return Xor - case ',': - if byteAt(p.src, p.npos+1) == ',' { - p.npos += 2 - return dblComma - } - p.npos++ - return Comma - default: // '/' - if byteAt(p.src, p.npos+1) == '/' { - p.npos += 2 - return dblQuo - } - p.npos++ - return Quo - } -} - -func (p *parser) arithmToken(b byte) Token { - switch b { - case '!': - if byteAt(p.src, p.npos+1) == '=' { - p.npos += 2 - return Neq - } - p.npos++ - return Not - case '=': - if byteAt(p.src, p.npos+1) == '=' { - p.npos += 2 - return Eql - } - p.npos++ - return Assgn - case '(': - p.npos++ - return leftParen - case ')': - p.npos++ - return rightParen - case '&': - switch byteAt(p.src, p.npos+1) { - case '&': - p.npos += 2 - return AndExpr - case '=': - p.npos += 2 - return AndAssgn - } - p.npos++ - return And - case '|': - switch byteAt(p.src, p.npos+1) { - case '|': - p.npos += 2 - return OrExpr - case '=': - p.npos += 2 - return OrAssgn - } - p.npos++ - return Or - case '<': - switch byteAt(p.src, p.npos+1) { - case '<': - if byteAt(p.src, p.npos+2) == '=' { - p.npos += 3 - return ShlAssgn - } - p.npos += 2 - return Shl - case '=': - p.npos += 2 - return Leq - } - p.npos++ - return Lss - case '>': - switch byteAt(p.src, p.npos+1) { - case '>': - if byteAt(p.src, p.npos+2) == '=' { - p.npos += 3 - return ShrAssgn - } - p.npos += 2 - return Shr - case '=': - p.npos += 2 - return Geq - } - p.npos++ - return Gtr - case '+': - switch byteAt(p.src, p.npos+1) { - case '+': - p.npos += 2 - return Inc - case '=': - p.npos += 2 - return AddAssgn - } - p.npos++ - return Add - case '-': - switch byteAt(p.src, p.npos+1) { - case '-': - p.npos += 2 - return Dec - case '=': - p.npos += 2 - return SubAssgn - } - p.npos++ - return Sub - case '%': - if byteAt(p.src, p.npos+1) == '=' { - p.npos += 2 - return RemAssgn - } - p.npos++ - return Rem - case '*': - switch byteAt(p.src, p.npos+1) { - case '*': - p.npos += 2 - return Pow - case '=': - p.npos += 2 - return MulAssgn - } - p.npos++ - return Mul - case '/': - if byteAt(p.src, p.npos+1) == '=' { - p.npos += 2 - return QuoAssgn - } - p.npos++ - return Quo - case '^': - if byteAt(p.src, p.npos+1) == '=' { - p.npos += 2 - return XorAssgn - } - p.npos++ - return Xor - case ',': - p.npos++ - return Comma - case '?': - p.npos++ - return Quest - default: // ':' - p.npos++ - return Colon - } -} - -func (p *parser) advanceLitOther(q quoteState) { - bs := p.litBuf[:0] - for { - if p.npos >= len(p.src) { - p.tok, p.val = _LitWord, string(bs) - return - } - b := p.src[p.npos] - switch { - case b == '\\': // escaped byte follows - if p.npos == len(p.src)-1 { - p.npos++ - bs = append(bs, '\\') - p.tok, p.val = _LitWord, string(bs) - return - } - b = p.src[p.npos+1] - p.npos += 2 - if b == '\n' { - p.f.Lines = append(p.f.Lines, p.npos) - } else { - bs = append(bs, '\\', b) - } - continue - case q == sglQuotes: - switch b { - case '\n': - p.f.Lines = append(p.f.Lines, p.npos+1) - case '\'': - p.tok, p.val = _LitWord, string(bs) - return - } - case 
b == '`', b == '$': - p.tok, p.val = _Lit, string(bs) - return - case q == paramExpExp: - if b == '}' { - p.tok, p.val = _LitWord, string(bs) - return - } else if b == '"' { - p.tok, p.val = _Lit, string(bs) - return - } - case q == paramExpRepl: - if b == '}' || b == '/' { - p.tok, p.val = _LitWord, string(bs) - return - } - case wordBreak(b), regOps(b), q&allArithmExpr != 0 && arithmOps(b), - q == paramExpName && paramOps(b), - q&allRbrack != 0 && b == ']': - p.tok, p.val = _LitWord, string(bs) - return - } - bs = append(bs, p.src[p.npos]) - p.npos++ - } -} - -func (p *parser) advanceLitNone() { - var i int - tok := _Lit - p.asPos = 0 -loop: - for i = p.npos; i < len(p.src); i++ { - switch p.src[i] { - case '\\': // escaped byte follows - if i == len(p.src)-1 { - break - } - if i++; p.src[i] == '\n' { - p.f.Lines = append(p.f.Lines, i+1) - bs := p.src[p.npos : i-1] - p.npos = i + 1 - p.advanceLitNoneCont(bs) - return - } - case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')': - tok = _LitWord - break loop - case '?', '*', '+', '@', '!': - if p.bash() && i+1 < len(p.src) && p.src[i+1] == '(' { - break loop - } - case '`': - if p.quote == subCmdBckquo { - tok = _LitWord - } - break loop - case '"', '\'', '$': - break loop - case '=': - p.asPos = i - p.npos - if p.bash() && p.asPos > 0 && p.src[p.npos+p.asPos-1] == '+' { - p.asPos-- // a+=b - } - } - } - if i == len(p.src) { - tok = _LitWord - } - p.tok, p.val = tok, string(p.src[p.npos:i]) - p.npos = i -} - -func (p *parser) advanceLitNoneCont(bs []byte) { - for { - if p.npos >= len(p.src) { - p.tok, p.val = _LitWord, string(bs) - return - } - switch p.src[p.npos] { - case '\\': // escaped byte follows - if p.npos == len(p.src)-1 { - p.npos++ - bs = append(bs, '\\') - p.tok, p.val = _LitWord, string(bs) - return - } - b := p.src[p.npos+1] - p.npos += 2 - if b == '\n' { - p.f.Lines = append(p.f.Lines, p.npos) - } else { - bs = append(bs, '\\', b) - } - case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')': - p.tok, p.val = _LitWord, string(bs) - return - case '`': - if p.quote == subCmdBckquo { - p.tok, p.val = _LitWord, string(bs) - return - } - fallthrough - case '"', '\'', '$': - p.tok, p.val = _Lit, string(bs) - return - default: - bs = append(bs, p.src[p.npos]) - p.npos++ - } - } -} - -func (p *parser) advanceLitDquote() { - var i int - tok := _Lit -loop: - for i = p.npos; i < len(p.src); i++ { - switch p.src[i] { - case '\\': // escaped byte follows - if i == len(p.src)-1 { - break - } - if i++; p.src[i] == '\n' { - p.f.Lines = append(p.f.Lines, i+1) - } - case '"': - tok = _LitWord - break loop - case '`', '$': - break loop - case '\n': - p.f.Lines = append(p.f.Lines, i+1) - } - } - p.tok, p.val = tok, string(p.src[p.npos:i]) - p.npos = i -} - -func (p *parser) isHdocEnd(i int) bool { - end := p.hdocStop - if end == nil || len(p.src) < i+len(end) { - return false - } - if !bytes.Equal(end, p.src[i:i+len(end)]) { - return false - } - return len(p.src) == i+len(end) || p.src[i+len(end)] == '\n' -} - -func (p *parser) advanceLitHdoc() { - n := p.npos - if p.quote == hdocBodyTabs { - for n < len(p.src) && p.src[n] == '\t' { - n++ - } - } - if p.isHdocEnd(n) { - if n > p.npos { - p.tok, p.val = _LitWord, string(p.src[p.npos:n]) - } - p.npos = n + len(p.hdocStop) - p.hdocStop = nil - return - } - var i int -loop: - for i = p.npos; i < len(p.src); i++ { - switch p.src[i] { - case '\\': // escaped byte follows - if i++; i == len(p.src) { - break loop - } - if p.src[i] == '\n' { - p.f.Lines = append(p.f.Lines, i+1) - } - case 
'`', '$': - break loop - case '\n': - n := i + 1 - p.f.Lines = append(p.f.Lines, n) - if p.quote == hdocBodyTabs { - for n < len(p.src) && p.src[n] == '\t' { - n++ - } - } - if p.isHdocEnd(n) { - p.tok, p.val = _LitWord, string(p.src[p.npos:n]) - p.npos = n + len(p.hdocStop) - p.hdocStop = nil - return - } - } - } - p.tok, p.val = _Lit, string(p.src[p.npos:i]) - p.npos = i -} - -func (p *parser) hdocLitWord() Word { - pos := p.npos - end := pos - for p.npos < len(p.src) { - end = p.npos - bs, found := p.readUntil('\n') - p.npos += len(bs) + 1 - if found { - p.f.Lines = append(p.f.Lines, p.npos) - } - if p.quote == hdocBodyTabs { - for end < len(p.src) && p.src[end] == '\t' { - end++ - } - } - if p.isHdocEnd(end) { - break - } - } - if p.npos == len(p.src) { - end = p.npos - } - l := p.lit(Pos(pos+1), string(p.src[pos:end])) - return Word{Parts: p.singleWps(l)} -} - -func (p *parser) readUntil(b byte) ([]byte, bool) { - rem := p.src[p.npos:] - if i := bytes.IndexByte(rem, b); i >= 0 { - return rem[:i], true - } - return rem, false -} - -func (p *parser) advanceLitRe() { - end := bytes.Index(p.src[p.npos:], []byte(" ]]")) - p.tok = _LitWord - if end == -1 { - p.val = string(p.src[p.npos:]) - p.npos = len(p.src) - return - } - p.val = string(p.src[p.npos : p.npos+end]) - p.npos += end -} - -func testUnaryOp(val string) Token { - switch val { - case "!": - return Not - case "-e", "-a": - return tsExists - case "-f": - return tsRegFile - case "-d": - return tsDirect - case "-c": - return tsCharSp - case "-b": - return tsBlckSp - case "-p": - return tsNmPipe - case "-S": - return tsSocket - case "-L", "-h": - return tsSmbLink - case "-g": - return tsGIDSet - case "-u": - return tsUIDSet - case "-r": - return tsRead - case "-w": - return tsWrite - case "-x": - return tsExec - case "-s": - return tsNoEmpty - case "-t": - return tsFdTerm - case "-z": - return tsEmpStr - case "-n": - return tsNempStr - case "-o": - return tsOptSet - case "-v": - return tsVarSet - case "-R": - return tsRefVar - default: - return illegalTok - } -} - -func testBinaryOp(val string) Token { - switch val { - case "=": - return Assgn - case "==": - return Eql - case "!=": - return Neq - case "=~": - return tsReMatch - case "-nt": - return tsNewer - case "-ot": - return tsOlder - case "-ef": - return tsDevIno - case "-eq": - return tsEql - case "-ne": - return tsNeq - case "-le": - return tsLeq - case "-ge": - return tsGeq - case "-lt": - return tsLss - case "-gt": - return tsGtr - default: - return illegalTok - } -} diff --git a/vendor/github.com/mvdan/sh/syntax/nodes.go b/vendor/github.com/mvdan/sh/syntax/nodes.go deleted file mode 100644 index 786cafc..0000000 --- a/vendor/github.com/mvdan/sh/syntax/nodes.go +++ /dev/null @@ -1,717 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package syntax - -// Node represents an AST node. -type Node interface { - // Pos returns the first character of the node - Pos() Pos - // End returns the character immediately after the node - End() Pos -} - -// File is a shell program. 
-type File struct { - Name string - - Stmts []*Stmt - Comments []*Comment - - // Lines contains the offset of the first character for each - // line (the first entry is always 0) - Lines []int -} - -func (f *File) Pos() Pos { return stmtFirstPos(f.Stmts) } -func (f *File) End() Pos { return stmtLastEnd(f.Stmts) } - -func (f *File) Position(p Pos) (pos Position) { - intp := int(p) - pos.Offset = intp - 1 - if i := searchInts(f.Lines, intp); i >= 0 { - pos.Line, pos.Column = i+1, intp-f.Lines[i] - } - return -} - -// Inlined version of: -// sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 -func searchInts(a []int, x int) int { - i, j := 0, len(a) - for i < j { - h := i + (j-i)/2 - if a[h] <= x { - i = h + 1 - } else { - j = h - } - } - return i - 1 -} - -func posMax(p1, p2 Pos) Pos { - if p2 > p1 { - return p2 - } - return p1 -} - -// Comment represents a single comment on a single line. -type Comment struct { - Hash Pos - Text string -} - -func (c *Comment) Pos() Pos { return c.Hash } -func (c *Comment) End() Pos { return posAddStr(c.Hash, c.Text) } - -// Stmt represents a statement, otherwise known as a compound command. -// It is comprised of a command and other components that may come -// before or after it. -type Stmt struct { - Cmd Command - Position Pos - SemiPos Pos - Negated bool - Background bool - Assigns []*Assign - Redirs []*Redirect -} - -func (s *Stmt) Pos() Pos { return s.Position } -func (s *Stmt) End() Pos { - if s.SemiPos > 0 { - return s.SemiPos + 1 - } - end := s.Position - if s.Negated { - end = posAdd(end, 1) - } - if s.Cmd != nil { - end = s.Cmd.End() - } - if len(s.Assigns) > 0 { - assEnd := s.Assigns[len(s.Assigns)-1].End() - end = posMax(end, assEnd) - } - if len(s.Redirs) > 0 { - redEnd := s.Redirs[len(s.Redirs)-1].End() - end = posMax(end, redEnd) - } - return end -} - -// Command represents all nodes that are simple commands, which are -// directly placed in a Stmt. -type Command interface { - Node - commandNode() -} - -func (*CallExpr) commandNode() {} -func (*IfClause) commandNode() {} -func (*WhileClause) commandNode() {} -func (*UntilClause) commandNode() {} -func (*ForClause) commandNode() {} -func (*CaseClause) commandNode() {} -func (*Block) commandNode() {} -func (*Subshell) commandNode() {} -func (*BinaryCmd) commandNode() {} -func (*FuncDecl) commandNode() {} -func (*ArithmCmd) commandNode() {} -func (*TestClause) commandNode() {} -func (*DeclClause) commandNode() {} -func (*EvalClause) commandNode() {} -func (*LetClause) commandNode() {} -func (*CoprocClause) commandNode() {} - -// Assign represents an assignment to a variable. -type Assign struct { - Append bool - Name *Lit - Value Word -} - -func (a *Assign) Pos() Pos { - if a.Name == nil { - return a.Value.Pos() - } - return a.Name.Pos() -} - -func (a *Assign) End() Pos { - if a.Name != nil { - return posMax(a.Name.End(), a.Value.End()) - } - return a.Value.End() -} - -// Redirect represents an input/output redirection. -type Redirect struct { - OpPos Pos - Op RedirOperator - N *Lit - Word, Hdoc Word -} - -func (r *Redirect) Pos() Pos { - if r.N != nil { - return r.N.Pos() - } - return r.OpPos -} -func (r *Redirect) End() Pos { return r.Word.End() } - -// CallExpr represents a command execution or function call. -type CallExpr struct { - Args []Word -} - -func (c *CallExpr) Pos() Pos { return c.Args[0].Pos() } -func (c *CallExpr) End() Pos { return c.Args[len(c.Args)-1].End() } - -// Subshell represents a series of commands that should be executed in a -// nested shell environment.
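Aside: File.Position and searchInts above are the entire offset-to-line/column mapping in this package: File.Lines stores the 0-based byte offset of each line start (the first entry is always 0), a Pos is a 1-based byte offset assigned by the lexer, and Position does an inlined binary search over those offsets. A minimal standalone restatement of that computation, under those assumptions (the name lineCol is illustrative, not part of the package):

package main

import "fmt"

// lineCol mirrors File.Position: lines holds the 0-based byte offset of
// each line start; pos is a 1-based byte offset into the source.
func lineCol(lines []int, pos int) (line, col int) {
	// Inlined sort.Search(len(lines), func(i int) bool { return lines[i] > pos }) - 1.
	i, j := 0, len(lines)
	for i < j {
		h := i + (j-i)/2
		if lines[h] <= pos {
			i = h + 1
		} else {
			j = h
		}
	}
	i--
	return i + 1, pos - lines[i]
}

func main() {
	// For the source "echo hi\necho bye\n", line 2 starts at byte offset 8.
	lines := []int{0, 8}
	fmt.Println(lineCol(lines, 1)) // 1 1 (first byte of line 1)
	fmt.Println(lineCol(lines, 9)) // 2 1 (first byte of line 2)
}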
-type Subshell struct { - Lparen, Rparen Pos - Stmts []*Stmt -} - -func (s *Subshell) Pos() Pos { return s.Lparen } -func (s *Subshell) End() Pos { return posAdd(s.Rparen, 1) } - -// Block represents a series of commands that should be executed in a -// nested scope. -type Block struct { - Lbrace, Rbrace Pos - Stmts []*Stmt -} - -func (b *Block) Pos() Pos { return b.Rbrace } -func (b *Block) End() Pos { return posAdd(b.Rbrace, 1) } - -// IfClause represents an if statement. -type IfClause struct { - If, Then, Fi Pos - CondStmts []*Stmt - ThenStmts []*Stmt - Elifs []*Elif - Else Pos - ElseStmts []*Stmt -} - -func (c *IfClause) Pos() Pos { return c.If } -func (c *IfClause) End() Pos { return posAdd(c.Fi, 2) } - -// Elif represents an "else if" case in an if clause. -type Elif struct { - Elif, Then Pos - CondStmts []*Stmt - ThenStmts []*Stmt -} - -// WhileClause represents a while clause. -type WhileClause struct { - While, Do, Done Pos - CondStmts []*Stmt - DoStmts []*Stmt -} - -func (w *WhileClause) Pos() Pos { return w.While } -func (w *WhileClause) End() Pos { return posAdd(w.Done, 4) } - -// UntilClause represents an until clause. -type UntilClause struct { - Until, Do, Done Pos - CondStmts []*Stmt - DoStmts []*Stmt -} - -func (u *UntilClause) Pos() Pos { return u.Until } -func (u *UntilClause) End() Pos { return posAdd(u.Done, 4) } - -// ForClause represents a for clause. -type ForClause struct { - For, Do, Done Pos - Loop Loop - DoStmts []*Stmt -} - -func (f *ForClause) Pos() Pos { return f.For } -func (f *ForClause) End() Pos { return posAdd(f.Done, 4) } - -// Loop represents all nodes that can be loops in a for clause. -type Loop interface { - Node - loopNode() -} - -func (*WordIter) loopNode() {} -func (*CStyleLoop) loopNode() {} - -// WordIter represents the iteration of a variable over a series of -// words in a for clause. -type WordIter struct { - Name Lit - List []Word -} - -func (w *WordIter) Pos() Pos { return w.Name.Pos() } -func (w *WordIter) End() Pos { return posMax(w.Name.End(), wordLastEnd(w.List)) } - -// CStyleLoop represents the behaviour of a for clause similar to the C -// language. -// -// This node will never appear when in PosixConformant mode. -type CStyleLoop struct { - Lparen, Rparen Pos - Init, Cond, Post ArithmExpr -} - -func (c *CStyleLoop) Pos() Pos { return c.Lparen } -func (c *CStyleLoop) End() Pos { return posAdd(c.Rparen, 2) } - -// BinaryCmd represents a binary expression between two statements. -type BinaryCmd struct { - OpPos Pos - Op BinCmdOperator - X, Y *Stmt -} - -func (b *BinaryCmd) Pos() Pos { return b.X.Pos() } -func (b *BinaryCmd) End() Pos { return b.Y.End() } - -// FuncDecl represents the declaration of a function. -type FuncDecl struct { - Position Pos - BashStyle bool - Name Lit - Body *Stmt -} - -func (f *FuncDecl) Pos() Pos { return f.Position } -func (f *FuncDecl) End() Pos { return f.Body.End() } - -// Word represents a list of nodes that are contiguous to each other. -// The word is delimited by word boundaries. -type Word struct { - Parts []WordPart -} - -func (w *Word) Pos() Pos { return partsFirstPos(w.Parts) } -func (w *Word) End() Pos { return partsLastEnd(w.Parts) } - -// WordPart represents all nodes that can form a word.
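Aside: as the list of wordPartNode implementations just below shows, a single shell word can mix literals, quoting, and expansions. A hedged usage sketch, assuming the import path github.com/mvdan/sh/syntax as vendored here, that type-switches over the parts of one word:

package main

import (
	"fmt"

	"github.com/mvdan/sh/syntax"
)

func main() {
	prog, err := syntax.Parse([]byte("echo pre${x}post"), "", 0)
	if err != nil {
		panic(err)
	}
	call := prog.Stmts[0].Cmd.(*syntax.CallExpr)
	// Args[1] is the word pre${x}post: a Lit, a ParamExp, and a Lit.
	for _, part := range call.Args[1].Parts {
		switch x := part.(type) {
		case *syntax.Lit:
			fmt.Printf("literal %q\n", x.Value)
		case *syntax.ParamExp:
			fmt.Printf("expansion of $%s\n", x.Param.Value)
		default:
			fmt.Printf("other part: %T\n", x)
		}
	}
}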
-type WordPart interface { - Node - wordPartNode() -} - -func (*Lit) wordPartNode() {} -func (*SglQuoted) wordPartNode() {} -func (*DblQuoted) wordPartNode() {} -func (*ParamExp) wordPartNode() {} -func (*CmdSubst) wordPartNode() {} -func (*ArithmExp) wordPartNode() {} -func (*ProcSubst) wordPartNode() {} -func (*ArrayExpr) wordPartNode() {} -func (*ExtGlob) wordPartNode() {} - -// Lit represents an unquoted string consisting of characters that were -// not tokenized. -type Lit struct { - ValuePos Pos - Value string -} - -func (l *Lit) Pos() Pos { return l.ValuePos } -func (l *Lit) End() Pos { return posAddStr(l.ValuePos, l.Value) } - -// SglQuoted represents a string within single quotes. -type SglQuoted struct { - Position Pos - Dollar bool - Value string -} - -func (q *SglQuoted) Pos() Pos { return q.Position } -func (q *SglQuoted) End() Pos { - pos := posAdd(q.Position, 2+len(q.Value)) - if pos > 0 && q.Dollar { - pos++ - } - return pos -} - -// DblQuoted represents a list of nodes within double quotes. -type DblQuoted struct { - Position Pos - Dollar bool - Parts []WordPart -} - -func (q *DblQuoted) Pos() Pos { return q.Position } -func (q *DblQuoted) End() Pos { - if q.Position == 0 { - return defaultPos - } - end := q.Position - if len(q.Parts) > 0 { - end = partsLastEnd(q.Parts) - } else if q.Dollar { - end += 2 - } else { - end++ - } - return posAdd(end, 1) -} - -// CmdSubst represents a command substitution. -type CmdSubst struct { - Left, Right Pos - Stmts []*Stmt -} - -func (c *CmdSubst) Pos() Pos { return c.Left } -func (c *CmdSubst) End() Pos { return posAdd(c.Right, 1) } - -// ParamExp represents a parameter expansion. -type ParamExp struct { - Dollar, Rbrace Pos - Short, Length bool - Param Lit - Ind *Index - Slice *Slice - Repl *Replace - Exp *Expansion -} - -func (p *ParamExp) Pos() Pos { return p.Dollar } -func (p *ParamExp) End() Pos { - if p.Rbrace > 0 { - return p.Rbrace + 1 - } - return p.Param.End() -} - -// Index represents access to an array via an index inside a ParamExp. -// -// This node will never appear when in PosixConformant mode. -type Index struct { - Word Word -} - -// Slice represents character slicing inside a ParamExp. -// -// This node will never appear when in PosixConformant mode. -type Slice struct { - Offset, Length Word -} - -// Replace represents a search and replace inside a ParamExp. -type Replace struct { - All bool - Orig, With Word -} - -// Expansion represents string manipulation in a ParamExp other than -// those covered by Replace. -type Expansion struct { - Op ParExpOperator - Word Word -} - -// ArithmExp represents an arithmetic expansion. -type ArithmExp struct { - Left, Right Pos - Bracket bool - X ArithmExpr -} - -func (a *ArithmExp) Pos() Pos { return a.Left } -func (a *ArithmExp) End() Pos { - if a.Bracket { - return posAdd(a.Right, 1) - } - return posAdd(a.Right, 2) -} - -// ArithmCmd represents an arithmetic command. -- -// This node will never appear when in PosixConformant mode. -type ArithmCmd struct { - Left, Right Pos - X ArithmExpr -} - -func (a *ArithmCmd) Pos() Pos { return a.Left } -func (a *ArithmCmd) End() Pos { return posAdd(a.Right, 2) } - -// ArithmExpr represents all nodes that form arithmetic expressions. -type ArithmExpr interface { - Node - arithmExprNode() -} - -func (*BinaryArithm) arithmExprNode() {} -func (*UnaryArithm) arithmExprNode() {} -func (*ParenArithm) arithmExprNode() {} -func (*Word) arithmExprNode() {} - -// BinaryArithm represents a binary expression between two arithmetic -// expressions.
-type BinaryArithm struct { - OpPos Pos - Op Token - X, Y ArithmExpr -} - -func (b *BinaryArithm) Pos() Pos { return b.X.Pos() } -func (b *BinaryArithm) End() Pos { return b.Y.End() } - -// UnaryArithm represents a unary expression over a node, either before -// or after it. -type UnaryArithm struct { - OpPos Pos - Op Token - Post bool - X ArithmExpr -} - -func (u *UnaryArithm) Pos() Pos { - if u.Post { - return u.X.Pos() - } - return u.OpPos -} - -func (u *UnaryArithm) End() Pos { - if u.Post { - return posAdd(u.OpPos, 2) - } - return u.X.End() -} - -// ParenArithm represents an expression within parentheses inside an -// ArithmExp. -type ParenArithm struct { - Lparen, Rparen Pos - X ArithmExpr -} - -func (p *ParenArithm) Pos() Pos { return p.Lparen } -func (p *ParenArithm) End() Pos { return posAdd(p.Rparen, 1) } - -// CaseClause represents a case (switch) clause. -type CaseClause struct { - Case, Esac Pos - Word Word - List []*PatternList -} - -func (c *CaseClause) Pos() Pos { return c.Case } -func (c *CaseClause) End() Pos { return posAdd(c.Esac, 4) } - -// PatternList represents a pattern list (case) within a CaseClause. -type PatternList struct { - Op CaseOperator - OpPos Pos - Patterns []Word - Stmts []*Stmt -} - -// TestClause represents a Bash extended test clause. -// -// This node will never appear when in PosixConformant mode. -type TestClause struct { - Left, Right Pos - X TestExpr -} - -func (t *TestClause) Pos() Pos { return t.Left } -func (t *TestClause) End() Pos { return posAdd(t.Right, 2) } - -// TestExpr represents all nodes that form test expressions. -type TestExpr interface { - Node - testExprNode() -} - -func (*BinaryTest) testExprNode() {} -func (*UnaryTest) testExprNode() {} -func (*ParenTest) testExprNode() {} -func (*Word) testExprNode() {} - -// BinaryTest represents a binary expression between two test -// expressions. -type BinaryTest struct { - OpPos Pos - Op BinTestOperator - X, Y TestExpr -} - -func (b *BinaryTest) Pos() Pos { return b.X.Pos() } -func (b *BinaryTest) End() Pos { return b.Y.End() } - -// UnaryTest represents a unary expression over a node. -type UnaryTest struct { - OpPos Pos - Op UnTestOperator - X TestExpr -} - -func (u *UnaryTest) Pos() Pos { return u.OpPos } -func (u *UnaryTest) End() Pos { return u.X.End() } - -// ParenTest represents an expression within parentheses inside a -// TestClause. -type ParenTest struct { - Lparen, Rparen Pos - X TestExpr -} - -func (p *ParenTest) Pos() Pos { return p.Lparen } -func (p *ParenTest) End() Pos { return posAdd(p.Rparen, 1) } - -// DeclClause represents a Bash declare clause. -// -// This node will never appear when in PosixConformant mode. -type DeclClause struct { - Position Pos - Variant string - Opts []Word - Assigns []*Assign -} - -func (d *DeclClause) Pos() Pos { return d.Position } -func (d *DeclClause) End() Pos { - end := wordLastEnd(d.Opts) - if len(d.Assigns) > 0 { - assignEnd := d.Assigns[len(d.Assigns)-1].End() - end = posMax(end, assignEnd) - } - return end -} - -// ArrayExpr represents a Bash array expression. -// -// This node will never appear when in PosixConformant mode. -type ArrayExpr struct { - Lparen, Rparen Pos - List []Word -} - -func (a *ArrayExpr) Pos() Pos { return a.Lparen } -func (a *ArrayExpr) End() Pos { return posAdd(a.Rparen, 1) } - -// ExtGlob represents a Bash extended globbing expression. Note that -// these are parsed independently of whether shopt has been called or -// not.
-// -// This node will never appear when in PosixConformant mode. -type ExtGlob struct { - Op GlobOperator - Pattern Lit -} - -func (e *ExtGlob) Pos() Pos { return posAdd(e.Pattern.Pos(), -2) } -func (e *ExtGlob) End() Pos { return posAdd(e.Pattern.End(), 1) } - -// ProcSubst represents a Bash process substitution. -// -// This node will never appear when in PosixConformant mode. -type ProcSubst struct { - OpPos, Rparen Pos - Op ProcOperator - Stmts []*Stmt -} - -func (s *ProcSubst) Pos() Pos { return s.OpPos } -func (s *ProcSubst) End() Pos { return posAdd(s.Rparen, 1) } - -// EvalClause represents a Bash eval clause. -// -// This node will never appear when in PosixConformant mode. -type EvalClause struct { - Eval Pos - Stmt *Stmt -} - -func (e *EvalClause) Pos() Pos { return e.Eval } -func (e *EvalClause) End() Pos { - if e.Stmt == nil { - return posAdd(e.Eval, 4) - } - return e.Stmt.End() -} - -// CoprocClause represents a Bash coproc clause. -// -// This node will never appear when in PosixConformant mode. -type CoprocClause struct { - Coproc Pos - Name *Lit - Stmt *Stmt -} - -func (c *CoprocClause) Pos() Pos { return c.Coproc } -func (c *CoprocClause) End() Pos { return c.Stmt.End() } - -// LetClause represents a Bash let clause. -// -// This node will never appear when in PosixConformant mode. -type LetClause struct { - Let Pos - Exprs []ArithmExpr -} - -func (l *LetClause) Pos() Pos { return l.Let } -func (l *LetClause) End() Pos { return l.Exprs[len(l.Exprs)-1].End() } - -func posAdd(pos Pos, n int) Pos { - if pos == defaultPos { - return pos - } - return pos + Pos(n) -} - -func posAddStr(pos Pos, s string) Pos { - return posAdd(pos, len(s)) -} - -func stmtFirstPos(sts []*Stmt) Pos { - if len(sts) == 0 { - return defaultPos - } - return sts[0].Pos() -} - -func stmtLastEnd(sts []*Stmt) Pos { - if len(sts) == 0 { - return defaultPos - } - return sts[len(sts)-1].End() -} - -func partsFirstPos(ps []WordPart) Pos { - if len(ps) == 0 { - return defaultPos - } - return ps[0].Pos() -} - -func partsLastEnd(ps []WordPart) Pos { - if len(ps) == 0 { - return defaultPos - } - return ps[len(ps)-1].End() -} - -func wordLastEnd(ws []Word) Pos { - if len(ws) == 0 { - return defaultPos - } - return ws[len(ws)-1].End() -} diff --git a/vendor/github.com/mvdan/sh/syntax/parser.go b/vendor/github.com/mvdan/sh/syntax/parser.go deleted file mode 100644 index c9ef38e..0000000 --- a/vendor/github.com/mvdan/sh/syntax/parser.go +++ /dev/null @@ -1,1659 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package syntax - -import ( - "bytes" - "fmt" - "strconv" - "sync" -) - -// ParseMode controls the parser behaviour via a set of flags. -type ParseMode uint - -const ( - ParseComments ParseMode = 1 << iota // add comments to the AST - PosixConformant // match the POSIX standard where it differs from bash -) - -var parserFree = sync.Pool{ - New: func() interface{} { - return &parser{helperBuf: new(bytes.Buffer)} - }, -} - -// Parse reads and parses a shell program with an optional name. It -// returns the parsed program if no issues were encountered. Otherwise, -// an error is returned. 
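Aside: to make the contract of Parse concrete, a short, hedged example of calling it (import path as vendored here). On failure the error is a *syntax.ParseError, defined further down in this file, which renders as "name:line:col: text":

package main

import (
	"fmt"
	"os"

	"github.com/mvdan/sh/syntax"
)

func main() {
	src := []byte("if true; then echo hi; fi\n")
	prog, err := syntax.Parse(src, "example.sh", syntax.ParseComments)
	if err != nil {
		// e.g. "example.sh:1:1: ..." for a syntax error.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Each top-level statement reports its position via Pos/End,
	// which File.Position maps back to line and column.
	for _, s := range prog.Stmts {
		pos := prog.Position(s.Pos())
		fmt.Printf("%T at %d:%d\n", s.Cmd, pos.Line, pos.Column)
	}
}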
-func Parse(src []byte, name string, mode ParseMode) (*File, error) { - p := parserFree.Get().(*parser) - p.reset() - alloc := &struct { - f File - l [16]int - }{} - p.f = &alloc.f - p.f.Name = name - p.f.Lines = alloc.l[:1] - p.src, p.mode = src, mode - p.next() - p.f.Stmts = p.stmts() - parserFree.Put(p) - return p.f, p.err -} - -type parser struct { - src []byte - - f *File - mode ParseMode - - spaced, newLine bool - - err error - - tok Token - val string - - pos Pos - npos int - - quote quoteState - asPos int - - // list of pending heredoc bodies - buriedHdocs int - heredocs []*Redirect - hdocStop []byte - - helperBuf *bytes.Buffer - - litBatch []Lit - wpsBatch []WordPart - stmtBatch []Stmt - stListBatch []*Stmt - - litBuf [32]byte -} - -func (p *parser) lit(pos Pos, val string) *Lit { - if len(p.litBatch) == 0 { - p.litBatch = make([]Lit, 32) - } - l := &p.litBatch[0] - l.ValuePos = pos - l.Value = val - p.litBatch = p.litBatch[1:] - return l -} - -func (p *parser) singleWps(wp WordPart) []WordPart { - if len(p.wpsBatch) == 0 { - p.wpsBatch = make([]WordPart, 64) - } - wps := p.wpsBatch[:1:1] - p.wpsBatch = p.wpsBatch[1:] - wps[0] = wp - return wps -} - -func (p *parser) wps() []WordPart { - if len(p.wpsBatch) < 4 { - p.wpsBatch = make([]WordPart, 64) - } - wps := p.wpsBatch[:0:4] - p.wpsBatch = p.wpsBatch[4:] - return wps -} - -func (p *parser) stmt(pos Pos) *Stmt { - if len(p.stmtBatch) == 0 { - p.stmtBatch = make([]Stmt, 16) - } - s := &p.stmtBatch[0] - s.Position = pos - p.stmtBatch = p.stmtBatch[1:] - return s -} - -func (p *parser) stList() []*Stmt { - if len(p.stListBatch) == 0 { - p.stListBatch = make([]*Stmt, 128) - } - stmts := p.stListBatch[:0:4] - p.stListBatch = p.stListBatch[4:] - return stmts -} - -type quoteState int - -const ( - noState quoteState = 1 << iota - subCmd - subCmdBckquo - sglQuotes - dblQuotes - hdocWord - hdocBody - hdocBodyTabs - arithmExpr - arithmExprLet - arithmExprCmd - arithmExprBrack - testRegexp - switchCase - paramExpName - paramExpInd - paramExpRepl - paramExpExp - - allRegTokens = noState | subCmd | subCmdBckquo | hdocWord | switchCase - allArithmExpr = arithmExpr | arithmExprLet | arithmExprCmd | arithmExprBrack - allRbrack = arithmExprBrack | paramExpInd -) - -func (p *parser) bash() bool { return p.mode&PosixConformant == 0 } - -func (p *parser) reset() { - p.spaced, p.newLine = false, false - p.err = nil - p.npos = 0 - p.tok, p.quote = illegalTok, noState - p.heredocs = p.heredocs[:0] - p.buriedHdocs = 0 -} - -type saveState struct { - quote quoteState - buriedHdocs int -} - -func (p *parser) preNested(quote quoteState) (s saveState) { - s.quote = p.quote - s.buriedHdocs = p.buriedHdocs - p.buriedHdocs = len(p.heredocs) - p.quote = quote - return -} - -func (p *parser) postNested(s saveState) { - p.quote, p.buriedHdocs = s.quote, s.buriedHdocs -} - -func (p *parser) unquotedWordBytes(w Word) ([]byte, bool) { - p.helperBuf.Reset() - didUnquote := false - for _, wp := range w.Parts { - if p.unquotedWordPart(p.helperBuf, wp) { - didUnquote = true - } - } - return p.helperBuf.Bytes(), didUnquote -} - -func (p *parser) unquotedWordPart(b *bytes.Buffer, wp WordPart) bool { - switch x := wp.(type) { - case *Lit: - if x.Value[0] == '\\' { - b.WriteString(x.Value[1:]) - return true - } - b.WriteString(x.Value) - return false - case *SglQuoted: - b.WriteString(x.Value) - return true - case *DblQuoted: - for _, wp2 := range x.Parts { - p.unquotedWordPart(b, wp2) - } - return true - default: - // catch-all for unusual cases such as ParamExp - 
b.Write(p.src[wp.Pos()-1 : wp.End()-1]) - return false - } -} - -func (p *parser) doHeredocs() { - p.tok = illegalTok - old := p.quote - hdocs := p.heredocs[p.buriedHdocs:] - p.heredocs = p.heredocs[:p.buriedHdocs] - for i, r := range hdocs { - if r.Op == DashHdoc { - p.quote = hdocBodyTabs - } else { - p.quote = hdocBody - } - var quoted bool - p.hdocStop, quoted = p.unquotedWordBytes(r.Word) - if i > 0 && p.npos < len(p.src) && p.src[p.npos] == '\n' { - p.npos++ - p.f.Lines = append(p.f.Lines, p.npos) - } - if !quoted { - p.next() - r.Hdoc = p.word() - continue - } - r.Hdoc = p.hdocLitWord() - - } - p.quote = old -} - -func (p *parser) got(tok Token) bool { - if p.tok == tok { - p.next() - return true - } - return false -} - -func (p *parser) gotRsrv(val string) bool { - if p.tok == _LitWord && p.val == val { - p.next() - return true - } - return false -} - -func (p *parser) gotSameLine(tok Token) bool { - if !p.newLine && p.tok == tok { - p.next() - return true - } - return false -} - -func readableStr(s string) string { - // don't quote tokens like & or } - if s != "" && s[0] >= 'a' && s[0] <= 'z' { - return strconv.Quote(s) - } - return s -} - -func (p *parser) followErr(pos Pos, left, right string) { - leftStr := readableStr(left) - p.posErr(pos, "%s must be followed by %s", leftStr, right) -} - -func (p *parser) follow(lpos Pos, left string, tok Token) Pos { - pos := p.pos - if !p.got(tok) { - p.followErr(lpos, left, tok.String()) - } - return pos -} - -func (p *parser) followRsrv(lpos Pos, left, val string) Pos { - pos := p.pos - if !p.gotRsrv(val) { - p.followErr(lpos, left, fmt.Sprintf("%q", val)) - } - return pos -} - -func (p *parser) followStmts(left string, lpos Pos, stops ...string) []*Stmt { - if p.gotSameLine(semicolon) { - return nil - } - sts := p.stmts(stops...) - if len(sts) < 1 && !p.newLine { - p.followErr(lpos, left, "a statement list") - } - return sts -} - -func (p *parser) followWordTok(tok Token, pos Pos) Word { - w := p.word() - if w.Parts == nil { - p.followErr(pos, tok.String(), "a word") - } - return w -} - -func (p *parser) followWord(s string, pos Pos) Word { - w := p.word() - if w.Parts == nil { - p.followErr(pos, s, "a word") - } - return w -} - -func (p *parser) stmtEnd(n Node, start, end string) Pos { - pos := p.pos - if !p.gotRsrv(end) { - p.posErr(n.Pos(), "%s statement must end with %q", start, end) - } - return pos -} - -func (p *parser) quoteErr(lpos Pos, quote Token) { - p.posErr(lpos, "reached %s without closing quote %s", p.tok, quote) -} - -func (p *parser) matchingErr(lpos Pos, left, right interface{}) { - p.posErr(lpos, "reached %s without matching %s with %s", p.tok, left, right) -} - -func (p *parser) matched(lpos Pos, left, right Token) Pos { - pos := p.pos - if !p.got(right) { - p.matchingErr(lpos, left, right) - } - return pos -} - -func (p *parser) errPass(err error) { - if p.err == nil { - p.err = err - p.tok = _EOF - } -} - -// ParseError represents an error found when parsing a source file. -type ParseError struct { - Position - Filename, Text string -} - -func (e *ParseError) Error() string { - prefix := "" - if e.Filename != "" { - prefix = e.Filename + ":" - } - return fmt.Sprintf("%s%d:%d: %s", prefix, e.Line, e.Column, e.Text) -} - -func (p *parser) posErr(pos Pos, format string, a ...interface{}) { - p.errPass(&ParseError{ - Position: p.f.Position(pos), - Filename: p.f.Name, - Text: fmt.Sprintf(format, a...), - }) -} - -func (p *parser) curErr(format string, a ...interface{}) { - p.posErr(p.pos, format, a...) 
-} - -func (p *parser) stmts(stops ...string) (sts []*Stmt) { - q := p.quote - gotEnd := true - for p.tok != _EOF { - switch p.tok { - case _LitWord: - for _, stop := range stops { - if p.val == stop { - return - } - } - case rightParen: - if q == subCmd { - return - } - case bckQuote: - if q == subCmdBckquo { - return - } - case dblSemicolon, semiFall, dblSemiFall: - if q == switchCase { - return - } - p.curErr("%s can only be used in a case clause", p.tok) - } - if !p.newLine && !gotEnd { - p.curErr("statements must be separated by &, ; or a newline") - } - if p.tok == _EOF { - break - } - if s, end := p.getStmt(true); s == nil { - p.invalidStmtStart() - } else { - if sts == nil { - sts = p.stList() - } - sts = append(sts, s) - gotEnd = end - } - } - return -} - -func (p *parser) invalidStmtStart() { - switch p.tok { - case semicolon, And, Or, AndExpr, OrExpr: - p.curErr("%s can only immediately follow a statement", p.tok) - case rightParen: - p.curErr("%s can only be used to close a subshell", p.tok) - default: - p.curErr("%s is not a valid start for a statement", p.tok) - } -} - -func (p *parser) word() Word { - if p.tok == _LitWord { - w := Word{Parts: p.singleWps(p.lit(p.pos, p.val))} - p.next() - return w - } - return Word{Parts: p.wordParts()} -} - -func (p *parser) gotLit(l *Lit) bool { - l.ValuePos = p.pos - if p.tok == _Lit || p.tok == _LitWord { - l.Value = p.val - p.next() - return true - } - return false -} - -func (p *parser) wordParts() (wps []WordPart) { - for { - n := p.wordPart() - if n == nil { - return - } - if wps == nil { - wps = p.wps() - } - wps = append(wps, n) - if p.spaced { - return - } - } -} - -func (p *parser) wordPart() WordPart { - switch p.tok { - case _Lit, _LitWord: - l := p.lit(p.pos, p.val) - p.next() - return l - case dollBrace: - return p.paramExp() - case dollDblParen, dollBrack: - left := p.tok - ar := &ArithmExp{Left: p.pos, Bracket: left == dollBrack} - old := p.preNested(arithmExpr) - if ar.Bracket { - p.quote = arithmExprBrack - } else if !p.couldBeArithm() { - p.postNested(old) - p.npos = int(ar.Left) + 1 - p.tok = dollParen - p.pos = ar.Left - wp := p.wordPart() - if p.err != nil { - p.err = nil - p.matchingErr(ar.Left, dollDblParen, dblRightParen) - } - return wp - } - p.next() - ar.X = p.arithmExpr(left, ar.Left, 0, false) - if ar.Bracket { - if p.tok != rightBrack { - p.matchingErr(ar.Left, dollBrack, rightBrack) - } - p.postNested(old) - ar.Right = p.pos - p.next() - } else { - ar.Right = p.arithmEnd(dollDblParen, ar.Left, old) - } - return ar - case dollParen: - if p.quote == hdocWord { - p.curErr("nested statements not allowed in heredoc words") - } - cs := &CmdSubst{Left: p.pos} - old := p.preNested(subCmd) - p.next() - cs.Stmts = p.stmts() - p.postNested(old) - cs.Right = p.matched(cs.Left, leftParen, rightParen) - return cs - case dollar: - var b byte - if p.npos >= len(p.src) { - p.tok = _EOF - } else { - b = p.src[p.npos] - } - if p.tok == _EOF || wordBreak(b) || b == '"' || b == '\'' || b == '`' || b == '[' { - l := p.lit(p.pos, "$") - p.next() - return l - } - pe := &ParamExp{Dollar: p.pos, Short: true} - p.pos++ - switch b { - case '@', '*', '#', '$', '?', '!', '0', '-': - p.npos++ - p.tok, p.val = _Lit, string(b) - default: - p.advanceLitOther(p.quote) - } - p.gotLit(&pe.Param) - return pe - case cmdIn, cmdOut: - ps := &ProcSubst{Op: ProcOperator(p.tok), OpPos: p.pos} - old := p.preNested(subCmd) - p.next() - ps.Stmts = p.stmts() - p.postNested(old) - ps.Rparen = p.matched(ps.OpPos, Token(ps.Op), rightParen) - return ps - case 
sglQuote: - sq := &SglQuoted{Position: p.pos} - bs, found := p.readUntil('\'') - rem := bs - for { - i := bytes.IndexByte(rem, '\n') - if i < 0 { - p.npos += len(rem) - break - } - p.npos += i + 1 - p.f.Lines = append(p.f.Lines, p.npos) - rem = rem[i+1:] - } - p.npos++ - if !found { - p.posErr(sq.Pos(), "reached EOF without closing quote %s", sglQuote) - } - sq.Value = string(bs) - p.next() - return sq - case dollSglQuote: - sq := &SglQuoted{Position: p.pos, Dollar: true} - old := p.quote - p.quote = sglQuotes - p.next() - if p.tok == sglQuote { - p.quote = old - } else { - sq.Value = p.val - p.quote = old - p.next() - } - if !p.got(sglQuote) { - p.quoteErr(sq.Pos(), sglQuote) - } - return sq - case dblQuote: - if p.quote == dblQuotes { - return nil - } - fallthrough - case dollDblQuote: - q := &DblQuoted{Position: p.pos, Dollar: p.tok == dollDblQuote} - old := p.quote - p.quote = dblQuotes - p.next() - if p.tok == _LitWord { - q.Parts = p.singleWps(p.lit(p.pos, p.val)) - p.next() - } else { - q.Parts = p.wordParts() - } - p.quote = old - if !p.got(dblQuote) { - p.quoteErr(q.Pos(), dblQuote) - } - return q - case bckQuote: - switch p.quote { - case hdocWord: - p.curErr("nested statements not allowed in heredoc words") - case subCmdBckquo: - return nil - } - cs := &CmdSubst{Left: p.pos} - old := p.preNested(subCmdBckquo) - p.next() - cs.Stmts = p.stmts() - p.postNested(old) - cs.Right = p.pos - if !p.got(bckQuote) { - p.quoteErr(cs.Pos(), bckQuote) - } - return cs - case globQuest, globMul, globAdd, globAt, globNot: - eg := &ExtGlob{Op: GlobOperator(p.tok)} - eg.Pattern.ValuePos = Pos(p.npos + 1) - start := p.npos - lparens := 0 - for _, b := range p.src[start:] { - p.npos++ - if b == '(' { - lparens++ - } else if b == ')' { - if lparens--; lparens < 0 { - eg.Pattern.Value = string(p.src[start : p.npos-1]) - break - } - } - } - p.next() - if lparens != -1 { - p.matchingErr(p.pos, eg.Op, rightParen) - } - return eg - } - return nil -} - -func (p *parser) couldBeArithm() (could bool) { - // save state - oldTok := p.tok - oldNpos := p.npos - oldLines := len(p.f.Lines) - p.next() - lparens := 0 -tokLoop: - for p.tok != _EOF { - switch p.tok { - case leftParen, dollParen: - lparens++ - case dollDblParen, dblLeftParen: - lparens += 2 - case rightParen: - if lparens == 0 { - could = p.peekArithmEnd() - break tokLoop - } - lparens-- - } - p.next() - } - // recover state - p.tok = oldTok - p.npos = oldNpos - p.f.Lines = p.f.Lines[:oldLines] - return -} - -func arithmOpLevel(tok Token) int { - switch tok { - case Comma: - return 0 - case AddAssgn, SubAssgn, MulAssgn, QuoAssgn, RemAssgn, AndAssgn, - OrAssgn, XorAssgn, ShlAssgn, ShrAssgn: - return 1 - case Assgn: - return 2 - case Quest, Colon: - return 3 - case AndExpr, OrExpr: - return 4 - case And, Or, Xor: - return 5 - case Eql, Neq: - return 6 - case Lss, Gtr, Leq, Geq: - return 7 - case Shl, Shr: - return 8 - case Add, Sub: - return 9 - case Mul, Quo, Rem: - return 10 - case Pow: - return 11 - } - return -1 -} - -func (p *parser) arithmExpr(ftok Token, fpos Pos, level int, compact bool) ArithmExpr { - if p.tok == _EOF || p.peekArithmEnd() { - return nil - } - var left ArithmExpr - if level > 11 { - left = p.arithmExprBase(ftok, fpos, compact) - } else { - left = p.arithmExpr(ftok, fpos, level+1, compact) - } - if compact && p.spaced { - return left - } - newLevel := arithmOpLevel(p.tok) - if newLevel < 0 { - switch p.tok { - case _Lit, _LitWord: - p.curErr("not a valid arithmetic operator: %s", p.val) - return nil - case rightParen, _EOF: - 
default: - if p.quote == arithmExpr { - p.curErr("not a valid arithmetic operator: %v", p.tok) - return nil - } - } - } - if newLevel < 0 || newLevel < level { - return left - } - b := &BinaryArithm{ - OpPos: p.pos, - Op: p.tok, - X: left, - } - if p.next(); compact && p.spaced { - p.followErr(b.OpPos, b.Op.String(), "an expression") - } - if b.Y = p.arithmExpr(b.Op, b.OpPos, newLevel, compact); b.Y == nil { - p.followErr(b.OpPos, b.Op.String(), "an expression") - } - return b -} - -func (p *parser) arithmExprBase(ftok Token, fpos Pos, compact bool) ArithmExpr { - var x ArithmExpr - switch p.tok { - case Inc, Dec, Not: - pre := &UnaryArithm{OpPos: p.pos, Op: p.tok} - p.next() - pre.X = p.arithmExprBase(pre.Op, pre.OpPos, compact) - return pre - case leftParen: - pe := &ParenArithm{Lparen: p.pos} - p.next() - if pe.X = p.arithmExpr(leftParen, pe.Lparen, 0, false); pe.X == nil { - p.posErr(pe.Lparen, "parentheses must enclose an expression") - } - pe.Rparen = p.matched(pe.Lparen, leftParen, rightParen) - x = pe - case Add, Sub: - ue := &UnaryArithm{OpPos: p.pos, Op: p.tok} - if p.next(); compact && p.spaced { - p.followErr(ue.OpPos, ue.Op.String(), "an expression") - } - if ue.X = p.arithmExpr(ue.Op, ue.OpPos, 0, compact); ue.X == nil { - p.followErr(ue.OpPos, ue.Op.String(), "an expression") - } - x = ue - case bckQuote: - if p.quote == arithmExprLet { - return nil - } - fallthrough - default: - w := p.word() - if w.Parts == nil { - p.followErr(fpos, ftok.String(), "an expression") - } - x = &w - } - if compact && p.spaced { - return x - } - if p.tok == Inc || p.tok == Dec { - u := &UnaryArithm{ - Post: true, - OpPos: p.pos, - Op: p.tok, - X: x, - } - p.next() - return u - } - return x -} - -func (p *parser) gotParamLit(l *Lit) bool { - l.ValuePos = p.pos - switch p.tok { - case _Lit, _LitWord: - l.Value = p.val - case dollar: - l.Value = "$" - case Quest: - l.Value = "?" 
- case Hash: - l.Value = "#" - case Sub: - l.Value = "-" - default: - return false - } - p.next() - return true -} - -func (p *parser) paramExp() *ParamExp { - pe := &ParamExp{Dollar: p.pos} - old := p.preNested(paramExpName) - p.next() - switch p.tok { - case dblHash: - p.tok = Hash - p.npos-- - fallthrough - case Hash: - if p.npos < len(p.src) && p.src[p.npos] != '}' { - pe.Length = true - p.next() - } - } - if !p.gotParamLit(&pe.Param) && !pe.Length { - p.posErr(pe.Dollar, "parameter expansion requires a literal") - } - if p.tok == rightBrace { - pe.Rbrace = p.pos - p.postNested(old) - p.next() - return pe - } - if p.tok == leftBrack { - if !p.bash() { - p.curErr("arrays are a bash feature") - } - lpos := p.pos - p.quote = paramExpInd - p.next() - pe.Ind = &Index{Word: p.word()} - p.quote = paramExpName - p.matched(lpos, leftBrack, rightBrack) - } - switch p.tok { - case rightBrace: - pe.Rbrace = p.pos - p.postNested(old) - p.next() - return pe - case Quo, dblQuo: - pe.Repl = &Replace{All: p.tok == dblQuo} - p.quote = paramExpRepl - p.next() - pe.Repl.Orig = p.word() - if p.tok == Quo { - p.quote = paramExpExp - p.next() - pe.Repl.With = p.word() - } - case Colon: - if !p.bash() { - p.curErr("slicing is a bash feature") - } - pe.Slice = &Slice{} - colonPos := p.pos - p.next() - if p.tok != Colon { - pe.Slice.Offset = p.followWordTok(Colon, colonPos) - } - colonPos = p.pos - if p.got(Colon) { - pe.Slice.Length = p.followWordTok(Colon, colonPos) - } - case Xor, dblXor, Comma, dblComma: - if !p.bash() { - p.curErr("case expansions are a bash feature") - } - fallthrough - default: - pe.Exp = &Expansion{Op: ParExpOperator(p.tok)} - p.quote = paramExpExp - p.next() - pe.Exp.Word = p.word() - } - p.postNested(old) - pe.Rbrace = p.pos - p.matched(pe.Dollar, dollBrace, rightBrace) - return pe -} - -func (p *parser) peekArithmEnd() bool { - return p.tok == rightParen && p.npos < len(p.src) && p.src[p.npos] == ')' -} - -func (p *parser) arithmEnd(ltok Token, lpos Pos, old saveState) Pos { - if p.peekArithmEnd() { - p.npos++ - } else { - p.matchingErr(lpos, ltok, dblRightParen) - } - p.postNested(old) - pos := p.pos - p.next() - return pos -} - -func stopToken(tok Token) bool { - switch tok { - case _EOF, semicolon, And, Or, AndExpr, OrExpr, pipeAll, dblSemicolon, - semiFall, dblSemiFall, rightParen: - return true - } - return false -} - -func (p *parser) validIdent() bool { - if p.asPos <= 0 { - return false - } - s := p.val[:p.asPos] - for i, c := range s { - switch { - case 'a' <= c && c <= 'z': - case 'A' <= c && c <= 'Z': - case c == '_': - case i > 0 && '0' <= c && c <= '9': - case i > 0 && (c == '[' || c == ']') && p.bash(): - default: - return false - } - } - return true -} - -func (p *parser) getAssign() *Assign { - asPos := p.asPos - as := &Assign{Name: p.lit(p.pos, p.val[:asPos])} - if p.val[asPos] == '+' { - as.Append = true - asPos++ - } - start := p.lit(p.pos+1, p.val[asPos+1:]) - if start.Value != "" { - start.ValuePos += Pos(asPos) - as.Value.Parts = p.singleWps(start) - } - p.next() - if p.spaced { - return as - } - if start.Value == "" && p.tok == leftParen { - if !p.bash() { - p.curErr("arrays are a bash feature") - } - ae := &ArrayExpr{Lparen: p.pos} - p.next() - for p.tok != _EOF && p.tok != rightParen { - if w := p.word(); w.Parts == nil { - p.curErr("array elements must be words") - } else { - ae.List = append(ae.List, w) - } - } - ae.Rparen = p.matched(ae.Lparen, leftParen, rightParen) - as.Value.Parts = p.singleWps(ae) - } else if !p.newLine && !stopToken(p.tok) { - if w := 
p.word(); start.Value == "" { - as.Value = w - } else { - as.Value.Parts = append(as.Value.Parts, w.Parts...) - } - } - return as -} - -func litRedir(src []byte, npos int) bool { - return npos < len(src) && (src[npos] == '>' || src[npos] == '<') -} - -func (p *parser) peekRedir() bool { - switch p.tok { - case _LitWord: - return litRedir(p.src, p.npos) - case Gtr, Shr, Lss, dplIn, dplOut, clbOut, rdrInOut, Shl, dashHdoc, - wordHdoc, rdrAll, appAll: - return true - } - return false -} - -func (p *parser) doRedirect(s *Stmt) { - r := &Redirect{} - var l Lit - if p.gotLit(&l) { - r.N = &l - } - r.Op, r.OpPos = RedirOperator(p.tok), p.pos - p.next() - switch r.Op { - case Hdoc, DashHdoc: - old := p.quote - p.quote = hdocWord - if p.newLine { - p.curErr("heredoc stop word must be on the same line") - } - p.heredocs = append(p.heredocs, r) - r.Word = p.followWordTok(Token(r.Op), r.OpPos) - p.quote = old - p.next() - default: - if p.newLine { - p.curErr("redirect word must be on the same line") - } - r.Word = p.followWordTok(Token(r.Op), r.OpPos) - } - s.Redirs = append(s.Redirs, r) -} - -func (p *parser) getStmt(readEnd bool) (s *Stmt, gotEnd bool) { - s = p.stmt(p.pos) - if p.gotRsrv("!") { - s.Negated = true - } -preLoop: - for { - switch p.tok { - case _Lit, _LitWord: - if p.validIdent() { - s.Assigns = append(s.Assigns, p.getAssign()) - } else if litRedir(p.src, p.npos) { - p.doRedirect(s) - } else { - break preLoop - } - case Gtr, Shr, Lss, dplIn, dplOut, clbOut, rdrInOut, Shl, dashHdoc, - wordHdoc, rdrAll, appAll: - p.doRedirect(s) - default: - break preLoop - } - switch { - case p.newLine, p.tok == _EOF: - return - case p.tok == semicolon: - if readEnd { - s.SemiPos = p.pos - p.next() - gotEnd = true - } - return - } - } - if s = p.gotStmtPipe(s); s == nil { - return - } - switch p.tok { - case AndExpr, OrExpr: - b := &BinaryCmd{OpPos: p.pos, Op: BinCmdOperator(p.tok), X: s} - p.next() - if b.Y, _ = p.getStmt(false); b.Y == nil { - p.followErr(b.OpPos, b.Op.String(), "a statement") - } - s = p.stmt(s.Position) - s.Cmd = b - if readEnd && p.gotSameLine(semicolon) { - gotEnd = true - } - case And: - p.next() - s.Background = true - gotEnd = true - case semicolon: - if !p.newLine && readEnd { - s.SemiPos = p.pos - p.next() - gotEnd = true - } - } - return -} - -func bashDeclareWord(s string) bool { - switch s { - case "declare", "local", "export", "readonly", "typeset", "nameref": - return true - } - return false -} - -func (p *parser) gotStmtPipe(s *Stmt) *Stmt { - switch p.tok { - case leftParen: - s.Cmd = p.subshell() - case dblLeftParen: - s.Cmd = p.arithmExpCmd() - case _LitWord: - switch { - case p.val == "}": - p.curErr("%s can only be used to close a block", p.val) - case p.val == "{": - s.Cmd = p.block() - case p.val == "if": - s.Cmd = p.ifClause() - case p.val == "while": - s.Cmd = p.whileClause() - case p.val == "until": - s.Cmd = p.untilClause() - case p.val == "for": - s.Cmd = p.forClause() - case p.val == "case": - s.Cmd = p.caseClause() - case p.bash() && p.val == "[[": - s.Cmd = p.testClause() - case p.bash() && bashDeclareWord(p.val): - s.Cmd = p.declClause() - case p.bash() && p.val == "eval": - s.Cmd = p.evalClause() - case p.bash() && p.val == "coproc": - s.Cmd = p.coprocClause() - case p.bash() && p.val == "let": - s.Cmd = p.letClause() - case p.bash() && p.val == "function": - s.Cmd = p.bashFuncDecl() - default: - name := Lit{ValuePos: p.pos, Value: p.val} - p.next() - if p.gotSameLine(leftParen) { - p.follow(name.ValuePos, "foo(", rightParen) - s.Cmd = p.funcDecl(name, 
name.ValuePos) - } else { - s.Cmd = p.callExpr(s, Word{ - Parts: p.singleWps(&name), - }) - } - } - case _Lit, dollBrace, dollDblParen, dollParen, dollar, cmdIn, cmdOut, sglQuote, - dollSglQuote, dblQuote, dollDblQuote, bckQuote, dollBrack, globQuest, globMul, globAdd, - globAt, globNot: - w := Word{Parts: p.wordParts()} - if p.gotSameLine(leftParen) && p.err == nil { - rawName := string(p.src[w.Pos()-1 : w.End()-1]) - p.posErr(w.Pos(), "invalid func name: %q", rawName) - } - s.Cmd = p.callExpr(s, w) - } - for !p.newLine && p.peekRedir() { - p.doRedirect(s) - } - if s.Cmd == nil && len(s.Redirs) == 0 && !s.Negated && len(s.Assigns) == 0 { - return nil - } - if p.tok == Or || p.tok == pipeAll { - b := &BinaryCmd{OpPos: p.pos, Op: BinCmdOperator(p.tok), X: s} - p.next() - if b.Y = p.gotStmtPipe(p.stmt(p.pos)); b.Y == nil { - p.followErr(b.OpPos, b.Op.String(), "a statement") - } - s = p.stmt(s.Position) - s.Cmd = b - } - return s -} - -func (p *parser) subshell() *Subshell { - s := &Subshell{Lparen: p.pos} - old := p.preNested(subCmd) - p.next() - s.Stmts = p.stmts() - p.postNested(old) - s.Rparen = p.matched(s.Lparen, leftParen, rightParen) - return s -} - -func (p *parser) arithmExpCmd() Command { - ar := &ArithmCmd{Left: p.pos} - old := p.preNested(arithmExprCmd) - if !p.couldBeArithm() { - p.postNested(old) - p.npos = int(ar.Left) - p.tok = leftParen - p.pos = ar.Left - s := p.subshell() - if p.err != nil { - p.err = nil - p.matchingErr(ar.Left, dblLeftParen, dblRightParen) - } - return s - } - p.next() - ar.X = p.arithmExpr(dblLeftParen, ar.Left, 0, false) - ar.Right = p.arithmEnd(dblLeftParen, ar.Left, old) - return ar -} - -func (p *parser) block() *Block { - b := &Block{Lbrace: p.pos} - p.next() - b.Stmts = p.stmts("}") - b.Rbrace = p.pos - if !p.gotRsrv("}") { - p.matchingErr(b.Lbrace, "{", "}") - } - return b -} - -func (p *parser) ifClause() *IfClause { - ic := &IfClause{If: p.pos} - p.next() - ic.CondStmts = p.followStmts("if", ic.If, "then") - ic.Then = p.followRsrv(ic.If, "if ", "then") - ic.ThenStmts = p.followStmts("then", ic.Then, "fi", "elif", "else") - elifPos := p.pos - for p.gotRsrv("elif") { - elf := &Elif{Elif: elifPos} - elf.CondStmts = p.followStmts("elif", elf.Elif, "then") - elf.Then = p.followRsrv(elf.Elif, "elif ", "then") - elf.ThenStmts = p.followStmts("then", elf.Then, "fi", "elif", "else") - ic.Elifs = append(ic.Elifs, elf) - elifPos = p.pos - } - if elsePos := p.pos; p.gotRsrv("else") { - ic.Else = elsePos - ic.ElseStmts = p.followStmts("else", ic.Else, "fi") - } - ic.Fi = p.stmtEnd(ic, "if", "fi") - return ic -} - -func (p *parser) whileClause() *WhileClause { - wc := &WhileClause{While: p.pos} - p.next() - wc.CondStmts = p.followStmts("while", wc.While, "do") - wc.Do = p.followRsrv(wc.While, "while ", "do") - wc.DoStmts = p.followStmts("do", wc.Do, "done") - wc.Done = p.stmtEnd(wc, "while", "done") - return wc -} - -func (p *parser) untilClause() *UntilClause { - uc := &UntilClause{Until: p.pos} - p.next() - uc.CondStmts = p.followStmts("until", uc.Until, "do") - uc.Do = p.followRsrv(uc.Until, "until ", "do") - uc.DoStmts = p.followStmts("do", uc.Do, "done") - uc.Done = p.stmtEnd(uc, "until", "done") - return uc -} - -func (p *parser) forClause() *ForClause { - fc := &ForClause{For: p.pos} - p.next() - fc.Loop = p.loop(fc.For) - fc.Do = p.followRsrv(fc.For, "for foo [in words]", "do") - fc.DoStmts = p.followStmts("do", fc.Do, "done") - fc.Done = p.stmtEnd(fc, "for", "done") - return fc -} - -func (p *parser) loop(forPos Pos) Loop { - if p.tok == 
dblLeftParen { - cl := &CStyleLoop{Lparen: p.pos} - old := p.preNested(arithmExprCmd) - p.next() - if p.tok == dblSemicolon { - p.npos-- - p.tok = semicolon - } - if p.tok != semicolon { - cl.Init = p.arithmExpr(dblLeftParen, cl.Lparen, 0, false) - } - scPos := p.pos - p.follow(p.pos, "expression", semicolon) - if p.tok != semicolon { - cl.Cond = p.arithmExpr(semicolon, scPos, 0, false) - } - scPos = p.pos - p.follow(p.pos, "expression", semicolon) - if p.tok != semicolon { - cl.Post = p.arithmExpr(semicolon, scPos, 0, false) - } - cl.Rparen = p.arithmEnd(dblLeftParen, cl.Lparen, old) - p.gotSameLine(semicolon) - return cl - } - wi := &WordIter{} - if !p.gotLit(&wi.Name) { - p.followErr(forPos, "for", "a literal") - } - if p.gotRsrv("in") { - for !p.newLine && p.tok != _EOF && p.tok != semicolon { - if w := p.word(); w.Parts == nil { - p.curErr("word list can only contain words") - } else { - wi.List = append(wi.List, w) - } - } - p.gotSameLine(semicolon) - } else if !p.newLine && !p.got(semicolon) { - p.followErr(forPos, "for foo", `"in", ; or a newline`) - } - return wi -} - -func (p *parser) caseClause() *CaseClause { - cc := &CaseClause{Case: p.pos} - p.next() - cc.Word = p.followWord("case", cc.Case) - p.followRsrv(cc.Case, "case x", "in") - cc.List = p.patLists() - cc.Esac = p.stmtEnd(cc, "case", "esac") - return cc -} - -func (p *parser) patLists() (pls []*PatternList) { - for p.tok != _EOF && !(p.tok == _LitWord && p.val == "esac") { - pl := &PatternList{} - p.got(leftParen) - for p.tok != _EOF { - if w := p.word(); w.Parts == nil { - p.curErr("case patterns must consist of words") - } else { - pl.Patterns = append(pl.Patterns, w) - } - if p.tok == rightParen { - break - } - if !p.got(Or) { - p.curErr("case patterns must be separated with |") - } - } - old := p.preNested(switchCase) - p.next() - pl.Stmts = p.stmts("esac") - p.postNested(old) - pl.OpPos = p.pos - if p.tok != dblSemicolon && p.tok != semiFall && p.tok != dblSemiFall { - pl.Op = DblSemicolon - pls = append(pls, pl) - break - } - pl.Op = CaseOperator(p.tok) - p.next() - pls = append(pls, pl) - } - return -} - -func (p *parser) testClause() *TestClause { - tc := &TestClause{Left: p.pos} - p.next() - if p.tok == _EOF || p.gotRsrv("]]") { - p.posErr(tc.Left, "test clause requires at least one expression") - } - tc.X = p.testExpr(illegalTok, tc.Left, 0) - tc.Right = p.pos - if !p.gotRsrv("]]") { - p.matchingErr(tc.Left, "[[", "]]") - } - return tc -} - -func (p *parser) testExpr(ftok Token, fpos Pos, level int) TestExpr { - var left TestExpr - if level > 1 { - left = p.testExprBase(ftok, fpos) - } else { - left = p.testExpr(ftok, fpos, level+1) - } - if left == nil { - return left - } - var newLevel int - switch p.tok { - case AndExpr, OrExpr: - case _LitWord: - if p.val == "]]" { - return left - } - fallthrough - case Lss, Gtr: - newLevel = 1 - case _EOF, rightParen: - return left - default: - p.curErr("not a valid test operator: %v", p.tok) - } - if newLevel < level { - return left - } - if p.tok == _LitWord { - if p.tok = testBinaryOp(p.val); p.tok == illegalTok { - p.curErr("not a valid test operator: %s", p.val) - } - } - b := &BinaryTest{ - OpPos: p.pos, - Op: BinTestOperator(p.tok), - X: left, - } - if b.Op == TsReMatch { - old := p.preNested(testRegexp) - p.next() - p.postNested(old) - } else { - p.next() - } - if b.Y = p.testExpr(Token(b.Op), b.OpPos, newLevel); b.Y == nil { - p.followErr(b.OpPos, b.Op.String(), "an expression") - } - return b -} - -func (p *parser) testExprBase(ftok Token, fpos Pos) TestExpr { - 
switch p.tok { - case _EOF: - return nil - case _LitWord: - if op := testUnaryOp(p.val); op != illegalTok { - p.tok = op - } - } - switch p.tok { - case Not: - u := &UnaryTest{OpPos: p.pos, Op: TsNot} - p.next() - u.X = p.testExpr(Token(u.Op), u.OpPos, 0) - return u - case tsExists, tsRegFile, tsDirect, tsCharSp, tsBlckSp, tsNmPipe, tsSocket, tsSmbLink, - tsGIDSet, tsUIDSet, tsRead, tsWrite, tsExec, tsNoEmpty, tsFdTerm, tsEmpStr, - tsNempStr, tsOptSet, tsVarSet, tsRefVar: - u := &UnaryTest{OpPos: p.pos, Op: UnTestOperator(p.tok)} - p.next() - w := p.followWordTok(ftok, fpos) - u.X = &w - return u - case leftParen: - pe := &ParenTest{Lparen: p.pos} - p.next() - if pe.X = p.testExpr(leftParen, pe.Lparen, 0); pe.X == nil { - p.posErr(pe.Lparen, "parentheses must enclose an expression") - } - pe.Rparen = p.matched(pe.Lparen, leftParen, rightParen) - return pe - case rightParen: - default: - w := p.followWordTok(ftok, fpos) - return &w - } - return nil -} - -func (p *parser) declClause() *DeclClause { - name := p.val - ds := &DeclClause{Position: p.pos} - switch name { - case "declare", "typeset": // typeset is an obsolete synonym - default: - ds.Variant = name - } - p.next() - for p.tok == _LitWord && p.val[0] == '-' { - ds.Opts = append(ds.Opts, p.word()) - } - for !p.newLine && !stopToken(p.tok) && !p.peekRedir() { - if (p.tok == _Lit || p.tok == _LitWord) && p.validIdent() { - ds.Assigns = append(ds.Assigns, p.getAssign()) - } else if w := p.word(); w.Parts == nil { - p.followErr(p.pos, name, "words") - } else { - ds.Assigns = append(ds.Assigns, &Assign{Value: w}) - } - } - return ds -} - -func (p *parser) evalClause() *EvalClause { - ec := &EvalClause{Eval: p.pos} - p.next() - ec.Stmt, _ = p.getStmt(false) - return ec -} - -func isBashCompoundCommand(tok Token, val string) bool { - switch tok { - case leftParen, dblLeftParen: - return true - case _LitWord: - switch val { - case "{", "if", "while", "until", "for", "case", "[[", "eval", - "coproc", "let", "function": - return true - } - if bashDeclareWord(val) { - return true - } - } - return false -} - -func (p *parser) coprocClause() *CoprocClause { - cc := &CoprocClause{Coproc: p.pos} - p.next() - if isBashCompoundCommand(p.tok, p.val) { - // has no name - cc.Stmt, _ = p.getStmt(false) - return cc - } - if p.newLine { - p.posErr(cc.Coproc, "coproc clause requires a command") - } - var l Lit - if p.gotLit(&l) { - cc.Name = &l - } - cc.Stmt, _ = p.getStmt(false) - if cc.Stmt == nil { - if cc.Name == nil { - p.posErr(cc.Coproc, "coproc clause requires a command") - return nil - } - // name was in fact the stmt - cc.Stmt = &Stmt{ - Position: cc.Name.ValuePos, - Cmd: &CallExpr{Args: []Word{ - {Parts: p.singleWps(cc.Name)}, - }}, - } - cc.Name = nil - } else if cc.Name != nil { - if call, ok := cc.Stmt.Cmd.(*CallExpr); ok { - // name was in fact the start of a call - call.Args = append([]Word{{Parts: p.singleWps(cc.Name)}}, - call.Args...) 
- cc.Name = nil - } - } - return cc -} - -func (p *parser) letClause() *LetClause { - lc := &LetClause{Let: p.pos} - old := p.preNested(arithmExprLet) - p.next() - for !p.newLine && !stopToken(p.tok) && !p.peekRedir() { - x := p.arithmExpr(illegalTok, lc.Let, 0, true) - if x == nil { - break - } - lc.Exprs = append(lc.Exprs, x) - } - if len(lc.Exprs) == 0 { - p.posErr(lc.Let, "let clause requires at least one expression") - } - p.postNested(old) - if p.tok == illegalTok { - p.next() - } - return lc -} - -func (p *parser) bashFuncDecl() *FuncDecl { - fpos := p.pos - p.next() - if p.tok != _LitWord { - if w := p.followWord("function", fpos); p.err == nil { - rawName := string(p.src[w.Pos()-1 : w.End()-1]) - p.posErr(w.Pos(), "invalid func name: %q", rawName) - } - } - name := Lit{ValuePos: p.pos, Value: p.val} - p.next() - if p.gotSameLine(leftParen) { - p.follow(name.ValuePos, "foo(", rightParen) - } - return p.funcDecl(name, fpos) -} - -func (p *parser) callExpr(s *Stmt, w Word) *CallExpr { - alloc := &struct { - ce CallExpr - ws [4]Word - }{} - ce := &alloc.ce - ce.Args = alloc.ws[:1] - ce.Args[0] = w - for !p.newLine { - switch p.tok { - case _EOF, semicolon, And, Or, AndExpr, OrExpr, pipeAll, - dblSemicolon, semiFall, dblSemiFall: - return ce - case _LitWord: - if litRedir(p.src, p.npos) { - p.doRedirect(s) - continue - } - ce.Args = append(ce.Args, Word{ - Parts: p.singleWps(p.lit(p.pos, p.val)), - }) - p.next() - case bckQuote: - if p.quote == subCmdBckquo { - return ce - } - fallthrough - case _Lit, dollBrace, dollDblParen, dollParen, dollar, cmdIn, cmdOut, - sglQuote, dollSglQuote, dblQuote, dollDblQuote, dollBrack, globQuest, - globMul, globAdd, globAt, globNot: - ce.Args = append(ce.Args, Word{Parts: p.wordParts()}) - case Gtr, Shr, Lss, dplIn, dplOut, clbOut, rdrInOut, Shl, - dashHdoc, wordHdoc, rdrAll, appAll: - p.doRedirect(s) - case rightParen: - if p.quote == subCmd { - return ce - } - fallthrough - default: - p.curErr("a command can only contain words and redirects") - } - } - return ce -} - -func (p *parser) funcDecl(name Lit, pos Pos) *FuncDecl { - fd := &FuncDecl{ - Position: pos, - BashStyle: pos != name.ValuePos, - Name: name, - } - if fd.Body, _ = p.getStmt(false); fd.Body == nil { - p.followErr(fd.Pos(), "foo()", "a statement") - } - return fd -} diff --git a/vendor/github.com/mvdan/sh/syntax/printer.go b/vendor/github.com/mvdan/sh/syntax/printer.go deleted file mode 100644 index 4141f05..0000000 --- a/vendor/github.com/mvdan/sh/syntax/printer.go +++ /dev/null @@ -1,1147 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package syntax - -import ( - "bufio" - "io" - "sync" -) - -// PrintConfig controls how the printing of an AST node will behave. -type PrintConfig struct { - Spaces int // 0 (default) for tabs, >0 for number of spaces -} - -var printerFree = sync.Pool{ - New: func() interface{} { - return &printer{bufWriter: bufio.NewWriter(nil)} - }, -} - -// Fprint "pretty-prints" the given AST file to the given writer. -func (c PrintConfig) Fprint(w io.Writer, f *File) error { - p := printerFree.Get().(*printer) - p.reset() - p.f, p.c = f, c - p.comments = f.Comments - p.bufWriter.Reset(w) - p.stmts(f.Stmts) - p.commentsUpTo(0) - p.newline(0) - err := p.bufWriter.Flush() - printerFree.Put(p) - return err -} - -// Fprint "pretty-prints" the given AST file to the given writer. It -// calls PrintConfig.Fprint with its default settings. 
-func Fprint(w io.Writer, f *File) error { - return PrintConfig{}.Fprint(w, f) -} - -type bufWriter interface { - WriteByte(byte) error - WriteString(string) (int, error) - Reset(io.Writer) - Flush() error -} - -type printer struct { - bufWriter - - f *File - c PrintConfig - - wantSpace bool - wantNewline bool - wroteSemi bool - - commentPadding int - - // nline is the position of the next newline - nline Pos - nlineIndex int - - // lastLevel is the last level of indentation that was used. - lastLevel int - // level is the current level of indentation. - level int - // levelIncs records which indentation level increments actually - // took place, to revert them once their section ends. - levelIncs []bool - - nestedBinary bool - - // comments is the list of pending comments to write. - comments []*Comment - - // pendingHdocs is the list of pending heredocs to write. - pendingHdocs []*Redirect - - // used in stmtLen to align comments - lenPrinter *printer - lenCounter byteCounter -} - -func (p *printer) reset() { - p.wantSpace, p.wantNewline = false, false - p.commentPadding = 0 - p.nline, p.nlineIndex = 0, 0 - p.lastLevel, p.level = 0, 0 - p.levelIncs = p.levelIncs[:0] - p.nestedBinary = false - p.pendingHdocs = p.pendingHdocs[:0] -} - -func (p *printer) incLine() { - if p.nlineIndex++; p.nlineIndex >= len(p.f.Lines) { - p.nline = maxPos - } else { - p.nline = Pos(p.f.Lines[p.nlineIndex]) - } -} - -func (p *printer) incLines(pos Pos) { - for p.nline < pos { - p.incLine() - } -} - -func (p *printer) space() { - p.WriteByte(' ') - p.wantSpace = false -} - -func (p *printer) spaces(n int) { - for i := 0; i < n; i++ { - p.WriteByte(' ') - } -} - -func (p *printer) tabs(n int) { - for i := 0; i < n; i++ { - p.WriteByte('\t') - } -} - -func (p *printer) bslashNewl() { - p.WriteString(" \\\n") - p.wantSpace = false - p.incLine() -} - -func (p *printer) spacedString(s string, spaceAfter bool) { - if p.wantSpace { - p.WriteByte(' ') - } - p.WriteString(s) - p.wantSpace = spaceAfter -} - -func (p *printer) semiOrNewl(s string, pos Pos) { - if p.wantNewline { - p.newline(pos) - p.indent() - } else { - if !p.wroteSemi { - p.WriteByte(';') - } - p.WriteByte(' ') - } - p.incLines(pos) - p.WriteString(s) - p.wantSpace = true -} - -func (p *printer) incLevel() { - inc := false - if p.level <= p.lastLevel { - p.level++ - inc = true - } else if last := &p.levelIncs[len(p.levelIncs)-1]; *last { - *last = false - inc = true - } - p.levelIncs = append(p.levelIncs, inc) -} - -func (p *printer) decLevel() { - if p.levelIncs[len(p.levelIncs)-1] { - p.level-- - } - p.levelIncs = p.levelIncs[:len(p.levelIncs)-1] -} - -func (p *printer) indent() { - p.lastLevel = p.level - switch { - case p.level == 0: - case p.c.Spaces == 0: - p.tabs(p.level) - case p.c.Spaces > 0: - p.spaces(p.c.Spaces * p.level) - } -} - -func (p *printer) newline(pos Pos) { - p.wantNewline, p.wantSpace = false, false - p.WriteByte('\n') - if pos > p.nline { - p.incLine() - } - hdocs := p.pendingHdocs - p.pendingHdocs = p.pendingHdocs[:0] - for _, r := range hdocs { - p.word(r.Hdoc) - p.incLines(r.Hdoc.End() + 1) - p.unquotedWord(r.Word) - p.WriteByte('\n') - p.incLine() - p.wantSpace = false - } -} - -func (p *printer) newlines(pos Pos) { - p.newline(pos) - if pos > p.nline { - // preserve single empty lines - p.WriteByte('\n') - p.incLine() - } - p.indent() -} - -func (p *printer) commentsAndSeparate(pos Pos) { - p.commentsUpTo(pos) - if p.wantNewline || pos > p.nline { - p.newlines(pos) - } -} - -func (p *printer) sepTok(s string, pos Pos) { - 
p.level++ - p.commentsUpTo(pos) - p.level-- - if p.wantNewline || pos > p.nline { - p.newlines(pos) - } - p.WriteString(s) - p.wantSpace = true -} - -func (p *printer) semiRsrv(s string, pos Pos, fallback bool) { - p.level++ - p.commentsUpTo(pos) - p.level-- - if p.wantNewline || pos > p.nline { - p.newlines(pos) - } else if fallback { - if !p.wroteSemi { - p.WriteByte(';') - } - p.WriteByte(' ') - } else if p.wantSpace { - p.WriteByte(' ') - } - p.WriteString(s) - p.wantSpace = true -} - -func (p *printer) commentsUpTo(pos Pos) { - if len(p.comments) < 1 { - return - } - c := p.comments[0] - if pos > 0 && c.Hash >= pos { - return - } - p.comments = p.comments[1:] - switch { - case p.nlineIndex == 0: - case c.Hash >= p.nline: - p.newlines(c.Hash) - default: - p.spaces(p.commentPadding + 1) - } - p.incLines(c.Hash) - p.WriteByte('#') - p.WriteString(c.Text) - p.commentsUpTo(pos) -} - -func (p *printer) expansionOp(op ParExpOperator) { - switch op { - case SubstAdd: - p.WriteByte('+') - case SubstColAdd: - p.WriteString(":+") - case SubstSub: - p.WriteByte('-') - case SubstColSub: - p.WriteString(":-") - case SubstQuest: - p.WriteByte('?') - case SubstColQuest: - p.WriteString(":?") - case SubstAssgn: - p.WriteByte('=') - case SubstColAssgn: - p.WriteString(":=") - case RemSmallSuffix: - p.WriteByte('%') - case RemLargeSuffix: - p.WriteString("%%") - case RemSmallPrefix: - p.WriteByte('#') - case RemLargePrefix: - p.WriteString("##") - case UpperFirst: - p.WriteByte('^') - case UpperAll: - p.WriteString("^^") - case LowerFirst: - p.WriteByte(',') - default: // LowerAll - p.WriteString(",,") - } -} - -func (p *printer) wordPart(wp WordPart) { - switch x := wp.(type) { - case *Lit: - p.WriteString(x.Value) - case *SglQuoted: - if x.Dollar { - p.WriteByte('$') - } - p.WriteByte('\'') - p.WriteString(x.Value) - p.WriteByte('\'') - p.incLines(x.End()) - case *DblQuoted: - if x.Dollar { - p.WriteByte('$') - } - p.WriteByte('"') - for i, n := range x.Parts { - p.wordPart(n) - if i == len(x.Parts)-1 { - p.incLines(n.End()) - } - } - p.WriteByte('"') - case *CmdSubst: - p.incLines(x.Pos()) - p.WriteString("$(") - p.wantSpace = len(x.Stmts) > 0 && startsWithLparen(x.Stmts[0]) - p.nestedStmts(x.Stmts, x.Right) - p.sepTok(")", x.Right) - case *ParamExp: - if x.Short { - p.WriteByte('$') - p.WriteString(x.Param.Value) - break - } - p.WriteString("${") - if x.Length { - p.WriteByte('#') - } - p.WriteString(x.Param.Value) - if x.Ind != nil { - p.WriteByte('[') - p.word(x.Ind.Word) - p.WriteByte(']') - } - if x.Slice != nil { - p.WriteByte(':') - p.word(x.Slice.Offset) - if w2 := x.Slice.Length; w2.Parts != nil { - p.WriteByte(':') - p.word(w2) - } - } else if x.Repl != nil { - if x.Repl.All { - p.WriteByte('/') - } - p.WriteByte('/') - p.word(x.Repl.Orig) - p.WriteByte('/') - p.word(x.Repl.With) - } else if x.Exp != nil { - p.expansionOp(x.Exp.Op) - p.word(x.Exp.Word) - } - p.WriteByte('}') - case *ArithmExp: - p.WriteString("$((") - p.arithmExpr(x.X, false) - p.WriteString("))") - case *ArrayExpr: - p.wantSpace = false - p.WriteByte('(') - p.wordJoin(x.List, false) - p.sepTok(")", x.Rparen) - case *ExtGlob: - p.wantSpace = false - p.WriteString(x.Op.String()) - p.WriteString(x.Pattern.Value) - p.WriteByte(')') - case *ProcSubst: - // avoid conflict with << and others - if p.wantSpace { - p.space() - } - switch x.Op { - case CmdIn: - p.WriteString("<(") - default: // CmdOut - p.WriteString(">(") - } - p.nestedStmts(x.Stmts, 0) - p.WriteByte(')') - } - p.wantSpace = true -} - -func (p *printer) loop(loop 
Loop) { - switch x := loop.(type) { - case *WordIter: - p.WriteString(x.Name.Value) - if len(x.List) > 0 { - p.WriteString(" in") - p.wordJoin(x.List, true) - } - case *CStyleLoop: - p.WriteString("((") - if x.Init == nil { - p.WriteByte(' ') - } - p.arithmExpr(x.Init, false) - p.WriteString("; ") - p.arithmExpr(x.Cond, false) - p.WriteString("; ") - p.arithmExpr(x.Post, false) - p.WriteString("))") - } -} - -func (p *printer) binaryExprOp(tok Token) { - switch tok { - case Assgn: - p.WriteByte('=') - case Add: - p.WriteByte('+') - case Sub: - p.WriteByte('-') - case Rem: - p.WriteByte('%') - case Mul: - p.WriteByte('*') - case Quo: - p.WriteByte('/') - case And: - p.WriteByte('&') - case Or: - p.WriteByte('|') - case AndExpr: - p.WriteString("&&") - case OrExpr: - p.WriteString("||") - case Xor: - p.WriteByte('^') - case Pow: - p.WriteString("**") - case Eql: - p.WriteString("==") - case Neq: - p.WriteString("!=") - case Leq: - p.WriteString("<=") - case Geq: - p.WriteString(">=") - case AddAssgn: - p.WriteString("+=") - case SubAssgn: - p.WriteString("-=") - case MulAssgn: - p.WriteString("*=") - case QuoAssgn: - p.WriteString("/=") - case RemAssgn: - p.WriteString("%=") - case AndAssgn: - p.WriteString("&=") - case OrAssgn: - p.WriteString("|=") - case XorAssgn: - p.WriteString("^=") - case ShlAssgn: - p.WriteString("<<=") - case ShrAssgn: - p.WriteString(">>=") - case Lss: - p.WriteByte('<') - case Gtr: - p.WriteByte('>') - case Shl: - p.WriteString("<<") - case Shr: - p.WriteString(">>") - case Quest: - p.WriteByte('?') - case Colon: - p.WriteByte(':') - default: // Comma - p.WriteByte(',') - } -} - -func (p *printer) unaryExprOp(tok Token) { - switch tok { - case Add: - p.WriteByte('+') - case Sub: - p.WriteByte('-') - case Not: - p.WriteByte('!') - case Inc: - p.WriteString("++") - default: // Dec - p.WriteString("--") - } -} - -func (p *printer) arithmExpr(expr ArithmExpr, compact bool) { - p.wantSpace = false - switch x := expr.(type) { - case *Word: - p.word(*x) - case *BinaryArithm: - if compact { - p.arithmExpr(x.X, compact) - p.binaryExprOp(x.Op) - p.arithmExpr(x.Y, compact) - } else { - p.arithmExpr(x.X, compact) - if x.Op != Comma { - p.WriteByte(' ') - } - p.binaryExprOp(x.Op) - p.space() - p.arithmExpr(x.Y, compact) - } - case *UnaryArithm: - if x.Post { - p.arithmExpr(x.X, compact) - p.unaryExprOp(x.Op) - } else { - p.unaryExprOp(x.Op) - p.arithmExpr(x.X, compact) - } - case *ParenArithm: - p.WriteByte('(') - p.arithmExpr(x.X, false) - p.WriteByte(')') - } -} - -func (p *printer) unaryTestOp(op UnTestOperator) { - switch op { - case TsNot: - p.WriteByte('!') - case TsExists: - p.WriteString("-e") - case TsRegFile: - p.WriteString("-f") - case TsDirect: - p.WriteString("-d") - case TsCharSp: - p.WriteString("-c") - case TsBlckSp: - p.WriteString("-b") - case TsNmPipe: - p.WriteString("-p") - case TsSocket: - p.WriteString("-S") - case TsSmbLink: - p.WriteString("-L") - case TsGIDSet: - p.WriteString("-g") - case TsUIDSet: - p.WriteString("-u") - case TsRead: - p.WriteString("-r") - case TsWrite: - p.WriteString("-w") - case TsExec: - p.WriteString("-x") - case TsNoEmpty: - p.WriteString("-s") - case TsFdTerm: - p.WriteString("-t") - case TsEmpStr: - p.WriteString("-z") - case TsNempStr: - p.WriteString("-n") - case TsOptSet: - p.WriteString("-o") - case TsVarSet: - p.WriteString("-v") - default: // TsRefVar - p.WriteString("-R") - } -} - -func (p *printer) binaryTestOp(op BinTestOperator) { - switch op { - case AndTest: - p.WriteString("&&") - case OrTest: - 
p.WriteString("||") - case TsAssgn: - p.WriteByte('=') - case TsEqual: - p.WriteString("==") - case TsNequal: - p.WriteString("!=") - case TsReMatch: - p.WriteString("=~") - case TsNewer: - p.WriteString("-nt") - case TsOlder: - p.WriteString("-ot") - case TsDevIno: - p.WriteString("-ef") - case TsEql: - p.WriteString("-eq") - case TsNeq: - p.WriteString("-ne") - case TsLeq: - p.WriteString("-le") - case TsGeq: - p.WriteString("-ge") - case TsLss: - p.WriteString("-lt") - case TsGtr: - p.WriteString("-gt") - case TsBefore: - p.WriteByte('<') - case TsAfter: - p.WriteByte('>') - } -} - -func (p *printer) testExpr(expr TestExpr) { - p.wantSpace = false - switch x := expr.(type) { - case *Word: - p.word(*x) - case *BinaryTest: - p.testExpr(x.X) - p.space() - p.binaryTestOp(x.Op) - p.space() - p.testExpr(x.Y) - case *UnaryTest: - p.unaryTestOp(x.Op) - p.space() - p.testExpr(x.X) - case *ParenTest: - p.WriteByte('(') - p.testExpr(x.X) - p.WriteByte(')') - } -} - -func (p *printer) word(w Word) { - for _, n := range w.Parts { - p.wordPart(n) - } -} - -func (p *printer) unquotedWord(w Word) { - for _, wp := range w.Parts { - switch x := wp.(type) { - case *SglQuoted: - p.WriteString(x.Value) - case *DblQuoted: - for _, qp := range x.Parts { - p.wordPart(qp) - } - case *Lit: - if x.Value[0] == '\\' { - p.WriteString(x.Value[1:]) - } else { - p.WriteString(x.Value) - } - default: - p.wordPart(wp) - } - } -} - -func (p *printer) wordJoin(ws []Word, backslash bool) { - anyNewline := false - for _, w := range ws { - if pos := w.Pos(); pos > p.nline { - p.commentsUpTo(pos) - if backslash { - p.bslashNewl() - } else { - p.WriteByte('\n') - p.incLine() - } - if !anyNewline { - p.incLevel() - anyNewline = true - } - p.indent() - } else if p.wantSpace { - p.space() - } - for _, n := range w.Parts { - p.wordPart(n) - } - } - if anyNewline { - p.decLevel() - } -} - -func (p *printer) stmt(s *Stmt) { - if s.Negated { - p.spacedString("!", true) - } - p.assigns(s.Assigns) - startRedirs := p.command(s.Cmd, s.Redirs) - anyNewline := false - for _, r := range s.Redirs[startRedirs:] { - if r.OpPos > p.nline { - p.bslashNewl() - if !anyNewline { - p.incLevel() - anyNewline = true - } - p.indent() - } - p.commentsAndSeparate(r.OpPos) - if p.wantSpace { - p.WriteByte(' ') - } - if r.N != nil { - p.WriteString(r.N.Value) - } - p.redirectOp(r.Op) - p.wantSpace = true - p.word(r.Word) - if r.Op == Hdoc || r.Op == DashHdoc { - p.pendingHdocs = append(p.pendingHdocs, r) - } - } - p.wroteSemi = false - if s.SemiPos > 0 && s.SemiPos > p.nline { - p.incLevel() - p.bslashNewl() - p.indent() - p.decLevel() - p.WriteByte(';') - p.wroteSemi = true - } else if s.Background { - p.WriteString(" &") - } - if anyNewline { - p.decLevel() - } -} - -func (p *printer) redirectOp(op RedirOperator) { - switch op { - case RdrIn: - p.WriteByte('<') - case RdrOut: - p.WriteByte('>') - case Hdoc: - p.WriteString("<<") - case AppOut: - p.WriteString(">>") - case RdrInOut: - p.WriteString("<>") - case DplIn: - p.WriteString("<&") - case DplOut: - p.WriteString(">&") - case ClbOut: - p.WriteString(">|") - case DashHdoc: - p.WriteString("<<-") - case WordHdoc: - p.WriteString("<<<") - case RdrAll: - p.WriteString("&>") - default: // AppAll - p.WriteString("&>>") - } -} - -func binaryCmdOp(op BinCmdOperator) string { - switch op { - case AndStmt: - return "&&" - case OrStmt: - return "||" - case Pipe: - return "|" - default: // PipeAll - return "|&" - } -} - -func caseClauseOp(op CaseOperator) string { - switch op { - case DblSemicolon: - return 
";;" - case SemiFall: - return ";&" - default: // DblSemiFall - return ";;&" - } -} - -func (p *printer) command(cmd Command, redirs []*Redirect) (startRedirs int) { - switch x := cmd.(type) { - case *CallExpr: - if len(x.Args) <= 1 { - p.wordJoin(x.Args, true) - return 0 - } - p.wordJoin(x.Args[:1], true) - for _, r := range redirs { - if r.Pos() > x.Args[1].Pos() || r.Op == Hdoc || r.Op == DashHdoc { - break - } - if p.wantSpace { - p.space() - } - if r.N != nil { - p.WriteString(r.N.Value) - } - p.redirectOp(r.Op) - p.wantSpace = true - p.word(r.Word) - startRedirs++ - } - p.wordJoin(x.Args[1:], true) - case *Block: - p.spacedString("{", true) - p.nestedStmts(x.Stmts, x.Rbrace) - p.semiRsrv("}", x.Rbrace, true) - case *IfClause: - p.spacedString("if", true) - p.nestedStmts(x.CondStmts, 0) - p.semiOrNewl("then", x.Then) - p.nestedStmts(x.ThenStmts, 0) - for _, el := range x.Elifs { - p.semiRsrv("elif", el.Elif, true) - p.nestedStmts(el.CondStmts, 0) - p.semiOrNewl("then", el.Then) - p.nestedStmts(el.ThenStmts, 0) - } - if len(x.ElseStmts) > 0 { - p.semiRsrv("else", x.Else, true) - p.nestedStmts(x.ElseStmts, 0) - } else if x.Else > 0 { - p.incLines(x.Else) - } - p.semiRsrv("fi", x.Fi, true) - case *Subshell: - p.spacedString("(", false) - p.wantSpace = len(x.Stmts) > 0 && startsWithLparen(x.Stmts[0]) - p.nestedStmts(x.Stmts, x.Rparen) - p.sepTok(")", x.Rparen) - case *WhileClause: - p.spacedString("while", true) - p.nestedStmts(x.CondStmts, 0) - p.semiOrNewl("do", x.Do) - p.nestedStmts(x.DoStmts, 0) - p.semiRsrv("done", x.Done, true) - case *ForClause: - p.spacedString("for ", true) - p.loop(x.Loop) - p.semiOrNewl("do", x.Do) - p.nestedStmts(x.DoStmts, 0) - p.semiRsrv("done", x.Done, true) - case *BinaryCmd: - p.stmt(x.X) - indent := !p.nestedBinary - if indent { - p.incLevel() - } - _, p.nestedBinary = x.Y.Cmd.(*BinaryCmd) - if len(p.pendingHdocs) == 0 && x.Y.Pos() > p.nline { - p.bslashNewl() - p.indent() - } - p.spacedString(binaryCmdOp(x.Op), true) - p.incLines(x.Y.Pos()) - p.stmt(x.Y) - if indent { - p.decLevel() - } - p.nestedBinary = false - case *FuncDecl: - if x.BashStyle { - p.WriteString("function ") - } - p.WriteString(x.Name.Value) - p.WriteString("() ") - p.incLines(x.Body.Pos()) - p.stmt(x.Body) - case *CaseClause: - p.spacedString("case ", true) - p.word(x.Word) - p.WriteString(" in") - p.incLevel() - for _, pl := range x.List { - p.commentsAndSeparate(pl.Patterns[0].Pos()) - for i, w := range pl.Patterns { - if i > 0 { - p.spacedString("|", true) - } - if p.wantSpace { - p.WriteByte(' ') - } - for _, n := range w.Parts { - p.wordPart(n) - } - } - p.WriteByte(')') - sep := len(pl.Stmts) > 1 || (len(pl.Stmts) > 0 && pl.Stmts[0].Pos() > p.nline) - p.nestedStmts(pl.Stmts, 0) - p.level++ - if sep { - p.sepTok(caseClauseOp(pl.Op), pl.OpPos) - } else { - p.spacedString(caseClauseOp(pl.Op), true) - } - p.incLines(pl.OpPos) - p.level-- - if sep || pl.OpPos == x.Esac { - p.wantNewline = true - } - } - p.decLevel() - p.semiRsrv("esac", x.Esac, len(x.List) == 0) - case *UntilClause: - p.spacedString("until", true) - p.nestedStmts(x.CondStmts, 0) - p.semiOrNewl("do", x.Do) - p.nestedStmts(x.DoStmts, 0) - p.semiRsrv("done", x.Done, true) - case *ArithmCmd: - if p.wantSpace { - p.space() - } - p.WriteString("((") - p.arithmExpr(x.X, false) - p.WriteString("))") - case *TestClause: - p.spacedString("[[", true) - p.space() - p.testExpr(x.X) - p.spacedString("]]", true) - case *DeclClause: - name := x.Variant - if name == "" { - name = "declare" - } - p.spacedString(name, true) - for _, w 
:= range x.Opts { - p.WriteByte(' ') - p.word(w) - } - p.assigns(x.Assigns) - case *EvalClause: - p.spacedString("eval", true) - if x.Stmt != nil { - p.stmt(x.Stmt) - } - case *CoprocClause: - p.spacedString("coproc", true) - if x.Name != nil { - p.WriteByte(' ') - p.WriteString(x.Name.Value) - } - p.stmt(x.Stmt) - case *LetClause: - p.spacedString("let", true) - for _, n := range x.Exprs { - p.space() - p.arithmExpr(n, true) - } - } - return startRedirs -} - -func startsWithLparen(s *Stmt) bool { - switch x := s.Cmd.(type) { - case *Subshell: - return true - case *BinaryCmd: - return startsWithLparen(x.X) - } - return false -} - -func (p *printer) hasInline(pos, npos, nline Pos) bool { - for _, c := range p.comments { - if c.Hash > nline { - return false - } - if c.Hash > pos && (npos == 0 || c.Hash < npos) { - return true - } - } - return false -} - -func (p *printer) stmts(stmts []*Stmt) { - switch len(stmts) { - case 0: - return - case 1: - s := stmts[0] - pos := s.Pos() - p.commentsUpTo(pos) - if pos <= p.nline { - p.stmt(s) - } else { - if p.nlineIndex > 0 { - p.newlines(pos) - } else { - p.incLines(pos) - } - p.stmt(s) - p.wantNewline = true - } - return - } - inlineIndent := 0 - for i, s := range stmts { - pos := s.Pos() - ind := p.nlineIndex - p.commentsUpTo(pos) - if p.nlineIndex > 0 { - p.newlines(pos) - } - p.incLines(pos) - p.stmt(s) - var npos Pos - if i+1 < len(stmts) { - npos = stmts[i+1].Pos() - } - if !p.hasInline(pos, npos, p.nline) { - inlineIndent = 0 - p.commentPadding = 0 - continue - } - if ind < len(p.f.Lines)-1 && s.End() > Pos(p.f.Lines[ind+1]) { - inlineIndent = 0 - } - if inlineIndent == 0 { - ind2 := p.nlineIndex - nline2 := p.nline - follow := stmts[i:] - for j, s2 := range follow { - pos2 := s2.Pos() - var npos2 Pos - if j+1 < len(follow) { - npos2 = follow[j+1].Pos() - } - if pos2 > nline2 || !p.hasInline(pos2, npos2, nline2) { - break - } - if l := p.stmtLen(s2); l > inlineIndent { - inlineIndent = l - } - if ind2++; ind2 >= len(p.f.Lines) { - nline2 = maxPos - } else { - nline2 = Pos(p.f.Lines[ind2]) - } - } - if ind2 == p.nlineIndex+1 { - // no inline comments directly after this one - continue - } - } - if inlineIndent > 0 { - p.commentPadding = inlineIndent - p.stmtLen(s) - } - } - p.wantNewline = true -} - -type byteCounter int - -func (c *byteCounter) WriteByte(b byte) error { - *c++ - return nil -} -func (c *byteCounter) WriteString(s string) (int, error) { - *c += byteCounter(len(s)) - return 0, nil -} -func (c *byteCounter) Reset(io.Writer) { *c = 0 } -func (c *byteCounter) Flush() error { return nil } - -func (p *printer) stmtLen(s *Stmt) int { - if p.lenPrinter == nil { - p.lenPrinter = new(printer) - } - *p.lenPrinter = printer{bufWriter: &p.lenCounter} - p.lenPrinter.bufWriter.Reset(nil) - p.lenPrinter.f = p.f - p.lenPrinter.incLines(s.Pos()) - p.lenPrinter.stmt(s) - return int(p.lenCounter) -} - -func (p *printer) nestedStmts(stmts []*Stmt, closing Pos) { - p.incLevel() - if len(stmts) == 1 && closing > p.nline && stmts[0].End() <= p.nline { - p.newline(0) - p.indent() - } - p.stmts(stmts) - p.decLevel() -} - -func (p *printer) assigns(assigns []*Assign) { - anyNewline := false - for _, a := range assigns { - if a.Pos() > p.nline { - p.bslashNewl() - if !anyNewline { - p.incLevel() - anyNewline = true - } - p.indent() - } else if p.wantSpace { - p.space() - } - if a.Name != nil { - p.WriteString(a.Name.Value) - if a.Append { - p.WriteByte('+') - } - p.WriteByte('=') - } - p.word(a.Value) - p.wantSpace = true - } - if anyNewline { - 
p.decLevel() - } -} diff --git a/vendor/github.com/mvdan/sh/syntax/tokens.go b/vendor/github.com/mvdan/sh/syntax/tokens.go deleted file mode 100644 index dee108a..0000000 --- a/vendor/github.com/mvdan/sh/syntax/tokens.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package syntax - -// Token is the set of lexical tokens and reserved words. -type Token int - -// The list of all possible tokens and reserved words. -const ( - illegalTok Token = iota - _EOF - _Lit - _LitWord - - sglQuote // ' - dblQuote // " - bckQuote // ` - - And // & - AndExpr // && - OrExpr // || - Or // | - pipeAll // |& - bash - - dollar // $ - dollSglQuote // $' - bash - dollDblQuote // $" - bash - dollBrace // ${ - dollBrack // $[ - dollParen // $( - dollDblParen // $(( - leftBrack // [ - leftParen // ( - dblLeftParen // (( - bash - - rightBrace // } - rightBrack // ] - rightParen // ) - dblRightParen // )) - semicolon // ; - - dblSemicolon // ;; - semiFall // ;& - bash - dblSemiFall // ;;& - bash - - Mul // * - Not // ! - Inc // ++ - Dec // -- - Pow // ** - Eql // == - Neq // != - Leq // <= - Geq // >= - - AddAssgn // += - SubAssgn // -= - MulAssgn // *= - QuoAssgn // /= - RemAssgn // %= - AndAssgn // &= - OrAssgn // |= - XorAssgn // ^= - ShlAssgn // <<= - ShrAssgn // >>= - - Gtr // > - Shr // >> - Lss // < - rdrInOut // <> - dplIn // <& - dplOut // >& - clbOut // >| - Shl // << - dashHdoc // <<- - wordHdoc // <<< - bash - rdrAll // &> - bash - appAll // &>> - bash - - cmdIn // <( - bash - cmdOut // >( - bash - - Add // + - ColAdd // :+ - Sub // - - ColSub // :- - Quest // ? - ColQuest // :? - Assgn // = - ColAssgn // := - Rem // % - dblRem // %% - Hash // # - dblHash // ## - Xor // ^ - dblXor // ^^ - bash - Comma // , - dblComma // ,, - bash - Quo // / - dblQuo // // - Colon // : - - tsNot // ! 
- tsExists // -e - tsRegFile // -f - tsDirect // -d - tsCharSp // -c - tsBlckSp // -b - tsNmPipe // -p - tsSocket // -S - tsSmbLink // -L - tsGIDSet // -g - tsUIDSet // -u - tsRead // -r - tsWrite // -w - tsExec // -x - tsNoEmpty // -s - tsFdTerm // -t - tsEmpStr // -z - tsNempStr // -n - tsOptSet // -o - tsVarSet // -v - tsRefVar // -R - - tsReMatch // =~ - tsNewer // -nt - tsOlder // -ot - tsDevIno // -ef - tsEql // -eq - tsNeq // -ne - tsLeq // -le - tsGeq // -ge - tsLss // -lt - tsGtr // -gt - - globQuest // ?( - globMul // *( - globAdd // +( - globAt // @( - globNot // !( -) - -type RedirOperator Token - -const ( - RdrOut = RedirOperator(Gtr) + iota - AppOut - RdrIn - RdrInOut - DplIn - DplOut - ClbOut - Hdoc - DashHdoc - WordHdoc - RdrAll - AppAll -) - -type ProcOperator Token - -const ( - CmdIn = ProcOperator(cmdIn) + iota - CmdOut -) - -type GlobOperator Token - -const ( - GlobQuest = GlobOperator(globQuest) + iota - GlobMul - GlobAdd - GlobAt - GlobNot -) - -type BinCmdOperator Token - -const ( - AndStmt = BinCmdOperator(AndExpr) + iota - OrStmt - Pipe - PipeAll -) - -type CaseOperator Token - -const ( - DblSemicolon = CaseOperator(dblSemicolon) + iota - SemiFall - DblSemiFall -) - -type ParExpOperator Token - -const ( - SubstAdd = ParExpOperator(Add) + iota - SubstColAdd - SubstSub - SubstColSub - SubstQuest - SubstColQuest - SubstAssgn - SubstColAssgn - RemSmallSuffix - RemLargeSuffix - RemSmallPrefix - RemLargePrefix - UpperFirst - UpperAll - LowerFirst - LowerAll -) - -type UnTestOperator Token - -const ( - TsNot = UnTestOperator(tsNot) + iota - TsExists - TsRegFile - TsDirect - TsCharSp - TsBlckSp - TsNmPipe - TsSocket - TsSmbLink - TsGIDSet - TsUIDSet - TsRead - TsWrite - TsExec - TsNoEmpty - TsFdTerm - TsEmpStr - TsNempStr - TsOptSet - TsVarSet - TsRefVar -) - -type BinTestOperator Token - -const ( - TsReMatch = BinTestOperator(tsReMatch) + iota - TsNewer - TsOlder - TsDevIno - TsEql - TsNeq - TsLeq - TsGeq - TsLss - TsGtr - AndTest = BinTestOperator(AndExpr) - OrTest = BinTestOperator(OrExpr) - TsAssgn = BinTestOperator(Assgn) - TsEqual = BinTestOperator(Eql) - TsNequal = BinTestOperator(Neq) - TsBefore = BinTestOperator(Lss) - TsAfter = BinTestOperator(Gtr) -) - -func (o RedirOperator) String() string { return Token(o).String() } -func (o ProcOperator) String() string { return Token(o).String() } -func (o GlobOperator) String() string { return Token(o).String() } -func (o BinCmdOperator) String() string { return Token(o).String() } -func (o CaseOperator) String() string { return Token(o).String() } -func (o ParExpOperator) String() string { return Token(o).String() } -func (o UnTestOperator) String() string { return Token(o).String() } -func (o BinTestOperator) String() string { return Token(o).String() } - -// Pos is the internal representation of a position within a source -// file. -type Pos int - -var defaultPos Pos - -const maxPos = Pos(^uint(0) >> 1) - -// Position describes a position within a source file including the line -// and column location. A Position is valid if the line number is > 0. 
-type Position struct { - Offset int // byte offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (in bytes) -} - -var tokNames = map[Token]string{ - illegalTok: "illegal", - _EOF: "EOF", - _Lit: "Lit", - _LitWord: "LitWord", - - sglQuote: "'", - dblQuote: `"`, - bckQuote: "`", - - And: "&", - AndExpr: "&&", - OrExpr: "||", - Or: "|", - pipeAll: "|&", - - dollar: "$", - dollSglQuote: "$'", - dollDblQuote: `$"`, - dollBrace: "${", - dollBrack: "$[", - dollParen: "$(", - dollDblParen: "$((", - leftBrack: "[", - leftParen: "(", - dblLeftParen: "((", - - rightBrace: "}", - rightBrack: "]", - rightParen: ")", - dblRightParen: "))", - semicolon: ";", - - dblSemicolon: ";;", - semiFall: ";&", - dblSemiFall: ";;&", - - Gtr: ">", - Shr: ">>", - Lss: "<", - rdrInOut: "<>", - dplIn: "<&", - dplOut: ">&", - clbOut: ">|", - Shl: "<<", - dashHdoc: "<<-", - wordHdoc: "<<<", - rdrAll: "&>", - appAll: "&>>", - - cmdIn: "<(", - cmdOut: ">(", - - Add: "+", - ColAdd: ":+", - Sub: "-", - ColSub: ":-", - Quest: "?", - ColQuest: ":?", - Assgn: "=", - ColAssgn: ":=", - Rem: "%", - dblRem: "%%", - Hash: "#", - dblHash: "##", - Xor: "^", - dblXor: "^^", - Comma: ",", - dblComma: ",,", - Quo: "/", - dblQuo: "//", - Colon: ":", - - Mul: "*", - Not: "!", - Inc: "++", - Dec: "--", - Pow: "**", - Eql: "==", - Neq: "!=", - Leq: "<=", - Geq: ">=", - - AddAssgn: "+=", - SubAssgn: "-=", - MulAssgn: "*=", - QuoAssgn: "/=", - RemAssgn: "%=", - AndAssgn: "&=", - OrAssgn: "|=", - XorAssgn: "^=", - ShlAssgn: "<<=", - ShrAssgn: ">>=", - - tsNot: "!", - tsExists: "-e", - tsRegFile: "-f", - tsDirect: "-d", - tsCharSp: "-c", - tsBlckSp: "-b", - tsNmPipe: "-p", - tsSocket: "-S", - tsSmbLink: "-L", - tsGIDSet: "-g", - tsUIDSet: "-u", - tsRead: "-r", - tsWrite: "-w", - tsExec: "-x", - tsNoEmpty: "-s", - tsFdTerm: "-t", - tsEmpStr: "-z", - tsNempStr: "-n", - tsOptSet: "-o", - tsVarSet: "-v", - tsRefVar: "-R", - - tsReMatch: "=~", - tsNewer: "-nt", - tsOlder: "-ot", - tsDevIno: "-ef", - tsEql: "-eq", - tsNeq: "-ne", - tsLeq: "-le", - tsGeq: "-ge", - tsLss: "-lt", - tsGtr: "-gt", - - globQuest: "?(", - globMul: "*(", - globAdd: "+(", - globAt: "@(", - globNot: "!(", -} - -func (t Token) String() string { return tokNames[t] } diff --git a/vendor/github.com/mvdan/sh/syntax/walk.go b/vendor/github.com/mvdan/sh/syntax/walk.go deleted file mode 100644 index 76d3339..0000000 --- a/vendor/github.com/mvdan/sh/syntax/walk.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) 2016, Daniel Martí -// See LICENSE for licensing information - -package syntax - -import "fmt" - -// Visitor holds a Visit method which is invoked for each node -// encountered by Walk. If the result visitor w is not nil, Walk visits -// each of the children of node with the visitor w, followed by a call -// of w.Visit(nil). -type Visitor interface { - Visit(node Node) (w Visitor) -} - -func walkStmts(v Visitor, stmts []*Stmt) { - for _, s := range stmts { - Walk(v, s) - } -} - -func walkWords(v Visitor, words []Word) { - for i := range words { - Walk(v, &words[i]) - } -} - -// Walk traverses an AST in depth-first order: It starts by calling -// v.Visit(node); node must not be nil. If the visitor w returned by -// v.Visit(node) is not nil, Walk is invoked recursively with visitor w -// for each of the non-nil children of node, followed by a call of -// w.Visit(nil). 
-func Walk(v Visitor, node Node) { - if v = v.Visit(node); v == nil { - return - } - - switch x := node.(type) { - case *File: - walkStmts(v, x.Stmts) - case *Stmt: - if x.Cmd != nil { - Walk(v, x.Cmd) - } - for _, a := range x.Assigns { - Walk(v, a) - } - for _, r := range x.Redirs { - Walk(v, r) - } - case *Assign: - if x.Name != nil { - Walk(v, x.Name) - } - Walk(v, &x.Value) - case *Redirect: - if x.N != nil { - Walk(v, x.N) - } - Walk(v, &x.Word) - if len(x.Hdoc.Parts) > 0 { - Walk(v, &x.Hdoc) - } - case *CallExpr: - walkWords(v, x.Args) - case *Subshell: - walkStmts(v, x.Stmts) - case *Block: - walkStmts(v, x.Stmts) - case *IfClause: - walkStmts(v, x.CondStmts) - walkStmts(v, x.ThenStmts) - for _, elif := range x.Elifs { - walkStmts(v, elif.CondStmts) - walkStmts(v, elif.ThenStmts) - } - walkStmts(v, x.ElseStmts) - case *WhileClause: - walkStmts(v, x.CondStmts) - walkStmts(v, x.DoStmts) - case *UntilClause: - walkStmts(v, x.CondStmts) - walkStmts(v, x.DoStmts) - case *ForClause: - Walk(v, x.Loop) - walkStmts(v, x.DoStmts) - case *WordIter: - Walk(v, &x.Name) - walkWords(v, x.List) - case *CStyleLoop: - if x.Init != nil { - Walk(v, x.Init) - } - if x.Cond != nil { - Walk(v, x.Cond) - } - if x.Post != nil { - Walk(v, x.Post) - } - case *BinaryCmd: - Walk(v, x.X) - Walk(v, x.Y) - case *FuncDecl: - Walk(v, &x.Name) - Walk(v, x.Body) - case *Word: - for _, wp := range x.Parts { - Walk(v, wp) - } - case *Lit: - case *SglQuoted: - case *DblQuoted: - for _, wp := range x.Parts { - Walk(v, wp) - } - case *CmdSubst: - walkStmts(v, x.Stmts) - case *ParamExp: - Walk(v, &x.Param) - if x.Ind != nil { - Walk(v, &x.Ind.Word) - } - if x.Repl != nil { - Walk(v, &x.Repl.Orig) - Walk(v, &x.Repl.With) - } - if x.Exp != nil { - Walk(v, &x.Exp.Word) - } - case *ArithmExp: - if x.X != nil { - Walk(v, x.X) - } - case *ArithmCmd: - if x.X != nil { - Walk(v, x.X) - } - case *BinaryArithm: - Walk(v, x.X) - Walk(v, x.Y) - case *BinaryTest: - Walk(v, x.X) - Walk(v, x.Y) - case *UnaryArithm: - Walk(v, x.X) - case *UnaryTest: - Walk(v, x.X) - case *ParenArithm: - Walk(v, x.X) - case *ParenTest: - Walk(v, x.X) - case *CaseClause: - Walk(v, &x.Word) - for _, pl := range x.List { - walkWords(v, pl.Patterns) - walkStmts(v, pl.Stmts) - } - case *TestClause: - Walk(v, x.X) - case *DeclClause: - walkWords(v, x.Opts) - for _, a := range x.Assigns { - Walk(v, a) - } - case *ArrayExpr: - walkWords(v, x.List) - case *ExtGlob: - Walk(v, &x.Pattern) - case *ProcSubst: - walkStmts(v, x.Stmts) - case *EvalClause: - if x.Stmt != nil { - Walk(v, x.Stmt) - } - case *CoprocClause: - if x.Name != nil { - Walk(v, x.Name) - } - Walk(v, x.Stmt) - case *LetClause: - for _, expr := range x.Exprs { - Walk(v, expr) - } - default: - panic(fmt.Sprintf("ast.Walk: unexpected node type %T", x)) - } - - v.Visit(nil) -} diff --git a/vendor/manifest b/vendor/manifest deleted file mode 100644 index f6627d3..0000000 --- a/vendor/manifest +++ /dev/null @@ -1,13 +0,0 @@ -{ - "version": 0, - "dependencies": [ - { - "importpath": "github.com/mvdan/sh", - "repository": "https://github.com/mvdan/sh", - "vcs": "git", - "revision": "0a4761ee498179b93fe0efcd37758320c6ec73cd", - "branch": "HEAD", - "notests": true - } - ] -} \ No newline at end of file
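For context on what is being removed: the vendored github.com/mvdan/sh/syntax package deleted above exposes a compact public API, an AST rooted at File, a printer (Fprint and PrintConfig.Fprint), a Visitor-based Walk, and typed operator constants over a shared Token table. The sketch below is illustrative only: it targets the revision pinned in the deleted manifest (0a4761e) under the old import path (upstream later moved to mvdan.cc/sh), builds a tiny AST by hand rather than parsing, and leaves every position field at its zero value.

package main

import (
	"log"
	"os"

	"github.com/mvdan/sh/syntax"
)

func main() {
	// Hand-built AST for `echo hi`: one Stmt whose Cmd is a CallExpr
	// with two one-part literal words. Field names match the deleted
	// sources; positions stay at their zero values.
	f := &syntax.File{
		Stmts: []*syntax.Stmt{{
			Cmd: &syntax.CallExpr{Args: []syntax.Word{
				{Parts: []syntax.WordPart{&syntax.Lit{Value: "echo"}}},
				{Parts: []syntax.WordPart{&syntax.Lit{Value: "hi"}}},
			}},
		}},
	}
	// Default printing indents with tabs; Spaces > 0 switches the
	// printer to space indentation (see PrintConfig in printer.go).
	if err := syntax.Fprint(os.Stdout, f); err != nil {
		log.Fatal(err)
	}
	if err := (syntax.PrintConfig{Spaces: 2}).Fprint(os.Stdout, f); err != nil {
		log.Fatal(err)
	}
}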
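The deleted walk.go spells out the traversal contract: Walk calls v.Visit(node) on each node, keeps descending while the returned visitor is non-nil, and ends every subtree with a final v.Visit(nil), so a visitor must tolerate a nil node. A small counting visitor under the same assumptions as the sketch above:

package main

import (
	"fmt"

	"github.com/mvdan/sh/syntax"
)

// litCounter counts *syntax.Lit nodes; Walk hands every node to Visit
// and signals the end of a subtree with Visit(nil).
type litCounter struct{ n int }

func (c *litCounter) Visit(node syntax.Node) syntax.Visitor {
	if node == nil {
		return nil // subtree finished
	}
	if _, ok := node.(*syntax.Lit); ok {
		c.n++
	}
	return c // non-nil: keep descending into children
}

func main() {
	f := &syntax.File{
		Stmts: []*syntax.Stmt{{
			Cmd: &syntax.CallExpr{Args: []syntax.Word{
				{Parts: []syntax.WordPart{&syntax.Lit{Value: "echo"}}},
				{Parts: []syntax.WordPart{&syntax.Lit{Value: "hi"}}},
			}},
		}},
	}
	c := &litCounter{}
	syntax.Walk(c, f)
	fmt.Println("literals:", c.n) // literals: 2
}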
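tokens.go, also removed above, gives each operator class its own type (RedirOperator, CaseOperator, BinTestOperator, and so on) whose constants alias Token values, so all classes share the single tokNames table via Token.String. A one-liner showing the effect, same assumptions as above:

package main

import (
	"fmt"

	"github.com/mvdan/sh/syntax"
)

func main() {
	// RdrAll and AppAll are RedirOperator constants; their String
	// methods delegate to Token.String and the shared name table.
	fmt.Println(syntax.RdrAll, syntax.AppAll) // &> &>>
}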