Repository: mosuka/cete Branch: master Commit: fe731a07a5ee Files: 47 Total size: 273.4 KB Directory structure: gitextract_f8dyezlx/ ├── .github/ │ └── workflows/ │ ├── PR.yaml │ ├── master.yaml │ └── tags.yaml ├── .gitignore ├── CHANGES.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── client/ │ └── grpc_client.go ├── cmd/ │ ├── cluster.go │ ├── delete.go │ ├── get.go │ ├── healthcheck.go │ ├── join.go │ ├── leave.go │ ├── metrics.go │ ├── node.go │ ├── root.go │ ├── set.go │ ├── snapshot.go │ ├── start.go │ ├── variables.go │ ├── version.go │ └── watch.go ├── docker-entrypoint.sh ├── errors/ │ └── errors.go ├── etc/ │ └── cete.yaml ├── go.mod ├── go.sum ├── log/ │ └── log.go ├── main.go ├── marshaler/ │ ├── marshaler.go │ ├── util.go │ └── util_test.go ├── metric/ │ └── metric.go ├── protobuf/ │ ├── kvs.pb.go │ ├── kvs.pb.gw.go │ └── kvs.proto ├── registry/ │ └── type.go ├── server/ │ ├── grpc_gateway.go │ ├── grpc_server.go │ ├── grpc_service.go │ ├── raft_fsm.go │ └── raft_server.go ├── storage/ │ └── kvs.go └── version/ └── version.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/PR.yaml ================================================ on: pull_request: branches: - master jobs: build: name: test runs-on: ubuntu-latest container: image: golang:1.14-buster volumes: - "/home/runner/work/$GITHUB_REPOSITORY:/go/src/github.com/$GITHUB_REPOSITORY" steps: - uses: actions/checkout@v1 - name: restore from cache uses: actions/cache@v1 with: path: /go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - name: download dependencies if not cached run: | if [ ! 
-d "/go/pkg/mod" ]; then go mod tidy fi - name: test run: make test env: CGO_ENABLED: 1 ================================================ FILE: .github/workflows/master.yaml ================================================ on: push: branches: - master jobs: build: name: build runs-on: ubuntu-latest container: image: golang:1.14-buster volumes: - "/home/runner/work/$GITHUB_REPOSITORY:/go/src/github.com/$GITHUB_REPOSITORY" steps: - uses: actions/checkout@v1 - name: restore from cache uses: actions/cache@v1 with: path: /go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - name: build run: | go install github.com/mitchellh/gox make build - name: compress build binary run: | cd bin && tar -czvf ../dist.tar.gz cete - name: upload compressed dist to artifacts uses: actions/upload-artifact@v1 with: name: distribution path: dist.tar.gz docker: name: docker runs-on: ubuntu-latest needs: - build steps: - uses: actions/checkout@v1 - name: downloads compressed dist uses: actions/download-artifact@v1 with: name: distribution - name: untar dist.tar.gz run: | if [ ! -d "$(pwd)/bin" ]; then mkdir bin fi tar -xzvf distribution/dist.tar.gz -C bin rm -rf distribution/ - name: build docker image run: | docker build . 
--file Dockerfile --tag $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY) docker tag $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY) $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY):${{ github.sha }} - name: log into docker registry run: echo "${{ secrets.DOCKER_PSW }}" | docker login -u ${{ secrets.DOCKER_USR }} --password-stdin - name: push image to docker registry run: | # pushes unique commit sha based tag docker push $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY) docker push $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY):${{ github.sha }} ================================================ FILE: .github/workflows/tags.yaml ================================================ on: push: tags: - "v*" jobs: build: name: build runs-on: ubuntu-latest container: image: golang:1.14-buster volumes: - "/home/runner/work/$GITHUB_REPOSITORY:/go/src/github.com/$GITHUB_REPOSITORY" steps: - uses: actions/checkout@v1 - name: restore from cache uses: actions/cache@v1 with: path: /go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - name: create multiple OS dists run: | go install github.com/mitchellh/gox VERSION=$(basename $GITHUB_REF) make dist - name: compress build binary run: | tar -czvf dist.tar.gz dist - name: upload compressed dist to artifacts uses: actions/upload-artifact@v1 with: name: distribution path: dist.tar.gz docker: name: docker runs-on: ubuntu-latest needs: - build steps: - uses: actions/checkout@v1 - name: downloads compressed dist uses: actions/download-artifact@v1 with: name: distribution - name: untar dist.tar.gz run: | if [ ! -d "$(pwd)/bin" ]; then mkdir bin fi tar -xzvf distribution/dist.tar.gz mv dist/cete_linux_amd64 bin/cete rm -rf distribution/ dist/ - name: build docker image run: docker build . 
--file Dockerfile --tag $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY):$(basename $GITHUB_REF) - name: log into docker registry run: echo "${{ secrets.DOCKER_PSW }}" | docker login -u ${{ secrets.DOCKER_USR }} --password-stdin - name: push image to docker registry run: | # pushes unique commit sha based tag docker push $(dirname $GITHUB_REPOSITORY)/$(basename $GITHUB_REPOSITORY):$(basename $GITHUB_REF) release: name: release runs-on: ubuntu-latest needs: - docker steps: - uses: actions/checkout@v2 - name: downloads compressed dist uses: actions/download-artifact@v1 with: name: distribution - name: create dists .tar.gz run: | tar -xzvf distribution/dist.tar.gz for dist_file in $(ls dist); do (cd dist && tar -czvf ../$(basename -- "${dist_file%.*}").tar.gz $dist_file) || exit 1 done rm -rf distribution/ dist/ - run: | set -x assets=() for asset in $(ls *.tar.gz); do assets+=("-a" "$asset") done tag_name=$(basename $GITHUB_REF) hub release create "${assets[@]}" -m "$tag_name" "$tag_name" env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .gitignore ================================================ .DS_Store .classpath .project .idea/ bin/ dist/ *.pem *.csr cover.out cover.html ================================================ FILE: CHANGES.md ================================================ # Change Log All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] - Fix go.mod dependency due to a change in the case #50 @mosuka - Add garbage collector for badger #48 @christian-roggia - adds cicd #41 @vniche - adds scan endpoint #35 @vniche - Fix bug in getting leader ID #34 @mosuka ## [v0.3.1] 2020-04-01 - Update protobuf #33 @mosuka ## [v0.3.0] 2020-03-31 - Add health check endpoints #32 @mosuka - Add some metrics #31 @mosuka - Allow CLI options to be read from the configuration file #29 @mosuka - Fix gateway bug #26 @mosuka - Support TLS #25 @mosuka - Add keepalive options #24 @mosuka - Improve cluster watching #22 @mosuka - Refactoring #21 @mosuka - Update Makefile #20 @mosuka ## [v0.2.0] 2020-03-19 - Add join endpoint #19 @mosuka - Add leave endpoint #18 @mosuka - Add snapshot endpoint #17 @mosuka - Disable raft-badgerdb logging #16 @mosuka - Migrate to grpc-gateway #15 @mosuka - Add metrics command #14 @mosuka - Use raft-badger #13 @mosuka - Refactoring #12 @mosuka - Refactoring #11 @mosuka - Refactoring #10 @mosuka - Upgrade Badger #9 @mosuka - Upgrade Raft #8 @mosuka - Refactoring #7 @mosuka ## [v0.1.1] 2019-11-05 - Fix bugs in defer #5 @mosuka ## [v0.1.0] 2019-03-30 - First release @mosuka ================================================ FILE: Dockerfile ================================================ FROM alpine:3.11 LABEL maintainer="Minoru Osuka minoru.osuka@gmail.com" LABEL maintainer="Vinícius Niche Correa viniciusnichecorrea@gmail.com" RUN apk update && \ rm -rf /var/cache/apk/* RUN addgroup cete \ && adduser -S cete -u 1000 -G cete USER cete COPY --chown=cete:cete bin/cete /usr/bin/ EXPOSE 7000 8000 9000 ENTRYPOINT [ "/usr/bin/cete" ] CMD [ "start" ] ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2019 Minoru Osuka Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ GOOS ?= GOARCH ?= GO111MODULE ?= on CGO_ENABLED ?= 0 CGO_CFLAGS ?= CGO_LDFLAGS ?= BUILD_TAGS ?= VERSION ?= BIN_EXT ?= DOCKER_REPOSITORY ?= mosuka PACKAGES = $(shell $(GO) list ./... | grep -v '/vendor/') PROTOBUFS = $(shell find . 
-name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) TARGET_PACKAGES = $(shell find $(CURDIR) -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) ifeq ($(GOOS),) GOOS = $(shell go version | awk -F ' ' '{print $$NF}' | awk -F '/' '{print $$1}') endif ifeq ($(GOARCH),) GOARCH = $(shell go version | awk -F ' ' '{print $$NF}' | awk -F '/' '{print $$2}') endif ifeq ($(VERSION),) VERSION = latest endif LDFLAGS = -ldflags "-X \"github.com/mosuka/cete/version.Version=$(VERSION)\"" ifeq ($(GOOS),windows) BIN_EXT = .exe endif BUILD_FLAGS := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_CFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) GO111MODULE=$(GO111MODULE) GO := $(BUILD_FLAGS) go GOX := $(BUILD_FLAGS) gox .DEFAULT_GOAL := build .PHONY: show-env show-env: @echo ">> show env" @echo " GOOS = $(GOOS)" @echo " GOARCH = $(GOARCH)" @echo " GO111MODULE = $(GO111MODULE)" @echo " CGO_ENABLED = $(CGO_ENABLED)" @echo " CGO_CFLAGS = $(CGO_CFLAGS)" @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" @echo " BUILD_TAGS = $(BUILD_TAGS)" @echo " VERSION = $(VERSION)" @echo " BIN_EXT = $(BIN_EXT)" @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" @echo " LDFLAGS = $(LDFLAGS)" @echo " PACKAGES = $(PACKAGES)" @echo " PROTOBUFS = $(PROTOBUFS)" @echo " TARGET_PACKAGES = $(TARGET_PACKAGES)" @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" .PHONY: protoc protoc: show-env @echo ">> generating proto3 code" for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done .PHONY: format format: show-env @echo ">> formatting code" $(GO) fmt $(PACKAGES) .PHONY: test test: show-env @echo ">> testing all packages" $(GO) test -covermode=atomic -v -tags="$(BUILD_TAGS)" $(PACKAGES) .PHONY: coverage coverage: show-env @echo ">> checking coverage of all packages" $(GO) test -covermode=atomic -coverprofile cover.out -tags="$(BUILD_TAGS)" $(PACKAGES) $(GO) tool cover -html=cover.out -o cover.html .PHONY: clean clean: show-env @echo ">> cleaning binaries" rm -rf ./bin rm -rf ./data rm -rf ./dist .PHONY: build build: show-env @echo ">> building binaries" mkdir -p bin $(GOX) -osarch="$(GOOS)/amd64" -tags="$(BUILD_TAGS)" $(LDFLAGS) -output bin/cete .PHONY: install install: show-env @echo ">> installing binaries" for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done .PHONY: dist dist: show-env @echo ">> packaging binaries" mkdir dist $(GOX) -osarch="linux/amd64" -osarch="darwin/amd64" -osarch="windows/amd64" -tags="$(BUILD_TAGS)" $(LDFLAGS) -output "dist/{{.Dir}}_{{.OS}}_{{.Arch}}" .PHONY: list-tag list-tag: @echo ">> listing github tags" git tag -l --sort=-v:refname .PHONY: tag tag: show-env @echo ">> tagging github" ifeq ($(VERSION),$(filter $(VERSION),latest master "")) @echo "please specify VERSION" else git tag -a $(VERSION) -m "Release $(VERSION)" git push origin $(VERSION) endif .PHONY: docker-build docker-build: show-env @echo ">> building docker container image" docker build -t $(DOCKER_REPOSITORY)/cete:latest --build-arg VERSION=$(VERSION) . 
docker tag $(DOCKER_REPOSITORY)/cete:latest $(DOCKER_REPOSITORY)/cete:$(VERSION) .PHONY: docker-push docker-push: show-env @echo ">> pushing docker container image" docker push $(DOCKER_REPOSITORY)/cete:latest docker push $(DOCKER_REPOSITORY)/cete:$(VERSION) .PHONY: docker-clean docker-clean: show-env docker rmi -f $(shell docker images --filter "dangling=true" -q --no-trunc) .PHONY: cert cert: show-env @echo ">> generating certification" openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/cete-key.pem -out ./etc/cete-cert.pem -days 365 -subj '/CN=localhost' ================================================ FILE: README.md ================================================ # Cete Cete is a distributed key value store server written in [Go](https://golang.org) built on top of [BadgerDB](https://blog.dgraph.io/post/badger/). It provides functions through [gRPC](http://www.grpc.io) ([HTTP/2](https://en.wikipedia.org/wiki/HTTP/2) + [Protocol Buffers](https://developers.google.com/protocol-buffers/)) or traditional [RESTful](https://en.wikipedia.org/wiki/Representational_state_transfer) API ([HTTP/1.1](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) + [JSON](http://www.json.org)). Cete implements [Raft consensus algorithm](https://raft.github.io/) by [hashicorp/raft](https://github.com/hashicorp/raft). It achieve consensus across all the instances of the nodes, ensuring that every change made to the system is made to a quorum of nodes, or none at all. Cete makes it easy bringing up a cluster of BadgerDB (a cete of badgers) . 
## Features - Easy deployment - Bringing up cluster - Database replication - An easy-to-use HTTP API - CLI is also available - Docker container image is available ## Building Cete When you satisfied dependencies, let's build Cete for Linux as following: ```bash $ mkdir -p ${GOPATH}/src/github.com/mosuka $ cd ${GOPATH}/src/github.com/mosuka $ git clone https://github.com/mosuka/cete.git $ cd cete $ make build ``` If you want to build for other platform, set `GOOS`, `GOARCH` environment variables. For example, build for macOS like following: ```bash $ make GOOS=darwin build ``` ### Binaries You can see the binary file when build successful like so: ```bash $ ls ./bin cete ``` ## Testing Cete If you want to test your changes, run command like following: ```bash $ make test ``` ## Packaging Cete ### Linux ```bash $ make GOOS=linux dist ``` ### macOS ```bash $ make GOOS=darwin dist ``` ## Configure Cete | CLI Flag | Environment variable | Configuration File | Description | | --- | --- | --- | --- | | --config-file | - | - | config file. 
if omitted, cete.yaml in /etc and home directory will be searched | | --id | CETE_ID | id | node ID | | --raft-address | CETE_RAFT_ADDRESS | raft_address | Raft server listen address | | --grpc-address | CETE_GRPC_ADDRESS | grpc_address | gRPC server listen address | | --http-address | CETE_HTTP_ADDRESS | http_address | HTTP server listen address | | --data-directory | CETE_DATA_DIRECTORY | data_directory | data directory which store the key-value store data and Raft logs | | --peer-grpc-address | CETE_PEER_GRPC_ADDRESS | peer_grpc_address | listen address of the existing gRPC server in the joining cluster | | --certificate-file | CETE_CERTIFICATE_FILE | certificate_file | path to the client server TLS certificate file | | --key-file | CETE_KEY_FILE | key_file | path to the client server TLS key file | | --common-name | CETE_COMMON_NAME | common_name | certificate common name | | --log-level | CETE_LOG_LEVEL | log_level | log level | | --log-file | CETE_LOG_FILE | log_file | log file | | --log-max-size | CETE_LOG_MAX_SIZE | log_max_size | max size of a log file in megabytes | | --log-max-backups | CETE_LOG_MAX_BACKUPS | log_max_backups | max backup count of log files | | --log-max-age | CETE_LOG_MAX_AGE | log_max_age | max age of a log file in days | | --log-compress | CETE_LOG_COMPRESS | log_compress | compress a log file | ## Starting Cete node Starting cete is easy as follows: ```bash $ ./bin/cete start --id=node1 --raft-address=:7000 --grpc-address=:9000 --http-address=:8000 --data-directory=/tmp/cete/node1 ``` You can get the node information with the following command: ```bash $ ./bin/cete node | jq . ``` or the following URL: ```bash $ curl -X GET http://localhost:8000/v1/node | jq . ``` The result of the above command is: ```json { "node": { "raft_address": ":7000", "metadata": { "grpc_address": ":9000", "http_address": ":8000" }, "state": "Leader" } } ``` ## Health check You can check the health status of the node. ```bash $ ./bin/cete healthcheck | jq . 
``` Also provides the following REST APIs ### Liveness prove This endpoint always returns 200 and should be used to check Cete health. ```bash $ curl -X GET http://localhost:8000/v1/liveness_check | jq . ``` ### Readiness probe This endpoint returns 200 when Cete is ready to serve traffic (i.e. respond to queries). ```bash $ curl -X GET http://localhost:8000/v1/readiness_check | jq . ``` ## Putting a key-value To put a key-value, execute the following command: ```bash $ ./bin/cete set 1 value1 ``` or, you can use the RESTful API as follows: ```bash $ curl -X PUT 'http://127.0.0.1:8000/v1/data/1' --data-binary value1 $ curl -X PUT 'http://127.0.0.1:8000/v1/data/2' -H "Content-Type: image/jpeg" --data-binary @/path/to/photo.jpg ``` ## Getting a key-value To get a key-value, execute the following command: ```bash $ ./bin/cete get 1 ``` or, you can use the RESTful API as follows: ```bash $ curl -X GET 'http://127.0.0.1:8000/v1/data/1' ``` You can see the result. The result of the above command is: ```text value1 ``` ## Deleting a key-value Deleting a value by key, execute the following command: ```bash $ ./bin/cete delete 1 ``` or, you can use the RESTful API as follows: ```bash $ curl -X DELETE 'http://127.0.0.1:8000/v1/data/1' ``` ## Bringing up a cluster Cete is easy to bring up the cluster. Cete node is already running, but that is not fault tolerant. If you need to increase the fault tolerance, bring up 2 more data nodes like so: ```bash $ ./bin/cete start --id=node2 --raft-address=:7001 --grpc-address=:9001 --http-address=:8001 --data-directory=/tmp/cete/node2 --peer-grpc-address=:9000 $ ./bin/cete start --id=node3 --raft-address=:7002 --grpc-address=:9002 --http-address=:8002 --data-directory=/tmp/cete/node3 --peer-grpc-address=:9000 ``` _Above example shows each Cete node running on the same host, so each node must listen on different ports. 
This would not be necessary if each node ran on a different host._ This instructs each new node to join an existing node, each node recognizes the joining clusters when started. So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the cluster with the following command: ```bash $ ./bin/cete cluster | jq . ``` or, you can use the RESTful API as follows: ```bash $ curl -X GET 'http://127.0.0.1:8000/v1/cluster' | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { "cluster": { "nodes": { "node1": { "raft_address": ":7000", "metadata": { "grpc_address": ":9000", "http_address": ":8000" }, "state": "Leader" }, "node2": { "raft_address": ":7001", "metadata": { "grpc_address": ":9001", "http_address": ":8001" }, "state": "Follower" }, "node3": { "raft_address": ":7002", "metadata": { "grpc_address": ":9002", "http_address": ":8002" }, "state": "Follower" } }, "leader": "node1" } } ``` Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, data loss is inevitable, so avoid deploying single nodes. 
The above example, the node joins to the cluster at startup, but you can also join the node that already started on standalone mode to the cluster later, as follows: ```bash $ ./bin/cete join --grpc-addr=:9000 node2 127.0.0.1:9001 ``` or, you can use the RESTful API as follows: ```bash $ curl -X PUT 'http://127.0.0.1:8000/v1/cluster/node2' --data-binary ' { "raft_address": ":7001", "metadata": { "grpc_address": ":9001", "http_address": ":8001" } } ' ``` To remove a node from the cluster, execute the following command: ```bash $ ./bin/cete leave --grpc-addr=:9000 node2 ``` or, you can use the RESTful API as follows: ```bash $ curl -X DELETE 'http://127.0.0.1:8000/v1/cluster/node2' ``` The following command indexes documents to any node in the cluster: ```bash $ ./bin/cete set 1 value1 --grpc-address=:9000 ``` So, you can get the document from the node specified by the above command as follows: ```bash $ ./bin/cete get 1 --grpc-address=:9000 ``` You can see the result. The result of the above command is: ```text value1 ``` You can also get the same document from other nodes in the cluster as follows: ```bash $ ./bin/cete get 1 --grpc-address=:9001 $ ./bin/cete get 1 --grpc-address=:9002 ``` You can see the result. The result of the above command is: ```text value1 ``` ## Cete on Docker ### Building Cete Docker container image on localhost You can build the Docker container image like so: ```bash $ make docker-build ``` ### Pulling Cete Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: ```bash $ docker pull mosuka/cete:latest ``` See https://hub.docker.com/r/mosuka/cete/tags/ ### Pulling Cete Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: ```bash $ docker pull mosuka/cete:latest ``` ### Running Cete node on Docker Running a Cete data node on Docker. 
Start Cete node like so:

```bash
$ docker run --rm --name cete-node1 \
    -p 7000:7000 \
    -p 8000:8000 \
    -p 9000:9000 \
    mosuka/cete:latest cete start \
      --id=node1 \
      --raft-address=:7000 \
      --grpc-address=:9000 \
      --http-address=:8000 \
      --data-directory=/tmp/cete/node1
```

You can execute a command inside the Docker container as follows:

```bash
$ docker exec -it cete-node1 cete node --grpc-address=:9000
```

## Securing Cete

Cete supports HTTPS access, ensuring that all communication between clients and a cluster is encrypted.

### Generating a certificate and private key

One way to generate the necessary resources is via [openssl](https://www.openssl.org/). For example:

```bash
$ openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/cete-key.pem -out ./etc/cete-cert.pem -days 365 -subj '/CN=localhost'
Generating a 4096 bit RSA private key
............................++
........++
writing new private key to './etc/cete-key.pem'
```

### Secure cluster example

Starting a node with HTTPS enabled, node-to-node encryption, and with the above configuration file. It is assumed the HTTPS X.509 certificate and key are at the paths ./etc/cert.pem and ./etc/key.pem respectively.
```bash $ ./bin/cete start --id=node1 --raft-address=:7000 --grpc-address=:9000 --http-address=:8000 --data-directory=/tmp/cete/node1 --peer-grpc-address=:9000 --certificate-file=./etc/cert.pem --key-file=./etc/key.pem --common-name=localhost $ ./bin/cete start --id=node2 --raft-address=:7001 --grpc-address=:9001 --http-address=:8001 --data-directory=/tmp/cete/node2 --peer-grpc-address=:9000 --certificate-file=./etc/cert.pem --key-file=./etc/key.pem --common-name=localhost $ ./bin/cete start --id=node3 --raft-address=:7002 --grpc-address=:9002 --http-address=:8002 --data-directory=/tmp/cete/node3 --peer-grpc-address=:9000 --certificate-file=./etc/cert.pem --key-file=./etc/key.pem --common-name=localhost ``` You can access the cluster by adding a flag, such as the following command: ```bash $ ./bin/cete cluster --grpc-address=:9000 --certificate-file=./cert.pem --common-name=localhost | jq . ``` or ```bash $ curl -X GET https://localhost:8000/v1/cluster --cacert ./cert.pem | jq . ``` ================================================ FILE: client/grpc_client.go ================================================ package client import ( "context" "log" "math" "time" "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/cete/errors" "github.com/mosuka/cete/protobuf" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" ) type GRPCClient struct { ctx context.Context cancel context.CancelFunc conn *grpc.ClientConn client protobuf.KVSClient logger *log.Logger } func NewGRPCClient(grpc_address string) (*GRPCClient, error) { return NewGRPCClientWithContext(grpc_address, context.Background()) } func NewGRPCClientWithContext(grpc_address string, baseCtx context.Context) (*GRPCClient, error) { return NewGRPCClientWithContextTLS(grpc_address, baseCtx, "", "") } func NewGRPCClientWithContextTLS(grpcAddress string, baseCtx context.Context, certificateFile string, 
commonName string) (*GRPCClient, error) { dialOpts := []grpc.DialOption{ grpc.WithDefaultCallOptions( grpc.MaxCallSendMsgSize(math.MaxInt64), grpc.MaxCallRecvMsgSize(math.MaxInt64), ), grpc.WithKeepaliveParams( keepalive.ClientParameters{ Time: 1 * time.Second, Timeout: 5 * time.Second, PermitWithoutStream: true, }, ), } ctx, cancel := context.WithCancel(baseCtx) if certificateFile == "" { dialOpts = append(dialOpts, grpc.WithInsecure()) } else { creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) if err != nil { return nil, err } dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) } conn, err := grpc.DialContext(ctx, grpcAddress, dialOpts...) if err != nil { cancel() return nil, err } return &GRPCClient{ ctx: ctx, cancel: cancel, conn: conn, client: protobuf.NewKVSClient(conn), }, nil } func (c *GRPCClient) Close() error { c.cancel() if c.conn != nil { return c.conn.Close() } return c.ctx.Err() } func (c *GRPCClient) Target() string { return c.conn.Target() } func (c *GRPCClient) LivenessCheck(opts ...grpc.CallOption) (*protobuf.LivenessCheckResponse, error) { if resp, err := c.client.LivenessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { return nil, err } else { return resp, nil } } func (c *GRPCClient) ReadinessCheck(opts ...grpc.CallOption) (*protobuf.ReadinessCheckResponse, error) { if resp, err := c.client.ReadinessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { return nil, err } else { return resp, nil } } func (c *GRPCClient) Join(req *protobuf.JoinRequest, opts ...grpc.CallOption) error { if _, err := c.client.Join(c.ctx, req, opts...); err != nil { return err } return nil } func (c *GRPCClient) Leave(req *protobuf.LeaveRequest, opts ...grpc.CallOption) error { if _, err := c.client.Leave(c.ctx, req, opts...); err != nil { return err } return nil } func (c *GRPCClient) Node(opts ...grpc.CallOption) (*protobuf.NodeResponse, error) { if resp, err := c.client.Node(c.ctx, &empty.Empty{}, opts...); err != nil { 
return nil, err } else { return resp, nil } } func (c *GRPCClient) Cluster(opts ...grpc.CallOption) (*protobuf.ClusterResponse, error) { if resp, err := c.client.Cluster(c.ctx, &empty.Empty{}, opts...); err != nil { return nil, err } else { return resp, nil } } func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { if _, err := c.client.Snapshot(c.ctx, &empty.Empty{}); err != nil { return err } return nil } func (c *GRPCClient) Get(req *protobuf.GetRequest, opts ...grpc.CallOption) (*protobuf.GetResponse, error) { if resp, err := c.client.Get(c.ctx, req, opts...); err != nil { st, _ := status.FromError(err) switch st.Code() { case codes.NotFound: return nil, errors.ErrNotFound default: return nil, err } } else { return resp, nil } } func (c *GRPCClient) Set(req *protobuf.SetRequest, opts ...grpc.CallOption) error { if _, err := c.client.Set(c.ctx, req, opts...); err != nil { return err } return nil } func (c *GRPCClient) Delete(req *protobuf.DeleteRequest, opts ...grpc.CallOption) error { if _, err := c.client.Delete(c.ctx, req, opts...); err != nil { return err } return nil } func (c *GRPCClient) Watch(req *empty.Empty, opts ...grpc.CallOption) (protobuf.KVS_WatchClient, error) { return c.client.Watch(c.ctx, req, opts...) 
} func (c *GRPCClient) Metrics(opts ...grpc.CallOption) (*protobuf.MetricsResponse, error) { if resp, err := c.client.Metrics(c.ctx, &empty.Empty{}, opts...); err != nil { return nil, err } else { return resp, nil } } ================================================ FILE: cmd/cluster.go ================================================ package cmd import ( "context" "encoding/json" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( clusterCmd = &cobra.Command{ Use: "cluster", Short: "Get the cluster info", Long: "Get the cluster info", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() resp, err := c.Cluster() if err != nil { return err } respBytes, err := json.Marshal(resp) if err != nil { return err } fmt.Println(string(respBytes)) return nil }, } ) func init() { rootCmd.AddCommand(clusterCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) clusterCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, cete.yaml in /etc and home directory will be searched") clusterCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") clusterCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") clusterCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", clusterCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", clusterCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", clusterCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/delete.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/protobuf" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( deleteCmd = &cobra.Command{ Use: "delete KEY", Args: cobra.ExactArgs(1), Short: "Delete a key-value", Long: "Delete a key-value", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") key := args[0] c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() req := &protobuf.DeleteRequest{ Key: key, } if err := c.Delete(req); err != nil { return err } return nil }, } ) func init() { rootCmd.AddCommand(deleteCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := 
viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) deleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") deleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") deleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") deleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", deleteCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", deleteCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", deleteCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/get.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/protobuf" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( getCmd = &cobra.Command{ Use: "get KEY", Args: cobra.ExactArgs(1), Short: "Get a key-value", Long: "Get a key-value", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") key := args[0] c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() req := &protobuf.GetRequest{ Key: key, } resp, err := c.Get(req) if err != nil { return err } fmt.Println(string(resp.Value)) return nil }, } ) func init() { rootCmd.AddCommand(getCmd) cobra.OnInitialize(func() { if 
configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) getCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") getCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") getCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") getCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", getCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", getCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", getCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/healthcheck.go ================================================ package cmd import ( "context" "encoding/json" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( healthCheckCmd = &cobra.Command{ Use: "healthcheck", Short: "Health check a node", Long: "Health check a node", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer 
func() { _ = c.Close() }() lResp, err := c.LivenessCheck() if err != nil { return err } rResp, err := c.ReadinessCheck() if err != nil { return err } resp := map[string]bool{ "liveness": lResp.Alive, "readiness:": rResp.Ready, } respBytes, err := json.Marshal(resp) if err != nil { return err } fmt.Println(string(respBytes)) return nil }, } ) func init() { rootCmd.AddCommand(healthCheckCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) healthCheckCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, cete.yaml in /etc and home directory will be searched") healthCheckCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") healthCheckCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") healthCheckCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", healthCheckCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", healthCheckCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", healthCheckCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/join.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/protobuf" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( joinCmd = &cobra.Command{ Use: "join ID GRPC_ADDRESS", Args: cobra.ExactArgs(2), Short: "Join a node to the cluster", Long: "Join a node to the cluster", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") id := args[0] targetGrpcAddress := args[1] t, err := client.NewGRPCClientWithContextTLS(targetGrpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = t.Close() }() nodeResp, err := t.Node() if err != nil { return err } c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() req := &protobuf.JoinRequest{ Id: id, Node: nodeResp.Node, } if err := c.Join(req); err != nil { return err } return nil }, } ) func init() { rootCmd.AddCommand(joinCmd) 
cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) joinCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") joinCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") joinCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") joinCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", joinCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", joinCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", joinCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/leave.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/protobuf" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( leaveCmd = &cobra.Command{ Use: "leave ID", Args: cobra.ExactArgs(1), Short: "Leave a node from the cluster", Long: "Leave a node from the cluster", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") id := args[0] c, err := 
client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() req := &protobuf.LeaveRequest{ Id: id, } if err := c.Leave(req); err != nil { return err } return nil }, } ) func init() { rootCmd.AddCommand(leaveCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) leaveCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") leaveCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") leaveCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") leaveCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", leaveCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", leaveCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", leaveCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/metrics.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( metricsCmd = &cobra.Command{ Use: "metrics", Short: "Get the node metrics", Long: "Get the node metrics in Prometheus 
exposition format", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() resp, err := c.Metrics() if err != nil { return err } fmt.Println(string(resp.Metrics)) return nil }, } ) func init() { rootCmd.AddCommand(metricsCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) metricsCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, cete.yaml in /etc and home directory will be searched") metricsCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") metricsCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") metricsCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", metricsCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", metricsCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", metricsCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/node.go ================================================ package cmd import ( "context" "encoding/json" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( nodeCmd = &cobra.Command{ Use: "node", Short: "Get the node info", Long: "Get the node info", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() resp, err := c.Node() if err != nil { return err } respBytes, err := json.Marshal(resp) if err != nil { return err } fmt.Println(string(respBytes)) return nil }, } ) func init() { rootCmd.AddCommand(nodeCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err 
!= nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) nodeCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") nodeCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") nodeCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") nodeCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", nodeCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", nodeCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", nodeCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/root.go ================================================ package cmd import ( "github.com/spf13/cobra" ) var ( rootCmd = &cobra.Command{ Use: "cete", Short: "The lightweight distributed key value store server", Long: "The lightweight distributed key value store server", } ) func Execute() error { return rootCmd.Execute() } ================================================ FILE: cmd/set.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/protobuf" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( setCmd = &cobra.Command{ Use: "set KEY VALUE", Args: cobra.ExactArgs(2), Short: "Set a key-value", Long: "Set a key-value", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") key := args[0] value := args[1] c, err := 
client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() req := &protobuf.SetRequest{ Key: key, Value: []byte(value), } if err := c.Set(req); err != nil { return err } return nil }, } ) func init() { rootCmd.AddCommand(setCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) setCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") setCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") setCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") setCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", setCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", setCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", setCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/snapshot.go ================================================ package cmd import ( "context" "fmt" "os" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( snapshotCmd = &cobra.Command{ Use: "snapshot", Short: "Create a snapshot", Long: "Create a snapshot which is 
full-volume copy of data stored on the node", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() if err := c.Snapshot(); err != nil { return err } return nil }, } ) func init() { rootCmd.AddCommand(snapshotCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) snapshotCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, cete.yaml in /etc and home directory will be searched") snapshotCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") snapshotCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") snapshotCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", snapshotCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", snapshotCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", snapshotCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: cmd/start.go ================================================ package cmd import ( "context" "fmt" "os" "os/signal" "syscall" "time" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/log" "github.com/mosuka/cete/protobuf" "github.com/mosuka/cete/server" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( startCmd = &cobra.Command{ Use: "start", Short: "Start the key value store server", Long: "Start the key value store server", RunE: func(cmd *cobra.Command, args []string) error { id = viper.GetString("id") raftAddress = viper.GetString("raft_address") grpcAddress = viper.GetString("grpc_address") httpAddress = viper.GetString("http_address") dataDirectory = viper.GetString("data_directory") peerGrpcAddress = viper.GetString("peer_grpc_address") certificateFile = viper.GetString("certificate_file") keyFile = viper.GetString("key_file") commonName = viper.GetString("common_name") logLevel = viper.GetString("log_level") logFile = viper.GetString("log_file") logMaxSize = viper.GetInt("log_max_size") logMaxBackups = viper.GetInt("log_max_backups") logMaxAge = viper.GetInt("log_max_age") logCompress = viper.GetBool("log_compress") logger := log.NewLogger( logLevel, logFile, logMaxSize, 
logMaxBackups, logMaxAge, logCompress, ) bootstrap := peerGrpcAddress == "" || peerGrpcAddress == grpcAddress raftServer, err := server.NewRaftServer(id, raftAddress, dataDirectory, bootstrap, logger) if err != nil { return err } grpcServer, err := server.NewGRPCServer(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) if err != nil { return err } grpcGateway, err := server.NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, logger) if err != nil { return err } quitCh := make(chan os.Signal, 1) signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) if err := raftServer.Start(); err != nil { return err } if err := grpcServer.Start(); err != nil { return err } if err := grpcGateway.Start(); err != nil { return err } // wait for detect leader if it's bootstrap if bootstrap { timeout := 60 * time.Second if err := raftServer.WaitForDetectLeader(timeout); err != nil { return err } } // create gRPC client for joining node var joinGrpcAddress string if bootstrap { joinGrpcAddress = grpcAddress } else { joinGrpcAddress = peerGrpcAddress } c, err := client.NewGRPCClientWithContextTLS(joinGrpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() // join this node to the existing cluster joinRequest := &protobuf.JoinRequest{ Id: id, Node: &protobuf.Node{ RaftAddress: raftAddress, Metadata: &protobuf.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, }, }, } if err = c.Join(joinRequest); err != nil { return err } // wait for receiving signal <-quitCh _ = grpcGateway.Stop() _ = grpcServer.Stop() _ = raftServer.Stop() return nil }, } ) func init() { rootCmd.AddCommand(startCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") 
viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } }) startCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") startCmd.PersistentFlags().StringVar(&id, "id", "node1", "node ID") startCmd.PersistentFlags().StringVar(&raftAddress, "raft-address", ":7000", "Raft server listen address") startCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") startCmd.PersistentFlags().StringVar(&httpAddress, "http-address", ":8000", "HTTP server listen address") startCmd.PersistentFlags().StringVar(&dataDirectory, "data-directory", "/tmp/cete/data", "data directory which store the key-value store data and Raft logs") startCmd.PersistentFlags().StringVar(&peerGrpcAddress, "peer-grpc-address", "", "listen address of the existing gRPC server in the joining cluster") startCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") startCmd.PersistentFlags().StringVar(&keyFile, "key-file", "", "path to the client server TLS key file") startCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") startCmd.PersistentFlags().StringVar(&logLevel, "log-level", "INFO", "log level") startCmd.PersistentFlags().StringVar(&logFile, "log-file", os.Stderr.Name(), "log file") startCmd.PersistentFlags().IntVar(&logMaxSize, "log-max-size", 500, "max size of a log file in megabytes") startCmd.PersistentFlags().IntVar(&logMaxBackups, "log-max-backups", 3, "max backup count of log files") startCmd.PersistentFlags().IntVar(&logMaxAge, "log-max-age", 30, "max age of a log file in days") 
startCmd.PersistentFlags().BoolVar(&logCompress, "log-compress", false, "compress a log file") _ = viper.BindPFlag("id", startCmd.PersistentFlags().Lookup("id")) _ = viper.BindPFlag("raft_address", startCmd.PersistentFlags().Lookup("raft-address")) _ = viper.BindPFlag("grpc_address", startCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("http_address", startCmd.PersistentFlags().Lookup("http-address")) _ = viper.BindPFlag("data_directory", startCmd.PersistentFlags().Lookup("data-directory")) _ = viper.BindPFlag("peer_grpc_address", startCmd.PersistentFlags().Lookup("peer-grpc-address")) _ = viper.BindPFlag("certificate_file", startCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("key_file", startCmd.PersistentFlags().Lookup("key-file")) _ = viper.BindPFlag("common_name", startCmd.PersistentFlags().Lookup("common-name")) _ = viper.BindPFlag("log_level", startCmd.PersistentFlags().Lookup("log-level")) _ = viper.BindPFlag("log_max_size", startCmd.PersistentFlags().Lookup("log-max-size")) _ = viper.BindPFlag("log_max_backups", startCmd.PersistentFlags().Lookup("log-max-backups")) _ = viper.BindPFlag("log_max_age", startCmd.PersistentFlags().Lookup("log-max-age")) _ = viper.BindPFlag("log_compress", startCmd.PersistentFlags().Lookup("log-compress")) } ================================================ FILE: cmd/variables.go ================================================ package cmd var ( configFile string id string raftAddress string grpcAddress string httpAddress string dataDirectory string peerGrpcAddress string certificateFile string keyFile string commonName string logLevel string logFile string logMaxSize int logMaxBackups int logMaxAge int logCompress bool ) ================================================ FILE: cmd/version.go ================================================ package cmd import ( "fmt" "github.com/mosuka/cete/version" "github.com/spf13/cobra" ) var ( versionCmd = &cobra.Command{ Use: "version", Short: "Print 
the version number", Long: "Print the version number", RunE: func(cmd *cobra.Command, args []string) error { fmt.Printf("cete version: %s\n", version.Version) return nil }, } ) func init() { rootCmd.AddCommand(versionCmd) } ================================================ FILE: cmd/watch.go ================================================ package cmd import ( "context" "fmt" "io" "os" "os/signal" "syscall" "github.com/golang/protobuf/ptypes/empty" "github.com/mitchellh/go-homedir" "github.com/mosuka/cete/client" "github.com/mosuka/cete/marshaler" "github.com/mosuka/cete/protobuf" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( watchCmd = &cobra.Command{ Use: "watch", Short: "Watch a node updates", Long: "Watch a node updates", RunE: func(cmd *cobra.Command, args []string) error { grpcAddress = viper.GetString("grpc_address") certificateFile = viper.GetString("certificate_file") commonName = viper.GetString("common_name") c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) if err != nil { return err } defer func() { _ = c.Close() }() req := &empty.Empty{} watchClient, err := c.Watch(req) if err != nil { return err } go func() { for { resp, err := watchClient.Recv() if err == io.EOF { break } if err != nil { break } switch resp.Event.Type { case protobuf.Event_Join: eventReq := &protobuf.SetMetadataRequest{} if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) } else { if eventData == nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) } else { eventReq = eventData.(*protobuf.SetMetadataRequest) } } fmt.Printf("%s, %v\n", resp.Event.Type.String(), eventReq) case protobuf.Event_Leave: eventReq := &protobuf.DeleteMetadataRequest{} if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", 
resp.Event.Type.String(), err)) } else { if eventData == nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) } else { eventReq = eventData.(*protobuf.DeleteMetadataRequest) } } fmt.Printf("%s, %v\n", resp.Event.Type.String(), eventReq) case protobuf.Event_Set: putRequest := &protobuf.SetRequest{} if putRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) } else { if putRequestInstance == nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) } else { putRequest = putRequestInstance.(*protobuf.SetRequest) } } fmt.Printf("%s, %v\n", resp.Event.Type.String(), putRequest) case protobuf.Event_Delete: deleteRequest := &protobuf.DeleteRequest{} if deleteRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) } else { if deleteRequestInstance == nil { _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) } else { deleteRequest = deleteRequestInstance.(*protobuf.DeleteRequest) } } fmt.Printf("%s, %v\n", resp.Event.Type.String(), deleteRequest) } } }() quitCh := make(chan os.Signal, 1) signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) <-quitCh return nil }, } ) func init() { rootCmd.AddCommand(watchCmd) cobra.OnInitialize(func() { if configFile != "" { viper.SetConfigFile(configFile) } else { home, err := homedir.Dir() if err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } viper.AddConfigPath("/etc") viper.AddConfigPath(home) viper.SetConfigName("cete") } viper.SetEnvPrefix("CETE") viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { switch err.(type) { case viper.ConfigFileNotFoundError: // cete.yaml does not found in config search path default: _, _ = fmt.Fprintln(os.Stderr, err) 
os.Exit(1) } } }) watchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, cete.yaml in /etc and home directory will be searched") watchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") watchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") watchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") _ = viper.BindPFlag("grpc_address", watchCmd.PersistentFlags().Lookup("grpc-address")) _ = viper.BindPFlag("certificate_file", watchCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("common_name", watchCmd.PersistentFlags().Lookup("common-name")) } ================================================ FILE: docker-entrypoint.sh ================================================ #!/bin/sh set -e exec "$@" ================================================ FILE: errors/errors.go ================================================ package errors import "errors" var ( ErrNotFoundLeader = errors.New("does not found leader") ErrNodeAlreadyExists = errors.New("node already exists") ErrNodeNotReady = errors.New("node not ready") ErrNotFound = errors.New("not found") ErrTimeout = errors.New("timeout") ) ================================================ FILE: etc/cete.yaml ================================================ id: "node1" raft_address: ":7000" grpc_address: ":9000" http_address: ":8000" data_directory: "/tmp/cete/node1/data" peer_grpc_address: "" #certificate_file: "./etc/cete-cert.pem" #key_file: "./etc/cete-key.pem" #common_name: "localhost" log_level: "INFO" log_file: "" #log_max_size: 500 #log_max_backups: 3 #log_max_age: 30 #log_compress: false ================================================ FILE: go.mod ================================================ module github.com/mosuka/cete go 1.14 require ( github.com/BBVA/raft-badger v1.0.2 
github.com/dgraph-io/badger/v2 v2.0.3 github.com/golang/protobuf v1.3.5 github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.14.3 github.com/hashicorp/raft v1.1.2 github.com/mash/go-accesslog v1.1.0 github.com/mitchellh/go-homedir v1.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/prometheus/client_golang v1.5.1 github.com/prometheus/common v0.9.1 github.com/spf13/cobra v0.0.7 github.com/spf13/viper v1.4.0 go.uber.org/zap v1.14.1 google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c google.golang.org/grpc v1.28.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect ) ================================================ FILE: go.sum ================================================ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BBVA/raft-badger v1.0.2 h1:FGSzkfr2iyfEretWvU2v7y34KEorWH+XSKssnWRdrBg= github.com/BBVA/raft-badger v1.0.2/go.mod h1:zsjAa/3jFfMR+ZR+XCMGhVqr9Zz7fBqT8LOuGpPzzDw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist 
v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI= github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU= github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= 
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b h1:SeiGBzKrEtuDddnBABHkp4kq9sBGE9nuYmk6FPTg0zg= github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 
h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl 
v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c= github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mash/go-accesslog v1.1.0 h1:y22583qP3s+SePBs6mv8ZTz5D1UffPrSg+WFEW2Rf/c= github.com/mash/go-accesslog v1.1.0/go.mod h1:DAbGQzio0KX16krP/3uouoTPxGbzcPjFAb948zazOgg= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= 
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= 
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c 
h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= ================================================ FILE: log/log.go ================================================ package log import ( "os" "strconv" "github.com/mash/go-accesslog" "github.com/natefinch/lumberjack" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackups int, logMaxAge int, logCompress bool) *zap.Logger { var ll zapcore.Level switch logLevel { case "DEBUG": ll = zap.DebugLevel case "INFO": ll = zap.InfoLevel case "WARN", "WARNING": ll = zap.WarnLevel case "ERR", "ERROR": ll = zap.WarnLevel case "DPANIC": ll = zap.DPanicLevel case "PANIC": ll = zap.PanicLevel case "FATAL": ll = zap.FatalLevel } var ws zapcore.WriteSyncer switch logFilename { case "", os.Stderr.Name(): ws = zapcore.AddSync(os.Stderr) case os.Stdout.Name(): ws = zapcore.AddSync(os.Stdout) default: ws = zapcore.AddSync( &lumberjack.Logger{ Filename: logFilename, MaxSize: logMaxSize, // megabytes MaxBackups: logMaxBackups, MaxAge: logMaxAge, // days Compress: logCompress, }, ) } ec := zap.NewProductionEncoderConfig() ec.TimeKey = "_timestamp_" ec.LevelKey = "_level_" ec.NameKey = "_name_" ec.CallerKey = 
"_caller_" ec.MessageKey = "_message_" ec.StacktraceKey = "_stacktrace_" ec.EncodeTime = zapcore.ISO8601TimeEncoder ec.EncodeCaller = zapcore.ShortCallerEncoder logger := zap.New( zapcore.NewCore( zapcore.NewJSONEncoder(ec), ws, ll, ), zap.AddCaller(), //zap.AddStacktrace(ll), ).Named("cete") return logger } type HTTPLogger struct { Logger *zap.Logger } func (l HTTPLogger) Log(record accesslog.LogRecord) { // Output log that formatted Apache combined. size := "-" if record.Size > 0 { size = strconv.FormatInt(record.Size, 10) } referer := "-" if record.RequestHeader.Get("Referer") != "" { referer = record.RequestHeader.Get("Referer") } userAgent := "-" if record.RequestHeader.Get("User-Agent") != "" { userAgent = record.RequestHeader.Get("User-Agent") } l.Logger.Info( "", zap.String("ip", record.Ip), zap.String("username", record.Username), zap.String("time", record.Time.Format("02/Jan/2006 03:04:05 +0000")), zap.String("method", record.Method), zap.String("uri", record.Uri), zap.String("protocol", record.Protocol), zap.Int("status", record.Status), zap.String("size", size), zap.String("referer", referer), zap.String("user_agent", userAgent), ) } ================================================ FILE: main.go ================================================ package main import ( "os" "github.com/mosuka/cete/cmd" ) func main() { if err := cmd.Execute(); err != nil { os.Exit(1) } os.Exit(0) } ================================================ FILE: marshaler/marshaler.go ================================================ package marshaler import ( "encoding/json" "io" "io/ioutil" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/mosuka/cete/protobuf" ) var ( DefaultContentType = "application/json" ) type CeteMarshaler struct{} func (*CeteMarshaler) ContentType() string { return DefaultContentType } func (j *CeteMarshaler) Marshal(v interface{}) ([]byte, error) { switch v.(type) { case *protobuf.GetResponse: value := v.(*protobuf.GetResponse).Value return value, 
nil case *protobuf.MetricsResponse: value := v.(*protobuf.MetricsResponse).Metrics return value, nil default: return json.Marshal(v) } } func (j *CeteMarshaler) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) } func (j *CeteMarshaler) NewDecoder(r io.Reader) runtime.Decoder { return runtime.DecoderFunc( func(v interface{}) error { buffer, err := ioutil.ReadAll(r) if err != nil { return err } switch v.(type) { case *protobuf.SetRequest: v.(*protobuf.SetRequest).Value = buffer return nil default: return json.Unmarshal(buffer, v) } }, ) } func (j *CeteMarshaler) NewEncoder(w io.Writer) runtime.Encoder { return json.NewEncoder(w) } func (j *CeteMarshaler) Delimiter() []byte { return []byte("\n") } ================================================ FILE: marshaler/util.go ================================================ package marshaler import ( "encoding/json" "reflect" "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/cete/protobuf" "github.com/mosuka/cete/registry" ) func init() { registry.RegisterType("protobuf.LivenessCheckResponse", reflect.TypeOf(protobuf.LivenessCheckResponse{})) registry.RegisterType("protobuf.ReadinessCheckResponse", reflect.TypeOf(protobuf.ReadinessCheckResponse{})) registry.RegisterType("protobuf.Metadata", reflect.TypeOf(protobuf.Metadata{})) registry.RegisterType("protobuf.Node", reflect.TypeOf(protobuf.Node{})) registry.RegisterType("protobuf.Cluster", reflect.TypeOf(protobuf.Cluster{})) registry.RegisterType("protobuf.JoinRequest", reflect.TypeOf(protobuf.JoinRequest{})) registry.RegisterType("protobuf.LeaveRequest", reflect.TypeOf(protobuf.LeaveRequest{})) registry.RegisterType("protobuf.NodeResponse", reflect.TypeOf(protobuf.NodeResponse{})) registry.RegisterType("protobuf.ClusterResponse", reflect.TypeOf(protobuf.ClusterResponse{})) registry.RegisterType("protobuf.GetRequest", reflect.TypeOf(protobuf.GetRequest{})) registry.RegisterType("protobuf.GetResponse", 
reflect.TypeOf(protobuf.GetResponse{})) registry.RegisterType("protobuf.SetRequest", reflect.TypeOf(protobuf.SetRequest{})) registry.RegisterType("protobuf.DeleteRequest", reflect.TypeOf(protobuf.DeleteRequest{})) registry.RegisterType("protobuf.SetMetadataRequest", reflect.TypeOf(protobuf.SetMetadataRequest{})) registry.RegisterType("protobuf.DeleteMetadataRequest", reflect.TypeOf(protobuf.DeleteMetadataRequest{})) registry.RegisterType("protobuf.Event", reflect.TypeOf(protobuf.Event{})) registry.RegisterType("protobuf.WatchResponse", reflect.TypeOf(protobuf.WatchResponse{})) registry.RegisterType("protobuf.MetricsResponse", reflect.TypeOf(protobuf.MetricsResponse{})) registry.RegisterType("protobuf.KeyValuePair", reflect.TypeOf(protobuf.KeyValuePair{})) registry.RegisterType("map[string]interface {}", reflect.TypeOf((map[string]interface{})(nil))) } func MarshalAny(message *any.Any) (interface{}, error) { if message == nil { return nil, nil } typeUrl := message.TypeUrl value := message.Value instance := registry.TypeInstanceByName(typeUrl) if err := json.Unmarshal(value, instance); err != nil { return nil, err } else { return instance, nil } } func UnmarshalAny(instance interface{}, message *any.Any) error { if instance == nil { return nil } value, err := json.Marshal(instance) if err != nil { return err } message.TypeUrl = registry.TypeNameByInstance(instance) message.Value = value return nil } ================================================ FILE: marshaler/util_test.go ================================================ package marshaler import ( "bytes" "testing" "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/cete/protobuf" ) func TestMarshalAny(t *testing.T) { // test map[string]interface{} data := map[string]interface{}{"a": 1, "b": 2, "c": 3} mapAny := &any.Any{} err := UnmarshalAny(data, mapAny) if err != nil { t.Errorf("%v", err) } expectedType := "map[string]interface {}" actualType := mapAny.TypeUrl if expectedType != actualType { 
t.Errorf("expected content to see %s, saw %s", expectedType, actualType) } expectedValue := []byte(`{"a":1,"b":2,"c":3}`) actualValue := mapAny.Value if !bytes.Equal(expectedValue, actualValue) { t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) } // test kvs.Node node := &protobuf.Node{ RaftAddress: ":7000", State: "Leader", Metadata: &protobuf.Metadata{ GrpcAddress: ":9000", HttpAddress: ":8000", }, } nodeAny := &any.Any{} err = UnmarshalAny(node, nodeAny) if err != nil { t.Errorf("%v", err) } expectedType = "protobuf.Node" actualType = nodeAny.TypeUrl if expectedType != actualType { t.Errorf("expected content to see %s, saw %s", expectedType, actualType) } expectedValue = []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`) actualValue = nodeAny.Value if !bytes.Equal(expectedValue, actualValue) { t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) } } func TestUnmarshalAny(t *testing.T) { // test map[string]interface{} dataAny := &any.Any{ TypeUrl: "map[string]interface {}", Value: []byte(`{"a":1,"b":2,"c":3}`), } data, err := MarshalAny(dataAny) if err != nil { t.Errorf("%v", err) } dataMap := *data.(*map[string]interface{}) if dataMap["a"] != float64(1) { t.Errorf("expected content to see %v, saw %v", 1, dataMap["a"]) } if dataMap["b"] != float64(2) { t.Errorf("expected content to see %v, saw %v", 2, dataMap["b"]) } if dataMap["c"] != float64(3) { t.Errorf("expected content to see %v, saw %v", 3, dataMap["c"]) } // raft.Node dataAny = &any.Any{ TypeUrl: "protobuf.Node", Value: []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`), } data, err = MarshalAny(dataAny) if err != nil { t.Errorf("%v", err) } node := data.(*protobuf.Node) if node.RaftAddress != ":7000" { t.Errorf("expected content to see %v, saw %v", ":7000", node.RaftAddress) } if node.Metadata.GrpcAddress != ":9000" { t.Errorf("expected 
content to see %v, saw %v", ":9000", node.Metadata.GrpcAddress) } if node.Metadata.HttpAddress != ":8000" { t.Errorf("expected content to see %v, saw %v", ":8000", node.Metadata.HttpAddress) } if node.State != "Leader" { t.Errorf("expected content to see %v, saw %v", "Leader", node.State) } } ================================================ FILE: metric/metric.go ================================================ package metric import ( grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/prometheus/client_golang/prometheus" ) var ( // Create a metrics registry. Registry = prometheus.NewRegistry() // Create some standard server metrics. GrpcMetrics = grpcprometheus.NewServerMetrics( func(o *prometheus.CounterOpts) { o.Namespace = "cete" }, ) // Raft node state metric RaftStateMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "state", Help: "Node state. 0:Follower, 1:Candidate, 2:Leader, 3:Shutdown", }, []string{"id"}) RaftTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "term", Help: "Term.", }, []string{"id"}) RaftLastLogIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "last_log_index", Help: "Last log index.", }, []string{"id"}) RaftLastLogTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "last_log_term", Help: "Last log term.", }, []string{"id"}) RaftCommitIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "commit_index", Help: "Commit index.", }, []string{"id"}) RaftAppliedIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "applied_index", Help: "Applied index.", }, []string{"id"}) RaftFsmPendingMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "fsm_pending", Help: "FSM pending.", }, 
[]string{"id"}) RaftLastSnapshotIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "last_snapshot_index", Help: "Last snapshot index.", }, []string{"id"}) RaftLastSnapshotTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "last_snapshot_term", Help: "Last snapshot term.", }, []string{"id"}) RaftLatestConfigurationIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "latest_configuration_index", Help: "Latest configuration index.", }, []string{"id"}) RaftNumPeersMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "num_peers", Help: "Number of peers.", }, []string{"id"}) RaftLastContactMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "last_copntact", Help: "Last contact.", }, []string{"id"}) RaftNumNodesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "raft", Name: "num_nodes", Help: "Number of nodes.", }, []string{"id"}) KvsNumReadsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_reads", Help: "Number of reads.", }, []string{"id"}) KvsNumWritesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_writes", Help: "Number of writes.", }, []string{"id"}) KvsNumBytesReadMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_bytes_read", Help: "Number of bytes read.", }, []string{"id"}) KvsNumBytesWrittenMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_bytes_written", Help: "Number of bytes written.", }, []string{"id"}) KvsNumLSMGetsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_lsm_gets", Help: "Number of LSM gets.", }, []string{"id"}) 
KvsNumLSMBloomHitsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_lsm_bloom_Hits", Help: "Number of LSM bloom hits.", }, []string{"id"}) KvsNumGetsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_gets", Help: "Number of gets.", }, []string{"id"}) KvsNumPutsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_puts", Help: "Number of puts.", }, []string{"id"}) KvsNumBlockedPutsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_blocked_puts", Help: "Number of blocked puts.", }, []string{"id"}) KvsNumMemtablesGetsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "num_memtables_gets", Help: "Number of memtables gets.", }, []string{"id"}) KvsLSMSizeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "lsm_size", Help: "LSM size.", }, []string{"id", "path"}) KvsVlogSizeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "vlog_size", Help: "Vlog size.", }, []string{"id", "path"}) KvsPendingWritesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cete", Subsystem: "kvs", Name: "pending_writes", Help: "Pending writes.", }, []string{"id", "path"}) ) func init() { // Register standard server metrics and customized metrics to registry. 
Registry.MustRegister( GrpcMetrics, RaftStateMetric, RaftTermMetric, RaftLastLogIndexMetric, RaftLastLogTermMetric, RaftCommitIndexMetric, RaftAppliedIndexMetric, RaftFsmPendingMetric, RaftLastSnapshotIndexMetric, RaftLastSnapshotTermMetric, RaftLatestConfigurationIndexMetric, RaftNumPeersMetric, RaftLastContactMetric, RaftNumNodesMetric, KvsNumReadsMetric, KvsNumWritesMetric, KvsNumBytesReadMetric, KvsNumBytesWrittenMetric, KvsNumLSMGetsMetric, KvsNumLSMBloomHitsMetric, KvsNumGetsMetric, KvsNumPutsMetric, KvsNumBlockedPutsMetric, KvsNumMemtablesGetsMetric, KvsLSMSizeMetric, KvsVlogSizeMetric, KvsPendingWritesMetric, ) GrpcMetrics.EnableHandlingTimeHistogram( func(o *prometheus.HistogramOpts) { o.Namespace = "cete" }, ) } ================================================ FILE: protobuf/kvs.pb.go ================================================ // Code generated by protoc-gen-go. DO NOT EDIT. // source: protobuf/kvs.proto package protobuf import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Event_Type int32 const ( Event_Unknown Event_Type = 0 Event_Join Event_Type = 1 Event_Leave Event_Type = 2 Event_Set Event_Type = 3 Event_Delete Event_Type = 4 ) var Event_Type_name = map[int32]string{ 0: "Unknown", 1: "Join", 2: "Leave", 3: "Set", 4: "Delete", } var Event_Type_value = map[string]int32{ "Unknown": 0, "Join": 1, "Leave": 2, "Set": 3, "Delete": 4, } func (x Event_Type) String() string { return proto.EnumName(Event_Type_name, int32(x)) } func (Event_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{17, 0} } type LivenessCheckResponse struct { Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LivenessCheckResponse) Reset() { *m = LivenessCheckResponse{} } func (m *LivenessCheckResponse) String() string { return proto.CompactTextString(m) } func (*LivenessCheckResponse) ProtoMessage() {} func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{0} } func (m *LivenessCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LivenessCheckResponse.Unmarshal(m, b) } func (m *LivenessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LivenessCheckResponse.Marshal(b, m, deterministic) } func (m *LivenessCheckResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LivenessCheckResponse.Merge(m, src) } func (m *LivenessCheckResponse) XXX_Size() int { return xxx_messageInfo_LivenessCheckResponse.Size(m) } func (m *LivenessCheckResponse) XXX_DiscardUnknown() { xxx_messageInfo_LivenessCheckResponse.DiscardUnknown(m) } var xxx_messageInfo_LivenessCheckResponse proto.InternalMessageInfo func (m *LivenessCheckResponse) GetAlive() bool { if m != nil { return m.Alive } return false } type 
ReadinessCheckResponse struct { Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ReadinessCheckResponse) Reset() { *m = ReadinessCheckResponse{} } func (m *ReadinessCheckResponse) String() string { return proto.CompactTextString(m) } func (*ReadinessCheckResponse) ProtoMessage() {} func (*ReadinessCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{1} } func (m *ReadinessCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadinessCheckResponse.Unmarshal(m, b) } func (m *ReadinessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadinessCheckResponse.Marshal(b, m, deterministic) } func (m *ReadinessCheckResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadinessCheckResponse.Merge(m, src) } func (m *ReadinessCheckResponse) XXX_Size() int { return xxx_messageInfo_ReadinessCheckResponse.Size(m) } func (m *ReadinessCheckResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReadinessCheckResponse.DiscardUnknown(m) } var xxx_messageInfo_ReadinessCheckResponse proto.InternalMessageInfo func (m *ReadinessCheckResponse) GetReady() bool { if m != nil { return m.Ready } return false } type Metadata struct { GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Metadata) Reset() { *m = Metadata{} } func (m *Metadata) String() string { return proto.CompactTextString(m) } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{2} } func (m *Metadata) 
XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metadata.Unmarshal(m, b) } func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } func (m *Metadata) XXX_Merge(src proto.Message) { xxx_messageInfo_Metadata.Merge(m, src) } func (m *Metadata) XXX_Size() int { return xxx_messageInfo_Metadata.Size(m) } func (m *Metadata) XXX_DiscardUnknown() { xxx_messageInfo_Metadata.DiscardUnknown(m) } var xxx_messageInfo_Metadata proto.InternalMessageInfo func (m *Metadata) GetGrpcAddress() string { if m != nil { return m.GrpcAddress } return "" } func (m *Metadata) GetHttpAddress() string { if m != nil { return m.HttpAddress } return "" } type Node struct { RaftAddress string `protobuf:"bytes,1,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Node) Reset() { *m = Node{} } func (m *Node) String() string { return proto.CompactTextString(m) } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{3} } func (m *Node) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Node.Unmarshal(m, b) } func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Node.Marshal(b, m, deterministic) } func (m *Node) XXX_Merge(src proto.Message) { xxx_messageInfo_Node.Merge(m, src) } func (m *Node) XXX_Size() int { return xxx_messageInfo_Node.Size(m) } func (m *Node) XXX_DiscardUnknown() { xxx_messageInfo_Node.DiscardUnknown(m) } var xxx_messageInfo_Node proto.InternalMessageInfo func (m *Node) GetRaftAddress() string { if m != nil { return m.RaftAddress } return "" } func (m *Node) GetMetadata() 
*Metadata { if m != nil { return m.Metadata } return nil } func (m *Node) GetState() string { if m != nil { return m.State } return "" } type Cluster struct { Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Cluster) Reset() { *m = Cluster{} } func (m *Cluster) String() string { return proto.CompactTextString(m) } func (*Cluster) ProtoMessage() {} func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{4} } func (m *Cluster) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Cluster.Unmarshal(m, b) } func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) } func (m *Cluster) XXX_Merge(src proto.Message) { xxx_messageInfo_Cluster.Merge(m, src) } func (m *Cluster) XXX_Size() int { return xxx_messageInfo_Cluster.Size(m) } func (m *Cluster) XXX_DiscardUnknown() { xxx_messageInfo_Cluster.DiscardUnknown(m) } var xxx_messageInfo_Cluster proto.InternalMessageInfo func (m *Cluster) GetNodes() map[string]*Node { if m != nil { return m.Nodes } return nil } func (m *Cluster) GetLeader() string { if m != nil { return m.Leader } return "" } type JoinRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *JoinRequest) Reset() { *m = JoinRequest{} } func (m *JoinRequest) String() string { return proto.CompactTextString(m) } func (*JoinRequest) ProtoMessage() {} func (*JoinRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor_431078ad7b21f851, []int{5} } func (m *JoinRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_JoinRequest.Unmarshal(m, b) } func (m *JoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_JoinRequest.Marshal(b, m, deterministic) } func (m *JoinRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_JoinRequest.Merge(m, src) } func (m *JoinRequest) XXX_Size() int { return xxx_messageInfo_JoinRequest.Size(m) } func (m *JoinRequest) XXX_DiscardUnknown() { xxx_messageInfo_JoinRequest.DiscardUnknown(m) } var xxx_messageInfo_JoinRequest proto.InternalMessageInfo func (m *JoinRequest) GetId() string { if m != nil { return m.Id } return "" } func (m *JoinRequest) GetNode() *Node { if m != nil { return m.Node } return nil } type LeaveRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } func (m *LeaveRequest) String() string { return proto.CompactTextString(m) } func (*LeaveRequest) ProtoMessage() {} func (*LeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{6} } func (m *LeaveRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LeaveRequest.Unmarshal(m, b) } func (m *LeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LeaveRequest.Marshal(b, m, deterministic) } func (m *LeaveRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LeaveRequest.Merge(m, src) } func (m *LeaveRequest) XXX_Size() int { return xxx_messageInfo_LeaveRequest.Size(m) } func (m *LeaveRequest) XXX_DiscardUnknown() { xxx_messageInfo_LeaveRequest.DiscardUnknown(m) } var xxx_messageInfo_LeaveRequest proto.InternalMessageInfo func (m *LeaveRequest) GetId() string { if m != nil { return m.Id } return "" } type NodeResponse struct { Node *Node 
`protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NodeResponse) Reset() { *m = NodeResponse{} } func (m *NodeResponse) String() string { return proto.CompactTextString(m) } func (*NodeResponse) ProtoMessage() {} func (*NodeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{7} } func (m *NodeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeResponse.Unmarshal(m, b) } func (m *NodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeResponse.Marshal(b, m, deterministic) } func (m *NodeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_NodeResponse.Merge(m, src) } func (m *NodeResponse) XXX_Size() int { return xxx_messageInfo_NodeResponse.Size(m) } func (m *NodeResponse) XXX_DiscardUnknown() { xxx_messageInfo_NodeResponse.DiscardUnknown(m) } var xxx_messageInfo_NodeResponse proto.InternalMessageInfo func (m *NodeResponse) GetNode() *Node { if m != nil { return m.Node } return nil } type ClusterResponse struct { Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ClusterResponse) Reset() { *m = ClusterResponse{} } func (m *ClusterResponse) String() string { return proto.CompactTextString(m) } func (*ClusterResponse) ProtoMessage() {} func (*ClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{8} } func (m *ClusterResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClusterResponse.Unmarshal(m, b) } func (m *ClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClusterResponse.Marshal(b, m, deterministic) } func (m *ClusterResponse) XXX_Merge(src proto.Message) { 
xxx_messageInfo_ClusterResponse.Merge(m, src) } func (m *ClusterResponse) XXX_Size() int { return xxx_messageInfo_ClusterResponse.Size(m) } func (m *ClusterResponse) XXX_DiscardUnknown() { xxx_messageInfo_ClusterResponse.DiscardUnknown(m) } var xxx_messageInfo_ClusterResponse proto.InternalMessageInfo func (m *ClusterResponse) GetCluster() *Cluster { if m != nil { return m.Cluster } return nil } type GetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{9} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetRequest.Unmarshal(m, b) } func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } func (m *GetRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetRequest.Merge(m, src) } func (m *GetRequest) XXX_Size() int { return xxx_messageInfo_GetRequest.Size(m) } func (m *GetRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetRequest.DiscardUnknown(m) } var xxx_messageInfo_GetRequest proto.InternalMessageInfo func (m *GetRequest) GetKey() string { if m != nil { return m.Key } return "" } type GetResponse struct { Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, 
[]int{10} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetResponse.Unmarshal(m, b) } func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) } func (m *GetResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetResponse.Merge(m, src) } func (m *GetResponse) XXX_Size() int { return xxx_messageInfo_GetResponse.Size(m) } func (m *GetResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetResponse.DiscardUnknown(m) } var xxx_messageInfo_GetResponse proto.InternalMessageInfo func (m *GetResponse) GetValue() []byte { if m != nil { return m.Value } return nil } type ScanRequest struct { Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanRequest) Reset() { *m = ScanRequest{} } func (m *ScanRequest) String() string { return proto.CompactTextString(m) } func (*ScanRequest) ProtoMessage() {} func (*ScanRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{11} } func (m *ScanRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ScanRequest.Unmarshal(m, b) } func (m *ScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ScanRequest.Marshal(b, m, deterministic) } func (m *ScanRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanRequest.Merge(m, src) } func (m *ScanRequest) XXX_Size() int { return xxx_messageInfo_ScanRequest.Size(m) } func (m *ScanRequest) XXX_DiscardUnknown() { xxx_messageInfo_ScanRequest.DiscardUnknown(m) } var xxx_messageInfo_ScanRequest proto.InternalMessageInfo func (m *ScanRequest) GetPrefix() string { if m != nil { return m.Prefix } return "" } type ScanResponse struct { Values [][]byte `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` 
XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanResponse) Reset() { *m = ScanResponse{} } func (m *ScanResponse) String() string { return proto.CompactTextString(m) } func (*ScanResponse) ProtoMessage() {} func (*ScanResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{12} } func (m *ScanResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ScanResponse.Unmarshal(m, b) } func (m *ScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ScanResponse.Marshal(b, m, deterministic) } func (m *ScanResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanResponse.Merge(m, src) } func (m *ScanResponse) XXX_Size() int { return xxx_messageInfo_ScanResponse.Size(m) } func (m *ScanResponse) XXX_DiscardUnknown() { xxx_messageInfo_ScanResponse.DiscardUnknown(m) } var xxx_messageInfo_ScanResponse proto.InternalMessageInfo func (m *ScanResponse) GetValues() [][]byte { if m != nil { return m.Values } return nil } type SetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SetRequest) Reset() { *m = SetRequest{} } func (m *SetRequest) String() string { return proto.CompactTextString(m) } func (*SetRequest) ProtoMessage() {} func (*SetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{13} } func (m *SetRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetRequest.Unmarshal(m, b) } func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) } func (m *SetRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetRequest.Merge(m, src) } func (m *SetRequest) XXX_Size() int { return 
xxx_messageInfo_SetRequest.Size(m) } func (m *SetRequest) XXX_DiscardUnknown() { xxx_messageInfo_SetRequest.DiscardUnknown(m) } var xxx_messageInfo_SetRequest proto.InternalMessageInfo func (m *SetRequest) GetKey() string { if m != nil { return m.Key } return "" } func (m *SetRequest) GetValue() []byte { if m != nil { return m.Value } return nil } type DeleteRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{14} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) } func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) } func (m *DeleteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_DeleteRequest.Merge(m, src) } func (m *DeleteRequest) XXX_Size() int { return xxx_messageInfo_DeleteRequest.Size(m) } func (m *DeleteRequest) XXX_DiscardUnknown() { xxx_messageInfo_DeleteRequest.DiscardUnknown(m) } var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo func (m *DeleteRequest) GetKey() string { if m != nil { return m.Key } return "" } type SetMetadataRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SetMetadataRequest) Reset() { *m = SetMetadataRequest{} } func (m *SetMetadataRequest) String() string { return proto.CompactTextString(m) } func 
(*SetMetadataRequest) ProtoMessage() {} func (*SetMetadataRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{15} } func (m *SetMetadataRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SetMetadataRequest.Unmarshal(m, b) } func (m *SetMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SetMetadataRequest.Marshal(b, m, deterministic) } func (m *SetMetadataRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetMetadataRequest.Merge(m, src) } func (m *SetMetadataRequest) XXX_Size() int { return xxx_messageInfo_SetMetadataRequest.Size(m) } func (m *SetMetadataRequest) XXX_DiscardUnknown() { xxx_messageInfo_SetMetadataRequest.DiscardUnknown(m) } var xxx_messageInfo_SetMetadataRequest proto.InternalMessageInfo func (m *SetMetadataRequest) GetId() string { if m != nil { return m.Id } return "" } func (m *SetMetadataRequest) GetMetadata() *Metadata { if m != nil { return m.Metadata } return nil } type DeleteMetadataRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DeleteMetadataRequest) Reset() { *m = DeleteMetadataRequest{} } func (m *DeleteMetadataRequest) String() string { return proto.CompactTextString(m) } func (*DeleteMetadataRequest) ProtoMessage() {} func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{16} } func (m *DeleteMetadataRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteMetadataRequest.Unmarshal(m, b) } func (m *DeleteMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeleteMetadataRequest.Marshal(b, m, deterministic) } func (m *DeleteMetadataRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_DeleteMetadataRequest.Merge(m, src) } func (m *DeleteMetadataRequest) XXX_Size() int { 
return xxx_messageInfo_DeleteMetadataRequest.Size(m) } func (m *DeleteMetadataRequest) XXX_DiscardUnknown() { xxx_messageInfo_DeleteMetadataRequest.DiscardUnknown(m) } var xxx_messageInfo_DeleteMetadataRequest proto.InternalMessageInfo func (m *DeleteMetadataRequest) GetId() string { if m != nil { return m.Id } return "" } type Event struct { Type Event_Type `protobuf:"varint,1,opt,name=type,proto3,enum=kvs.Event_Type" json:"type,omitempty"` Data *any.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{17} } func (m *Event) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Event.Unmarshal(m, b) } func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Event.Marshal(b, m, deterministic) } func (m *Event) XXX_Merge(src proto.Message) { xxx_messageInfo_Event.Merge(m, src) } func (m *Event) XXX_Size() int { return xxx_messageInfo_Event.Size(m) } func (m *Event) XXX_DiscardUnknown() { xxx_messageInfo_Event.DiscardUnknown(m) } var xxx_messageInfo_Event proto.InternalMessageInfo func (m *Event) GetType() Event_Type { if m != nil { return m.Type } return Event_Unknown } func (m *Event) GetData() *any.Any { if m != nil { return m.Data } return nil } type WatchResponse struct { Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *WatchResponse) Reset() { *m = WatchResponse{} } func (m *WatchResponse) String() string { return proto.CompactTextString(m) } func (*WatchResponse) ProtoMessage() {} func (*WatchResponse) Descriptor() 
([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{18} } func (m *WatchResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_WatchResponse.Unmarshal(m, b) } func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) } func (m *WatchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_WatchResponse.Merge(m, src) } func (m *WatchResponse) XXX_Size() int { return xxx_messageInfo_WatchResponse.Size(m) } func (m *WatchResponse) XXX_DiscardUnknown() { xxx_messageInfo_WatchResponse.DiscardUnknown(m) } var xxx_messageInfo_WatchResponse proto.InternalMessageInfo func (m *WatchResponse) GetEvent() *Event { if m != nil { return m.Event } return nil } type MetricsResponse struct { Metrics []byte `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } func (m *MetricsResponse) String() string { return proto.CompactTextString(m) } func (*MetricsResponse) ProtoMessage() {} func (*MetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{19} } func (m *MetricsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetricsResponse.Unmarshal(m, b) } func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic) } func (m *MetricsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MetricsResponse.Merge(m, src) } func (m *MetricsResponse) XXX_Size() int { return xxx_messageInfo_MetricsResponse.Size(m) } func (m *MetricsResponse) XXX_DiscardUnknown() { xxx_messageInfo_MetricsResponse.DiscardUnknown(m) } var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo func (m *MetricsResponse) GetMetrics() []byte { if m != nil { return m.Metrics } return 
nil } type KeyValuePair struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *KeyValuePair) Reset() { *m = KeyValuePair{} } func (m *KeyValuePair) String() string { return proto.CompactTextString(m) } func (*KeyValuePair) ProtoMessage() {} func (*KeyValuePair) Descriptor() ([]byte, []int) { return fileDescriptor_431078ad7b21f851, []int{20} } func (m *KeyValuePair) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_KeyValuePair.Unmarshal(m, b) } func (m *KeyValuePair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_KeyValuePair.Marshal(b, m, deterministic) } func (m *KeyValuePair) XXX_Merge(src proto.Message) { xxx_messageInfo_KeyValuePair.Merge(m, src) } func (m *KeyValuePair) XXX_Size() int { return xxx_messageInfo_KeyValuePair.Size(m) } func (m *KeyValuePair) XXX_DiscardUnknown() { xxx_messageInfo_KeyValuePair.DiscardUnknown(m) } var xxx_messageInfo_KeyValuePair proto.InternalMessageInfo func (m *KeyValuePair) GetKey() string { if m != nil { return m.Key } return "" } func (m *KeyValuePair) GetValue() []byte { if m != nil { return m.Value } return nil } func init() { proto.RegisterEnum("kvs.Event_Type", Event_Type_name, Event_Type_value) proto.RegisterType((*LivenessCheckResponse)(nil), "kvs.LivenessCheckResponse") proto.RegisterType((*ReadinessCheckResponse)(nil), "kvs.ReadinessCheckResponse") proto.RegisterType((*Metadata)(nil), "kvs.Metadata") proto.RegisterType((*Node)(nil), "kvs.Node") proto.RegisterType((*Cluster)(nil), "kvs.Cluster") proto.RegisterMapType((map[string]*Node)(nil), "kvs.Cluster.NodesEntry") proto.RegisterType((*JoinRequest)(nil), "kvs.JoinRequest") proto.RegisterType((*LeaveRequest)(nil), "kvs.LeaveRequest") proto.RegisterType((*NodeResponse)(nil), "kvs.NodeResponse") 
proto.RegisterType((*ClusterResponse)(nil), "kvs.ClusterResponse") proto.RegisterType((*GetRequest)(nil), "kvs.GetRequest") proto.RegisterType((*GetResponse)(nil), "kvs.GetResponse") proto.RegisterType((*ScanRequest)(nil), "kvs.ScanRequest") proto.RegisterType((*ScanResponse)(nil), "kvs.ScanResponse") proto.RegisterType((*SetRequest)(nil), "kvs.SetRequest") proto.RegisterType((*DeleteRequest)(nil), "kvs.DeleteRequest") proto.RegisterType((*SetMetadataRequest)(nil), "kvs.SetMetadataRequest") proto.RegisterType((*DeleteMetadataRequest)(nil), "kvs.DeleteMetadataRequest") proto.RegisterType((*Event)(nil), "kvs.Event") proto.RegisterType((*WatchResponse)(nil), "kvs.WatchResponse") proto.RegisterType((*MetricsResponse)(nil), "kvs.MetricsResponse") proto.RegisterType((*KeyValuePair)(nil), "kvs.KeyValuePair") } func init() { proto.RegisterFile("protobuf/kvs.proto", fileDescriptor_431078ad7b21f851) } var fileDescriptor_431078ad7b21f851 = []byte{ // 1016 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xdd, 0x6e, 0xdb, 0x46, 0x13, 0x8d, 0xfe, 0x6c, 0x79, 0x24, 0xdb, 0xcc, 0x58, 0xd6, 0xe7, 0xd0, 0xf9, 0x9c, 0x78, 0x83, 0xa6, 0xae, 0x5b, 0x8b, 0x8d, 0x5b, 0xf4, 0xc7, 0x68, 0x2e, 0x52, 0xd7, 0x08, 0xda, 0x38, 0x8d, 0x41, 0xb5, 0x29, 0xd0, 0x1b, 0x63, 0x4d, 0x8e, 0x65, 0x56, 0x32, 0xc9, 0x92, 0x2b, 0xa5, 0x42, 0x90, 0x9b, 0x02, 0x7d, 0x82, 0xa2, 0x4f, 0xd6, 0x57, 0xe8, 0x83, 0x14, 0xfb, 0x43, 0x51, 0xb2, 0xc4, 0x26, 0x57, 0xe2, 0xee, 0x9c, 0x3d, 0x73, 0x76, 0x66, 0xf6, 0x40, 0x80, 0x71, 0x12, 0x89, 0xe8, 0x62, 0x78, 0xe9, 0xf4, 0x47, 0x69, 0x47, 0x2d, 0xb0, 0xd2, 0x1f, 0xa5, 0xf6, 0x9d, 0x5e, 0x14, 0xf5, 0x06, 0xe4, 0x4c, 0xe2, 0x3c, 0x1c, 0xeb, 0xb8, 0xbd, 0x7d, 0x33, 0x44, 0xd7, 0xb1, 0xc8, 0x82, 0x77, 0x4d, 0x90, 0xc7, 0x81, 0xc3, 0xc3, 0x30, 0x12, 0x5c, 0x04, 0x51, 0x68, 0xa8, 0xed, 0x8f, 0xd4, 0x8f, 0x77, 0xd0, 0xa3, 0xf0, 0x20, 0x7d, 0xc5, 0x7b, 0x3d, 0x4a, 0x9c, 0x28, 0x56, 0x88, 0x79, 0x34, 0x3b, 0x80, 0xcd, 
0xd3, 0x60, 0x44, 0x21, 0xa5, 0xe9, 0xf1, 0x15, 0x79, 0x7d, 0x97, 0xd2, 0x38, 0x0a, 0x53, 0xc2, 0x16, 0xd4, 0xf8, 0x20, 0x18, 0xd1, 0x56, 0xe9, 0x7e, 0x69, 0xaf, 0xee, 0xea, 0x05, 0xeb, 0x40, 0xdb, 0x25, 0xee, 0x07, 0x0b, 0xf1, 0x09, 0x71, 0x7f, 0x9c, 0xe1, 0xd5, 0x82, 0x9d, 0x41, 0xfd, 0x39, 0x09, 0xee, 0x73, 0xc1, 0x71, 0x17, 0x9a, 0xbd, 0x24, 0xf6, 0xce, 0xb9, 0xef, 0x27, 0x94, 0xa6, 0x0a, 0xb8, 0xe2, 0x36, 0xe4, 0xde, 0x13, 0xbd, 0x25, 0x21, 0x57, 0x42, 0xc4, 0x13, 0x48, 0x59, 0x43, 0xe4, 0x9e, 0x81, 0xb0, 0x5f, 0xa0, 0xfa, 0x7d, 0xe4, 0x93, 0x84, 0x26, 0xfc, 0x52, 0xdc, 0x64, 0x93, 0x7b, 0x19, 0xdb, 0x07, 0x50, 0xbf, 0x36, 0xc9, 0x15, 0x53, 0xe3, 0x70, 0xb5, 0x23, 0x5b, 0x90, 0x29, 0x72, 0x27, 0x61, 0xa9, 0x3e, 0x15, 0x5c, 0xd0, 0x56, 0x45, 0xd1, 0xe8, 0x05, 0xfb, 0xab, 0x04, 0xcb, 0xc7, 0x83, 0x61, 0x2a, 0x28, 0xc1, 0x03, 0xa8, 0x85, 0x91, 0x4f, 0x32, 0x51, 0x65, 0xaf, 0x71, 0xf8, 0x3f, 0xc5, 0x64, 0x82, 0x1d, 0xa9, 0x28, 0x3d, 0x09, 0x45, 0x32, 0x76, 0x35, 0x0a, 0xdb, 0xb0, 0x34, 0x20, 0xee, 0x53, 0x62, 0xee, 0x60, 0x56, 0xf6, 0x31, 0x40, 0x0e, 0x46, 0x0b, 0x2a, 0x7d, 0x1a, 0x1b, 0xed, 0xf2, 0x13, 0xef, 0x41, 0x6d, 0xc4, 0x07, 0x43, 0x32, 0x82, 0x57, 0x54, 0x1a, 0x79, 0xc2, 0xd5, 0xfb, 0x47, 0xe5, 0x2f, 0x4a, 0xec, 0x2b, 0x68, 0x7c, 0x17, 0x05, 0xa1, 0x4b, 0xbf, 0x0e, 0x29, 0x15, 0xb8, 0x06, 0xe5, 0xc0, 0x37, 0x24, 0xe5, 0xc0, 0xc7, 0xff, 0x43, 0x55, 0x8a, 0x98, 0xa7, 0x50, 0xdb, 0x6c, 0x07, 0x9a, 0xa7, 0xc4, 0x47, 0x54, 0x70, 0x9c, 0x1d, 0x40, 0x53, 0xa1, 0xb3, 0xce, 0x66, 0x74, 0xa5, 0xc5, 0x74, 0x5f, 0xc2, 0xba, 0x29, 0xc3, 0xe4, 0xc4, 0x43, 0x58, 0xf6, 0xf4, 0x96, 0x39, 0xd4, 0x9c, 0xae, 0x96, 0x9b, 0x05, 0xd9, 0x0e, 0xc0, 0x53, 0x12, 0x99, 0x8e, 0xb9, 0x62, 0xb0, 0x07, 0xd0, 0x50, 0xf1, 0x7c, 0xc4, 0x74, 0x6d, 0x24, 0xa4, 0x69, 0x0a, 0xc2, 0xde, 0x83, 0x46, 0xd7, 0xe3, 0x93, 0x62, 0xb4, 0x61, 0x29, 0x4e, 0xe8, 0x32, 0xf8, 0xcd, 0x10, 0x99, 0x15, 0x7b, 0x08, 0x4d, 0x0d, 0x33, 0x64, 0x6d, 0x58, 0x52, 0xe7, 0x75, 0x43, 0x9b, 0xae, 0x59, 0xb1, 0x4f, 0x01, 
0xba, 0xff, 0xa1, 0x29, 0x17, 0x51, 0x9e, 0x16, 0xb1, 0x0b, 0xab, 0xdf, 0xd0, 0x80, 0x04, 0x15, 0x5f, 0xe6, 0x05, 0x60, 0x97, 0xc4, 0x64, 0xf6, 0x0a, 0x7a, 0xf7, 0xee, 0x33, 0xcb, 0xde, 0x87, 0x4d, 0x9d, 0xf3, 0x2d, 0x9c, 0x72, 0x8c, 0x6b, 0x27, 0x23, 0x0a, 0x05, 0x3e, 0x80, 0xaa, 0x18, 0xc7, 0xba, 0x80, 0x6b, 0x87, 0xeb, 0x8a, 0x59, 0x45, 0x3a, 0x3f, 0x8c, 0x63, 0x72, 0x55, 0x10, 0xf7, 0xa0, 0x3a, 0x95, 0xbe, 0xd5, 0xd1, 0x6e, 0xd3, 0xc9, 0xac, 0xa8, 0xf3, 0x24, 0x1c, 0xbb, 0x0a, 0xc1, 0x1e, 0x43, 0x55, 0x9e, 0xc3, 0x06, 0x2c, 0xff, 0x18, 0xf6, 0xc3, 0xe8, 0x55, 0x68, 0xdd, 0xc2, 0x3a, 0x54, 0xe5, 0x70, 0x5a, 0x25, 0x5c, 0x81, 0x9a, 0x1a, 0x34, 0xab, 0x8c, 0xcb, 0x50, 0xe9, 0x92, 0xb0, 0x2a, 0x08, 0xb0, 0xa4, 0x45, 0x5b, 0x55, 0xf6, 0x08, 0x56, 0x7f, 0xe2, 0xc2, 0xbb, 0x9a, 0xf4, 0xe4, 0x3e, 0xd4, 0x48, 0xaa, 0x31, 0x53, 0x03, 0xb9, 0x3e, 0x57, 0x07, 0xd8, 0x87, 0xb0, 0xfe, 0x9c, 0x44, 0x12, 0x78, 0xe9, 0xe4, 0xd0, 0x16, 0x2c, 0x5f, 0xeb, 0x2d, 0x33, 0x17, 0xd9, 0x92, 0x7d, 0x06, 0xcd, 0x67, 0x34, 0x7e, 0x29, 0x1b, 0x74, 0xc6, 0x83, 0xe4, 0x5d, 0x9b, 0x79, 0xf8, 0x47, 0x1d, 0x2a, 0xcf, 0x5e, 0x76, 0xf1, 0x1c, 0x56, 0x67, 0xbc, 0x11, 0xdb, 0x73, 0xb5, 0x38, 0x91, 0xb6, 0x6c, 0xdb, 0x4a, 0xe8, 0x42, 0x1f, 0x65, 0xf6, 0xef, 0x7f, 0xff, 0xf3, 0x67, 0xb9, 0x85, 0xe8, 0x8c, 0x1e, 0x39, 0x03, 0x03, 0x39, 0xf7, 0x14, 0xdf, 0x05, 0xac, 0xcd, 0xba, 0x69, 0x61, 0x86, 0x6d, 0x95, 0x61, 0xb1, 0xf5, 0xb2, 0x6d, 0x95, 0x62, 0x13, 0x37, 0x64, 0x8a, 0x24, 0xc3, 0x98, 0x1c, 0xc7, 0xc6, 0x2f, 0x8b, 0x98, 0x6f, 0xe7, 0xef, 0x39, 0xe3, 0xb3, 0x14, 0x1f, 0x60, 0x5d, 0xf2, 0xc9, 0x37, 0x8e, 0x67, 0xba, 0xa7, 0x68, 0x29, 0xf0, 0x94, 0xf7, 0xd8, 0x05, 0xb4, 0x6c, 0x47, 0x71, 0x6c, 0xd9, 0x96, 0xe4, 0x30, 0xef, 0xdd, 0x79, 0x1d, 0xf8, 0x6f, 0x8e, 0x94, 0x6b, 0xe0, 0x69, 0xee, 0xac, 0x45, 0xca, 0x5a, 0x33, 0xa6, 0x91, 0x89, 0xdb, 0x50, 0xc4, 0xab, 0xd8, 0x98, 0x22, 0xc6, 0x53, 0x33, 0x69, 0xa8, 0x6f, 0x33, 0x6d, 0x6f, 0x85, 0x0a, 0xb7, 0x14, 0x11, 0xee, 0xcf, 0x29, 0xc4, 0x33, 0xa8, 0x77, 
0x43, 0x1e, 0xa7, 0x57, 0x91, 0x28, 0x14, 0x57, 0xc4, 0xda, 0x52, 0xac, 0x6b, 0xd8, 0x94, 0xac, 0x69, 0xc6, 0x72, 0x0c, 0x95, 0xa7, 0x24, 0x50, 0x3f, 0xb8, 0xdc, 0xf2, 0x6c, 0x2b, 0xdf, 0x30, 0xd7, 0xbb, 0xa3, 0xce, 0x6f, 0xe0, 0x6d, 0x79, 0x5e, 0x3e, 0x32, 0xe7, 0x75, 0x9f, 0xc6, 0x8f, 0xf7, 0xf7, 0xdf, 0xe0, 0xb7, 0x50, 0x95, 0x0e, 0x66, 0x9a, 0x30, 0xe5, 0x79, 0xa6, 0x87, 0xd3, 0xf6, 0xc6, 0xee, 0x2a, 0x9e, 0x36, 0xb6, 0x72, 0x1e, 0x6d, 0x84, 0x8a, 0xea, 0x54, 0x3d, 0x47, 0xa3, 0x27, 0xb7, 0xbb, 0xc2, 0x5b, 0x19, 0x36, 0x7b, 0x5e, 0xd5, 0x51, 0x69, 0x1f, 0x5f, 0x64, 0x6f, 0x1a, 0x51, 0x11, 0xce, 0x38, 0x61, 0x21, 0xa7, 0xb9, 0xe9, 0xfe, 0x82, 0x9b, 0x7e, 0x0e, 0x35, 0x65, 0x0c, 0x85, 0xd5, 0xd7, 0x79, 0x66, 0xcc, 0x83, 0xdd, 0xfa, 0xb8, 0x24, 0xa7, 0xca, 0xd8, 0xc3, 0x5b, 0xa6, 0xea, 0x86, 0x89, 0xcc, 0x4e, 0x95, 0xf1, 0x8f, 0xaf, 0x77, 0x7f, 0xbe, 0xd7, 0x0b, 0xc4, 0xd5, 0xf0, 0xa2, 0xe3, 0x45, 0xd7, 0xce, 0x75, 0x94, 0x0e, 0xfb, 0xdc, 0xf1, 0x48, 0xe4, 0x7f, 0xcb, 0x2e, 0x96, 0xd4, 0xd7, 0x27, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xf6, 0xd7, 0x64, 0xe4, 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // KVSClient is the client API for KVS service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
// NOTE(review): this is protoc-generated gRPC stub code (the file header says
// "DO NOT EDIT"); regenerate from protobuf/kvs.proto instead of hand-editing.
type KVSClient interface {
	// Unary RPCs over an empty request.
	LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error)
	ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error)
	Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error)
	// Cluster-membership RPCs.
	Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error)
	Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error)
	// Key-value RPCs.
	Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
	Scan(ctx context.Context, in *ScanRequest, opts ...grpc.CallOption) (*ScanResponse, error)
	Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Watch opens a server-streaming RPC that yields WatchResponse messages.
	Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (KVS_WatchClient, error)
	Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error)
}

// kVSClient is the concrete KVSClient implementation backed by a gRPC
// client connection.
type kVSClient struct {
	cc grpc.ClientConnInterface
}

// NewKVSClient wraps an existing client connection in the generated stub.
func NewKVSClient(cc grpc.ClientConnInterface) KVSClient {
	return &kVSClient{cc}
}

// Each unary method below invokes the fully-qualified method name
// ("/kvs.KVS/<Method>") on the underlying connection and decodes the reply
// into a freshly allocated response message.

func (c *kVSClient) LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) {
	out := new(LivenessCheckResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/LivenessCheck", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) {
	out := new(ReadinessCheckResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/ReadinessCheck", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) {
	out := new(NodeResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Node", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
	out := new(empty.Empty)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Join", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) {
	out := new(ClusterResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Cluster", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
	out := new(empty.Empty)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Leave", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) {
	out := new(empty.Empty)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Snapshot", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
	out := new(GetResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Get", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Scan(ctx context.Context, in *ScanRequest, opts ...grpc.CallOption) (*ScanResponse, error) {
	out := new(ScanResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Scan", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
	out := new(empty.Empty)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Set", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *kVSClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
	out := new(empty.Empty)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Delete", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Watch opens the server stream (Streams[0] of the service descriptor),
// sends the single request message, half-closes the send side, and hands
// the caller a receive-only stream wrapper.
func (c *kVSClient) Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (KVS_WatchClient, error) {
	stream, err := c.cc.NewStream(ctx, &_KVS_serviceDesc.Streams[0], "/kvs.KVS/Watch", opts...)
	if err != nil {
		return nil, err
	}
	x := &kVSWatchClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// KVS_WatchClient is the client-side handle for the Watch server stream.
type KVS_WatchClient interface {
	Recv() (*WatchResponse, error)
	grpc.ClientStream
}

type kVSWatchClient struct {
	grpc.ClientStream
}

// Recv blocks for the next WatchResponse on the stream.
func (x *kVSWatchClient) Recv() (*WatchResponse, error) {
	m := new(WatchResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *kVSClient) Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) {
	out := new(MetricsResponse)
	err := c.cc.Invoke(ctx, "/kvs.KVS/Metrics", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// KVSServer is the server API for KVS service.
// NOTE(review): protoc-generated server-side stubs ("DO NOT EDIT" in the file
// header); regenerate from protobuf/kvs.proto instead of hand-editing.
type KVSServer interface {
	LivenessCheck(context.Context, *empty.Empty) (*LivenessCheckResponse, error)
	ReadinessCheck(context.Context, *empty.Empty) (*ReadinessCheckResponse, error)
	Node(context.Context, *empty.Empty) (*NodeResponse, error)
	Join(context.Context, *JoinRequest) (*empty.Empty, error)
	Cluster(context.Context, *empty.Empty) (*ClusterResponse, error)
	Leave(context.Context, *LeaveRequest) (*empty.Empty, error)
	Snapshot(context.Context, *empty.Empty) (*empty.Empty, error)
	Get(context.Context, *GetRequest) (*GetResponse, error)
	Scan(context.Context, *ScanRequest) (*ScanResponse, error)
	Set(context.Context, *SetRequest) (*empty.Empty, error)
	Delete(context.Context, *DeleteRequest) (*empty.Empty, error)
	// Watch is server-streaming: the implementation pushes WatchResponse
	// messages through the supplied stream.
	Watch(*empty.Empty, KVS_WatchServer) error
	Metrics(context.Context, *empty.Empty) (*MetricsResponse, error)
}

// UnimplementedKVSServer can be embedded to have forward compatible implementations.
// Every stub returns codes.Unimplemented so embedders only override what they serve.
type UnimplementedKVSServer struct {
}

func (*UnimplementedKVSServer) LivenessCheck(ctx context.Context, req *empty.Empty) (*LivenessCheckResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method LivenessCheck not implemented")
}
func (*UnimplementedKVSServer) ReadinessCheck(ctx context.Context, req *empty.Empty) (*ReadinessCheckResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ReadinessCheck not implemented")
}
func (*UnimplementedKVSServer) Node(ctx context.Context, req *empty.Empty) (*NodeResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Node not implemented")
}
func (*UnimplementedKVSServer) Join(ctx context.Context, req *JoinRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Join not implemented")
}
func (*UnimplementedKVSServer) Cluster(ctx context.Context, req *empty.Empty) (*ClusterResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Cluster not implemented")
}
func (*UnimplementedKVSServer) Leave(ctx context.Context, req *LeaveRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented")
}
func (*UnimplementedKVSServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
}
func (*UnimplementedKVSServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
}
func (*UnimplementedKVSServer) Scan(ctx context.Context, req *ScanRequest) (*ScanResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Scan not implemented")
}
func (*UnimplementedKVSServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Set not implemented")
}
func (*UnimplementedKVSServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
}
func (*UnimplementedKVSServer) Watch(req *empty.Empty, srv KVS_WatchServer) error {
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
func (*UnimplementedKVSServer) Metrics(ctx context.Context, req *empty.Empty) (*MetricsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented")
}

// RegisterKVSServer registers the KVS implementation with the gRPC server
// using the generated service descriptor.
func RegisterKVSServer(s *grpc.Server, srv KVSServer) {
	s.RegisterService(&_KVS_serviceDesc, srv)
}

// Each unary handler below decodes the request, calls the service method
// directly when no interceptor is installed, and otherwise routes the call
// through the server's unary interceptor chain.

func _KVS_LivenessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(empty.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).LivenessCheck(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/LivenessCheck",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).LivenessCheck(ctx, req.(*empty.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_ReadinessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(empty.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).ReadinessCheck(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/ReadinessCheck",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).ReadinessCheck(ctx, req.(*empty.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Node_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(empty.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Node(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Node",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Node(ctx, req.(*empty.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(JoinRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Join(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Join",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Join(ctx, req.(*JoinRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Cluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(empty.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Cluster(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Cluster",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Cluster(ctx, req.(*empty.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(LeaveRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Leave(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Leave",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Leave(ctx, req.(*LeaveRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(empty.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Snapshot(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Snapshot",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Snapshot(ctx, req.(*empty.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Get(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Get",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Get(ctx, req.(*GetRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Scan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ScanRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Scan(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Scan",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Scan(ctx, req.(*ScanRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SetRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Set(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Set",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Set(ctx, req.(*SetRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _KVS_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(KVSServer).Delete(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/kvs.KVS/Delete",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(KVSServer).Delete(ctx, req.(*DeleteRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _KVS_Watch_Handler reads the single request message from the stream and
// delegates to the service's Watch with a send-only stream wrapper.
func _KVS_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(empty.Empty)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(KVSServer).Watch(m, &kVSWatchServer{stream})
}

// KVS_WatchServer is the server-side handle for the Watch stream.
type KVS_WatchServer interface {
	Send(*WatchResponse) error
	grpc.ServerStream
}

type kVSWatchServer struct {
	grpc.ServerStream
}

// Send pushes one WatchResponse to the client.
func (x *kVSWatchServer) Send(m *WatchResponse) error {
	return x.ServerStream.SendMsg(m)
}

func
_KVS_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(KVSServer).Metrics(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/kvs.KVS/Metrics", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KVSServer).Metrics(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } var _KVS_serviceDesc = grpc.ServiceDesc{ ServiceName: "kvs.KVS", HandlerType: (*KVSServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "LivenessCheck", Handler: _KVS_LivenessCheck_Handler, }, { MethodName: "ReadinessCheck", Handler: _KVS_ReadinessCheck_Handler, }, { MethodName: "Node", Handler: _KVS_Node_Handler, }, { MethodName: "Join", Handler: _KVS_Join_Handler, }, { MethodName: "Cluster", Handler: _KVS_Cluster_Handler, }, { MethodName: "Leave", Handler: _KVS_Leave_Handler, }, { MethodName: "Snapshot", Handler: _KVS_Snapshot_Handler, }, { MethodName: "Get", Handler: _KVS_Get_Handler, }, { MethodName: "Scan", Handler: _KVS_Scan_Handler, }, { MethodName: "Set", Handler: _KVS_Set_Handler, }, { MethodName: "Delete", Handler: _KVS_Delete_Handler, }, { MethodName: "Metrics", Handler: _KVS_Metrics_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Watch", Handler: _KVS_Watch_Handler, ServerStreams: true, }, }, Metadata: "protobuf/kvs.proto", } ================================================ FILE: protobuf/kvs.pb.gw.go ================================================ // Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: protobuf/kvs.proto /* Package protobuf is a reverse proxy. It translates gRPC into RESTful JSON APIs. 
*/
package protobuf

// NOTE(review): protoc-gen-grpc-gateway output ("DO NOT EDIT" in the file
// header); regenerate from protobuf/kvs.proto instead of hand-editing.

import (
	"context"
	"io"
	"net/http"

	"github.com/golang/protobuf/descriptor"
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
)

// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = descriptor.ForMessage

// The request_* functions translate an HTTP request into a call on the gRPC
// client stub (remote backend); the local_request_* variants call the
// KVSServer implementation in-process. Header/trailer metadata from the RPC
// is captured into runtime.ServerMetadata for the HTTP response.

func request_KVS_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := client.LivenessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := server.LivenessCheck(ctx, &protoReq)
	return msg, metadata, err

}

func request_KVS_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := client.ReadinessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := server.ReadinessCheck(ctx, &protoReq)
	return msg, metadata, err

}

func request_KVS_Node_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := client.Node(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Node_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := server.Node(ctx, &protoReq)
	return msg, metadata, err

}

// Join: the HTTP body is decoded into protoReq.Node and the "id" path
// parameter into protoReq.Id.
func request_KVS_Join_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq JoinRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
	}

	protoReq.Id, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
	}

	msg, err := client.Join(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Join_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq JoinRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
	}

	protoReq.Id, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
	}

	msg, err := server.Join(ctx, &protoReq)
	return msg, metadata, err

}

func request_KVS_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := client.Cluster(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := server.Cluster(ctx, &protoReq)
	return msg, metadata, err

}

// Leave: "id" path parameter only.
func request_KVS_Leave_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq LeaveRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
	}

	protoReq.Id, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
	}

	msg, err := client.Leave(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Leave_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq LeaveRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
	}

	protoReq.Id, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
	}

	msg, err := server.Leave(ctx, &protoReq)
	return msg, metadata, err

}

func request_KVS_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq empty.Empty
	var metadata runtime.ServerMetadata

	msg, err := server.Snapshot(ctx, &protoReq)
	return msg, metadata, err

}

// Get: "key" path parameter only.
func request_KVS_Get_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq GetRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["key"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key")
	}

	protoReq.Key, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err)
	}

	msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Get_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq GetRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["key"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key")
	}

	protoReq.Key, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err)
	}

	msg, err := server.Get(ctx, &protoReq)
	return msg, metadata, err

}

// Scan: "prefix" path parameter only.
func request_KVS_Scan_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq ScanRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["prefix"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "prefix")
	}

	protoReq.Prefix, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "prefix", err)
	}

	msg, err := client.Scan(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Scan_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq ScanRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["prefix"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "prefix")
	}

	protoReq.Prefix, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "prefix", err)
	}

	msg, err := server.Scan(ctx, &protoReq)
	return msg, metadata, err

}

// Set: HTTP body is decoded into the whole SetRequest; "key" path parameter
// then overrides protoReq.Key.
func request_KVS_Set_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq SetRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["key"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key")
	}

	protoReq.Key, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err)
	}

	msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Set_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq SetRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["key"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key")
	}

	protoReq.Key, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err)
	}

	msg, err := server.Set(ctx, &protoReq)
	return msg, metadata, err

}

// Delete: "key" path parameter only.
func request_KVS_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq DeleteRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["key"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key")
	}

	protoReq.Key, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err)
	}

	msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_KVS_Delete_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq DeleteRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["key"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key")
	}

	protoReq.Key, err = runtime.String(val)
	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err)
	}

	msg, err := server.Delete(ctx, &protoReq)
	return msg, metadata, err

}

func
request_KVS_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, client KVSClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata msg, err := client.Metrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_KVS_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, server KVSServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata msg, err := server.Metrics(ctx, &protoReq) return msg, metadata, err } // RegisterKVSHandlerServer registers the http handlers for service KVS to "mux". // UnaryRPC :call KVSServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. func RegisterKVSHandlerServer(ctx context.Context, mux *runtime.ServeMux, server KVSServer) error { mux.Handle("GET", pattern_KVS_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_LivenessCheck_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_ReadinessCheck_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Node_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("PUT", pattern_KVS_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Join_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Cluster_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("DELETE", pattern_KVS_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Leave_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Snapshot_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Get_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_Scan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Scan_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Scan_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("PUT", pattern_KVS_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Set_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("DELETE", pattern_KVS_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Delete_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_KVS_Metrics_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) return nil } // RegisterKVSHandlerFromEndpoint is same as RegisterKVSHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterKVSHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { conn, err := grpc.Dial(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } }() }() return RegisterKVSHandler(ctx, mux, conn) } // RegisterKVSHandler registers the http handlers for service KVS to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterKVSHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { return RegisterKVSHandlerClient(ctx, mux, NewKVSClient(conn)) } // RegisterKVSHandlerClient registers the http handlers for service KVS // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "KVSClient". 
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVSClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "KVSClient" to call the correct interceptors. func RegisterKVSHandlerClient(ctx context.Context, mux *runtime.ServeMux, client KVSClient) error { mux.Handle("GET", pattern_KVS_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_LivenessCheck_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_ReadinessCheck_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Node_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("PUT", pattern_KVS_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Join_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Cluster_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("DELETE", pattern_KVS_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Leave_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Get_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_KVS_Scan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Scan_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Scan_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("PUT", pattern_KVS_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Set_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("DELETE", pattern_KVS_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Delete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_KVS_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_KVS_Metrics_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_KVS_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) return nil } var ( pattern_KVS_LivenessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "liveness_check"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_ReadinessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "readiness_check"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Node_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "node"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Join_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Cluster_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "cluster"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Leave_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Scan_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "prefix"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) pattern_KVS_Metrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( 
forward_KVS_LivenessCheck_0 = runtime.ForwardResponseMessage

	forward_KVS_ReadinessCheck_0 = runtime.ForwardResponseMessage

	forward_KVS_Node_0 = runtime.ForwardResponseMessage

	forward_KVS_Join_0 = runtime.ForwardResponseMessage

	forward_KVS_Cluster_0 = runtime.ForwardResponseMessage

	forward_KVS_Leave_0 = runtime.ForwardResponseMessage

	forward_KVS_Snapshot_0 = runtime.ForwardResponseMessage

	forward_KVS_Get_0 = runtime.ForwardResponseMessage

	forward_KVS_Scan_0 = runtime.ForwardResponseMessage

	forward_KVS_Set_0 = runtime.ForwardResponseMessage

	forward_KVS_Delete_0 = runtime.ForwardResponseMessage

	forward_KVS_Metrics_0 = runtime.ForwardResponseMessage
)

================================================
FILE: protobuf/kvs.proto
================================================
syntax = "proto3";

import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";

package kvs;

option go_package = "github.com/mosuka/cete/protobuf";

// KVS is the distributed key-value store service. Each RPC carries a
// google.api.http annotation so grpc-gateway can expose it over REST.
service KVS {
    rpc LivenessCheck (google.protobuf.Empty) returns (LivenessCheckResponse) {
        option (google.api.http) = {
            get: "/v1/liveness_check"
        };
    }
    rpc ReadinessCheck (google.protobuf.Empty) returns (ReadinessCheckResponse) {
        option (google.api.http) = {
            get: "/v1/readiness_check"
        };
    }
    rpc Node (google.protobuf.Empty) returns (NodeResponse) {
        option (google.api.http) = {
            get: "/v1/node"
        };
    }
    rpc Join (JoinRequest) returns (google.protobuf.Empty) {
        option (google.api.http) = {
            put: "/v1/cluster/{id}"
            body: "node"
        };
    }
    rpc Cluster (google.protobuf.Empty) returns (ClusterResponse) {
        option (google.api.http) = {
            get: "/v1/cluster"
        };
    }
    rpc Leave (LeaveRequest) returns (google.protobuf.Empty) {
        option (google.api.http) = {
            delete: "/v1/cluster/{id}"
        };
    }
    rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {
        option (google.api.http) = {
            get: "/v1/snapshot"
        };
    }
    rpc Get (GetRequest) returns (GetResponse) {
        option (google.api.http) = {
            get: "/v1/data/{key=**}"
        };
    }
    rpc Scan (ScanRequest) returns (ScanResponse) {
        option (google.api.http) = {
            get: "/v1/data/{prefix=**}"
        };
    }
    rpc Set (SetRequest) returns (google.protobuf.Empty) {
        option (google.api.http) = {
            put: "/v1/data/{key=**}"
            body: "*"
        };
    }
    rpc Delete (DeleteRequest) returns (google.protobuf.Empty) {
        option (google.api.http) = {
            delete: "/v1/data/{key=**}"
        };
    }
    // Watch is a server stream; grpc-gateway does not expose it over REST.
    rpc Watch (google.protobuf.Empty) returns (stream WatchResponse) {}
    rpc Metrics (google.protobuf.Empty) returns (MetricsResponse) {
        option (google.api.http) = {
            get: "/v1/metrics"
        };
    }
}

message LivenessCheckResponse {
    bool alive = 1;
}

message ReadinessCheckResponse {
    bool ready = 1;
}

message Metadata {
    string grpc_address = 1;
    string http_address = 2;
}

message Node {
    string raft_address = 1;
    Metadata metadata = 2;
    string state = 3;
}

message Cluster {
    // FIX: the map type arguments were missing ("map nodes = 1;"), which is
    // invalid proto3 — the "<string, Node>" was lost to markup stripping.
    // Keys are node ids, matching the server's per-id node bookkeeping.
    map<string, Node> nodes = 1;
    string leader = 2;
}

message JoinRequest {
    string id = 1;
    Node node = 2;
}

message LeaveRequest {
    string id = 1;
}

message NodeResponse {
    Node node = 1;
}

message ClusterResponse {
    Cluster cluster = 1;
}

message GetRequest {
    string key = 1;
}

message GetResponse {
    bytes value = 1;
}

message ScanRequest {
    string prefix = 1;
}

message ScanResponse {
    repeated bytes values = 1;
}

message SetRequest {
    string key = 1;
    bytes value = 2;
}

message DeleteRequest {
    string key = 1;
}

message SetMetadataRequest {
    string id = 1;
    Metadata metadata = 2;
}

message DeleteMetadataRequest {
    string id = 1;
}

message Event {
    enum Type {
        Unknown = 0;
        Join = 1;
        Leave = 2;
        Set = 3;
        Delete = 4;
    }
    Type type = 1;
    google.protobuf.Any data = 2;
}

message WatchResponse {
    Event event = 1;
}

message MetricsResponse {
    bytes metrics = 1;
}

message KeyValuePair {
    string key = 1;
    bytes value = 2;
}

================================================
FILE: registry/type.go
================================================
package registry

import (
	"errors"
	"fmt"
	"reflect"
)

// TypeRegistry maps a registered name to its reflect.Type.
type TypeRegistry map[string]reflect.Type

// Types is the process-wide registry instance.
var Types = make(TypeRegistry, 0)

func
RegisterType(name string, typ reflect.Type) {
	// Registering two different types under one name would silently shadow
	// the first, so fail loudly on duplicates instead.
	if _, exists := Types[name]; exists {
		// NOTE(review): errors.New(fmt.Sprintf(...)) is equivalent to
		// fmt.Errorf(...) (staticcheck S1028); left unchanged here because the
		// "errors" import, declared outside this span, would become unused.
		panic(errors.New(fmt.Sprintf("attempted to register duplicate index: %s", name)))
	}
	Types[name] = typ
}

// TypeByName returns the reflect.Type registered under name, or nil if the
// name has never been registered.
func TypeByName(name string) reflect.Type {
	return Types[name]
}

// TypeNameByInstance returns the registry name for a value: string-keyed maps
// are named by their own type, everything else by the type behind the pointer.
func TypeNameByInstance(instance interface{}) string {
	switch ins := instance.(type) {
	case map[string]interface{}:
		return reflect.TypeOf(ins).String()
	default:
		// Elem() assumes instance is a pointer (or another Elem-able kind);
		// a plain non-pointer value here would panic — callers pass pointers.
		return reflect.TypeOf(ins).Elem().String()
	}
}

// TypeInstanceByName allocates a new zero value of the registered type and
// returns a pointer to it as interface{}. An unregistered name makes
// TypeByName return nil, which causes reflect.New to panic — register first.
func TypeInstanceByName(name string) interface{} {
	return reflect.New(TypeByName(name)).Interface()
}

================================================
FILE: server/grpc_gateway.go
================================================
package server

import (
	"context"
	"math"
	"net"
	"net/http"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/mosuka/cete/marshaler"
	"github.com/mosuka/cete/protobuf"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
)

// responseFilter sets the HTTP Content-Type header per response message type:
// raw Get values are content-sniffed, metrics use the Prometheus text
// exposition format, everything else falls back to the marshaler default.
func responseFilter(ctx context.Context, w http.ResponseWriter, resp proto.Message) error {
	switch resp.(type) {
	case *protobuf.GetResponse:
		if r, ok := resp.(*protobuf.GetResponse); ok {
			w.Header().Set("Content-Type", http.DetectContentType(r.Value))
		}
	case *protobuf.MetricsResponse:
		w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
	default:
		w.Header().Set("Content-Type", marshaler.DefaultContentType)
	}
	return nil
}

// GRPCGateway serves a REST/JSON HTTP endpoint that proxies requests to the
// gRPC server at grpcAddress via grpc-gateway.
type GRPCGateway struct {
	httpAddress string // address the HTTP listener binds to
	grpcAddress string // backend gRPC endpoint the gateway dials

	cancel   context.CancelFunc // cancels the dial context, closing the backend conn
	listener net.Listener
	mux      *runtime.ServeMux

	certificateFile string // TLS certificate; empty means plain HTTP
	keyFile         string

	logger *zap.Logger
}

// NewGRPCGateway dials grpcAddress (TLS when certificateFile is non-empty),
// registers the KVS REST handlers on a grpc-gateway mux, and binds an HTTP
// listener on httpAddress. Call Start to begin serving.
func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCGateway, error) {
	dialOpts := []grpc.DialOption{
		grpc.WithDefaultCallOptions(
			// Effectively unlimited message sizes for proxied calls.
			grpc.MaxCallSendMsgSize(math.MaxInt64),
			grpc.MaxCallRecvMsgSize(math.MaxInt64),
		),
		grpc.WithKeepaliveParams(
			keepalive.ClientParameters{
				Time:                1 * time.Second,
				Timeout:             5 * time.Second,
				PermitWithoutStream: true,
			},
		),
	}

	baseCtx := context.TODO()
	ctx, cancel := context.WithCancel(baseCtx)

	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, new(marshaler.CeteMarshaler)),
		runtime.WithForwardResponseOption(responseFilter),
	)

	if certificateFile == "" {
		dialOpts = append(dialOpts, grpc.WithInsecure())
	} else {
		creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName)
		if err != nil {
			return nil, err
		}
		dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds))
	}

	err := protobuf.RegisterKVSHandlerFromEndpoint(ctx, mux, grpcAddress, dialOpts)
	if err != nil {
		logger.Error("failed to register KVS handler from endpoint", zap.Error(err))
		return nil, err
	}

	listener, err := net.Listen("tcp", httpAddress)
	if err != nil {
		logger.Error("failed to create key value store service", zap.Error(err))
		return nil, err
	}

	return &GRPCGateway{
		httpAddress:     httpAddress,
		grpcAddress:     grpcAddress,
		listener:        listener,
		mux:             mux,
		cancel:          cancel,
		certificateFile: certificateFile,
		keyFile:         keyFile,
		logger:          logger,
	}, nil
}

// Start begins serving HTTP (or HTTPS when both cert and key are configured)
// on the gateway listener in a background goroutine. Serve errors are
// deliberately discarded; Stop closes the listener to terminate serving.
func (s *GRPCGateway) Start() error {
	if s.certificateFile == "" && s.keyFile == "" {
		go func() {
			_ = http.Serve(s.listener, s.mux)
		}()
	} else {
		go func() {
			_ = http.ServeTLS(s.listener, s.mux, s.certificateFile, s.keyFile)
		}()
	}

	s.logger.Info("gRPC gateway started", zap.String("http_address", s.httpAddress))
	return nil
}

// Stop closes the HTTP listener and cancels the backend dial context.
// NOTE(review): a listener-close failure is logged but not returned — callers
// always see nil; confirm that best-effort shutdown is intended.
func (s *GRPCGateway) Stop() error {
	defer s.cancel()

	err := s.listener.Close()
	if err != nil {
		s.logger.Error("failed to close listener", zap.String("http_address", s.listener.Addr().String()), zap.Error(err))
	}

	s.logger.Info("gRPC gateway stopped", zap.String("http_address", s.httpAddress))
	return nil
}

================================================
FILE: server/grpc_server.go
================================================
package server

import (
	"math"
	"net"
	"time"

	grpcmiddleware
"github.com/grpc-ecosystem/go-grpc-middleware" grpczap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/mosuka/cete/metric" "github.com/mosuka/cete/protobuf" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" ) type GRPCServer struct { grpcAddress string service *GRPCService server *grpc.Server listener net.Listener certFile string keyFile string certHostname string logger *zap.Logger } func NewGRPCServer(grpcAddress string, raftServer *RaftServer, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCServer, error) { grpcLogger := logger.Named("grpc") opts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(math.MaxInt64), grpc.MaxSendMsgSize(math.MaxInt64), grpc.StreamInterceptor( grpcmiddleware.ChainStreamServer( metric.GrpcMetrics.StreamServerInterceptor(), grpczap.StreamServerInterceptor(grpcLogger), ), ), grpc.UnaryInterceptor( grpcmiddleware.ChainUnaryServer( metric.GrpcMetrics.UnaryServerInterceptor(), grpczap.UnaryServerInterceptor(grpcLogger), ), ), grpc.KeepaliveParams( keepalive.ServerParameters{ //MaxConnectionIdle: 0, //MaxConnectionAge: 0, //MaxConnectionAgeGrace: 0, Time: 5 * time.Second, Timeout: 5 * time.Second, }, ), } if certificateFile == "" && keyFile == "" { logger.Info("disabling TLS") } else { logger.Info("enabling TLS") creds, err := credentials.NewServerTLSFromFile(certificateFile, keyFile) if err != nil { logger.Error("failed to create credentials", zap.Error(err)) } opts = append(opts, grpc.Creds(creds)) } server := grpc.NewServer( opts..., ) service, err := NewGRPCService(raftServer, certificateFile, commonName, logger) if err != nil { logger.Error("failed to create key value store service", zap.Error(err)) return nil, err } protobuf.RegisterKVSServer(server, service) // Initialize all metrics. 
metric.GrpcMetrics.InitializeMetrics(server) grpc_prometheus.Register(server) listener, err := net.Listen("tcp", grpcAddress) if err != nil { logger.Error("failed to create listener", zap.String("grpc_address", grpcAddress), zap.Error(err)) return nil, err } return &GRPCServer{ grpcAddress: grpcAddress, service: service, server: server, listener: listener, certFile: certificateFile, keyFile: keyFile, certHostname: commonName, logger: logger, }, nil } func (s *GRPCServer) Start() error { if err := s.service.Start(); err != nil { s.logger.Error("failed to start service", zap.Error(err)) } go func() { _ = s.server.Serve(s.listener) }() s.logger.Info("gRPC server started", zap.String("grpc_address", s.grpcAddress)) return nil } func (s *GRPCServer) Stop() error { if err := s.service.Stop(); err != nil { s.logger.Error("failed to stop service", zap.Error(err)) } //s.server.GracefulStop() s.server.Stop() s.logger.Info("gRPC server stopped", zap.String("grpc_address", s.grpcAddress)) return nil } ================================================ FILE: server/grpc_service.go ================================================ package server import ( "bytes" "context" "sync" "time" "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/raft" "github.com/mosuka/cete/client" "github.com/mosuka/cete/errors" "github.com/mosuka/cete/metric" "github.com/mosuka/cete/protobuf" "github.com/prometheus/common/expfmt" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) type GRPCService struct { raftServer *RaftServer certificateFile string commonName string logger *zap.Logger watchMutex sync.RWMutex watchChans map[chan protobuf.WatchResponse]struct{} peerClients map[string]*client.GRPCClient watchClusterStopCh chan struct{} watchClusterDoneCh chan struct{} } func NewGRPCService(raftServer *RaftServer, certificateFile string, commonName string, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ raftServer: raftServer, certificateFile: 
certificateFile,
		commonName:         commonName,
		logger:             logger,
		watchChans:         make(map[chan protobuf.WatchResponse]struct{}),
		peerClients:        make(map[string]*client.GRPCClient),
		watchClusterStopCh: make(chan struct{}),
		watchClusterDoneCh: make(chan struct{}),
	}, nil
}

// Start launches the cluster watcher in the background.
func (s *GRPCService) Start() error {
	go func() {
		s.startWatchCluster(500 * time.Millisecond)
	}()

	s.logger.Info("gRPC service started")
	return nil
}

// Stop stops the cluster watcher and closes all peer clients.
func (s *GRPCService) Stop() error {
	s.stopWatchCluster()

	s.logger.Info("gRPC service stopped")
	return nil
}

// startWatchCluster forwards applied Raft events to Watch subscribers
// and keeps peerClients in sync with the cluster membership, polling
// every checkInterval. It runs until watchClusterStopCh is closed and
// signals completion by closing watchClusterDoneCh.
func (s *GRPCService) startWatchCluster(checkInterval time.Duration) {
	s.logger.Info("start to update cluster info")

	defer func() {
		close(s.watchClusterDoneCh)
	}()

	ticker := time.NewTicker(checkInterval)
	defer ticker.Stop()

	timeout := 60 * time.Second
	if err := s.raftServer.WaitForDetectLeader(timeout); err != nil {
		if err == errors.ErrTimeout {
			s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err))
		} else {
			s.logger.Error("failed to detect leader", zap.Error(err))
		}
	}

	for {
		select {
		case <-s.watchClusterStopCh:
			s.logger.Info("received a request to stop updating a cluster")
			return
		case event := <-s.raftServer.applyCh:
			watchResp := &protobuf.WatchResponse{
				Event: event,
			}
			// BUG FIX: the original iterated watchChans without holding
			// watchMutex, racing with Watch()'s map mutation, and a blocking
			// send could hit a channel Watch() had already closed (panic).
			// Holding the read lock excludes Watch()'s delete/close (done
			// under the write lock); the non-blocking send keeps a stalled
			// subscriber from wedging this loop while the lock is held.
			s.watchMutex.RLock()
			for c := range s.watchChans {
				select {
				case c <- *watchResp:
				default:
					// Subscriber not ready; drop the event for it rather
					// than deadlocking the watcher.
				}
			}
			s.watchMutex.RUnlock()
		case <-ticker.C:
			s.watchMutex.Lock()

			// Open clients for peer nodes.
			nodes, err := s.raftServer.Nodes()
			if err != nil {
				s.logger.Warn("failed to get cluster info", zap.String("err", err.Error()))
			}
			for id, node := range nodes {
				if id == s.raftServer.id {
					continue
				}
				if node.Metadata == nil || node.Metadata.GrpcAddress == "" {
					s.logger.Debug("gRPC address missing", zap.String("id", id))
					continue
				}
				if c, ok := s.peerClients[id]; ok {
					if c.Target() != node.Metadata.GrpcAddress {
						// Address changed: replace the stale client.
						s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target()))
						delete(s.peerClients, id)
						if err := c.Close(); err != nil {
							s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err))
						}
						s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress))
						if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil {
							s.peerClients[id] = newClient
						} else {
							s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress), zap.Error(err))
						}
					}
				} else {
					s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress))
					if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil {
						s.peerClients[id] = newClient
					} else {
						// BUG FIX: the original logged c.Target() here, but in
						// this branch c is the map's nil zero value — a nil
						// pointer dereference. Log the target address instead.
						s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress), zap.Error(err))
					}
				}
			}

			// Close clients for peers that left the cluster.
			for id, c := range s.peerClients {
				if _, exist := nodes[id]; !exist {
					s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target()))
					delete(s.peerClients, id)
					if err := c.Close(); err != nil {
						s.logger.Warn("failed to close old client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err))
					}
				}
			}

			s.watchMutex.Unlock()
		}
	}
}

// stopWatchCluster signals the watcher to stop, waits for it to finish,
// then closes every remaining peer client.
func (s *GRPCService) stopWatchCluster() {
	if s.watchClusterStopCh != nil {
		s.logger.Info("send a request to stop updating a cluster")
		close(s.watchClusterStopCh)
	}

	s.logger.Info("wait for the cluster watching to stop")
	<-s.watchClusterDoneCh
	s.logger.Info("the cluster watching has been stopped")

	s.logger.Info("close all peer clients")
	for id, c := range s.peerClients {
		s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target()))
		delete(s.peerClients, id)
		if err := c.Close(); err != nil {
			s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err))
		}
	}
}

func (s *GRPCService)
LivenessCheck(ctx context.Context, req *empty.Empty) (*protobuf.LivenessCheckResponse, error) {
	// Liveness is unconditional: if this handler runs, the process is alive.
	resp := &protobuf.LivenessCheckResponse{}
	resp.Alive = true
	return resp, nil
}

// ReadinessCheck reports ready only when a leader is reachable and this
// node is neither a candidate nor shut down.
func (s *GRPCService) ReadinessCheck(ctx context.Context, req *empty.Empty) (*protobuf.ReadinessCheckResponse, error) {
	resp := &protobuf.ReadinessCheckResponse{}

	timeout := 10 * time.Second
	if err := s.raftServer.WaitForDetectLeader(timeout); err != nil {
		s.logger.Error("missing leader node", zap.Error(err))
		return resp, status.Error(codes.Internal, err.Error())
	}

	if s.raftServer.State() == raft.Candidate || s.raftServer.State() == raft.Shutdown {
		err := errors.ErrNodeNotReady
		s.logger.Error(err.Error(), zap.Error(err))
		return resp, status.Error(codes.Internal, err.Error())
	}

	resp.Ready = true
	return resp, nil
}

// leaderClient returns the gRPC client for the current cluster leader,
// or an error when no client is available yet.
func (s *GRPCService) leaderClient(ctx context.Context) (*client.GRPCClient, error) {
	clusterResp, err := s.Cluster(ctx, &empty.Empty{})
	if err != nil {
		s.logger.Error("failed to get cluster info", zap.Error(err))
		return nil, err
	}

	// peerClients is mutated by the cluster watcher; read under the lock.
	s.watchMutex.RLock()
	c, ok := s.peerClients[clusterResp.Cluster.Leader]
	s.watchMutex.RUnlock()
	if !ok || c == nil {
		// BUG FIX: the original indexed peerClients without checking and
		// dereferenced a nil client when no connection to the leader
		// existed yet.
		err := errors.ErrNotFoundLeader
		s.logger.Error("no client for leader", zap.String("leader", clusterResp.Cluster.Leader), zap.Error(err))
		return nil, err
	}
	return c, nil
}

// Join adds a node to the cluster, forwarding to the leader when this
// node is not the leader.
func (s *GRPCService) Join(ctx context.Context, req *protobuf.JoinRequest) (*empty.Empty, error) {
	resp := &empty.Empty{}

	if s.raftServer.raft.State() != raft.Leader {
		c, err := s.leaderClient(ctx)
		if err != nil {
			return resp, status.Error(codes.Internal, err.Error())
		}
		if err := c.Join(req); err != nil {
			s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err))
			return resp, status.Error(codes.Internal, err.Error())
		}
		return resp, nil
	}

	err := s.raftServer.Join(req.Id, req.Node)
	if err != nil {
		switch err {
		case errors.ErrNodeAlreadyExists:
			// Re-joining an existing node is not an error for the caller.
			s.logger.Debug("node already exists", zap.Any("req", req), zap.Error(err))
		default:
			s.logger.Error("failed to join node to the cluster", zap.String("id", req.Id), zap.Error(err))
			return resp, status.Error(codes.Internal, err.Error())
		}
	}

	return resp, nil
}

// Leave removes a node from the cluster, forwarding to the leader when
// this node is not the leader.
func (s *GRPCService) Leave(ctx context.Context, req *protobuf.LeaveRequest) (*empty.Empty, error) {
	resp := &empty.Empty{}

	if s.raftServer.raft.State() != raft.Leader {
		c, err := s.leaderClient(ctx)
		if err != nil {
			return resp, status.Error(codes.Internal, err.Error())
		}
		if err := c.Leave(req); err != nil {
			s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err))
			return resp, status.Error(codes.Internal, err.Error())
		}
		return resp, nil
	}

	err := s.raftServer.Leave(req.Id)
	if err != nil {
		s.logger.Error("failed to leave node from the cluster", zap.Any("req", req), zap.Error(err))
		return resp, status.Error(codes.Internal, err.Error())
	}

	return resp, nil
}

// Node returns this node's information, including its Raft state.
func (s *GRPCService) Node(ctx context.Context, req *empty.Empty) (*protobuf.NodeResponse, error) {
	resp := &protobuf.NodeResponse{}

	node, err := s.raftServer.Node()
	if err != nil {
		s.logger.Error("failed to get node info", zap.String("err", err.Error()))
		return resp, status.Error(codes.Internal, err.Error())
	}

	resp.Node = node
	return resp, nil
}

// Cluster returns the full cluster membership, querying each peer for
// its current state, plus the current leader ID.
func (s *GRPCService) Cluster(ctx context.Context, req *empty.Empty) (*protobuf.ClusterResponse, error) {
	resp := &protobuf.ClusterResponse{}

	cluster := &protobuf.Cluster{}

	nodes, err := s.raftServer.Nodes()
	if err != nil {
		s.logger.Error("failed to get cluster info", zap.String("err", err.Error()))
		return resp, status.Error(codes.Internal, err.Error())
	}
	for id, node := range nodes {
		if id == s.raftServer.id {
			node.State = s.raftServer.StateStr()
			continue
		}

		s.watchMutex.RLock()
		c, ok := s.peerClients[id]
		s.watchMutex.RUnlock()
		if !ok || c == nil {
			// BUG FIX: no client yet for this peer — the original
			// dereferenced a nil client here. Report it as unreachable.
			node.State = raft.Shutdown.String()
			continue
		}

		nodeResp, err := c.Node()
		if err != nil {
			node.State = raft.Shutdown.String()
			// Metadata can be nil when the peer's metadata has not been
			// applied yet; guard the address used for logging.
			grpcAddress := ""
			if node.Metadata != nil {
				grpcAddress = node.Metadata.GrpcAddress
			}
			s.logger.Error("failed to get node info", zap.String("grpc_address", grpcAddress), zap.String("err", err.Error()))
		} else {
			node.State = nodeResp.Node.State
		}
	}
	cluster.Nodes = nodes

	serverID, err := s.raftServer.LeaderID(60 * time.Second)
	if err != nil {
		s.logger.Error("failed to get cluster info", zap.String("err", err.Error()))
		return resp, status.Error(codes.Internal, err.Error())
	}
	cluster.Leader = string(serverID)

	resp.Cluster = cluster
	return resp, nil
}

func
(s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { resp := &empty.Empty{} err := s.raftServer.Snapshot() if err != nil { s.logger.Error("failed to snapshot data", zap.String("err", err.Error())) return resp, status.Error(codes.Internal, err.Error()) } return resp, nil } func (s *GRPCService) Get(ctx context.Context, req *protobuf.GetRequest) (*protobuf.GetResponse, error) { resp := &protobuf.GetResponse{} var err error resp, err = s.raftServer.Get(req) if err != nil { switch err { case errors.ErrNotFound: s.logger.Debug("key not found", zap.String("key", req.Key), zap.String("err", err.Error())) return resp, status.Error(codes.NotFound, err.Error()) default: s.logger.Debug("failed to get data", zap.String("key", req.Key), zap.String("err", err.Error())) return resp, status.Error(codes.Internal, err.Error()) } } return resp, nil } func (s *GRPCService) Scan(ctx context.Context, req *protobuf.ScanRequest) (*protobuf.ScanResponse, error) { resp := &protobuf.ScanResponse{} var err error resp, err = s.raftServer.Scan(req) if err != nil { switch err { default: s.logger.Debug("failed to scan data", zap.String("prefix", req.Prefix), zap.String("err", err.Error())) return resp, status.Error(codes.Internal, err.Error()) } } return resp, nil } func (s *GRPCService) Set(ctx context.Context, req *protobuf.SetRequest) (*empty.Empty, error) { resp := &empty.Empty{} if s.raftServer.raft.State() != raft.Leader { clusterResp, err := s.Cluster(ctx, &empty.Empty{}) if err != nil { s.logger.Error("failed to get cluster info", zap.Error(err)) return resp, status.Error(codes.Internal, err.Error()) } c := s.peerClients[clusterResp.Cluster.Leader] err = c.Set(req) if err != nil { s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) return resp, status.Error(codes.Internal, err.Error()) } return resp, nil } err := s.raftServer.Set(req) if err != nil { s.logger.Error("failed to put data", zap.Any("req", 
req), zap.Error(err)) return resp, status.Error(codes.Internal, err.Error()) } return resp, nil } func (s *GRPCService) Delete(ctx context.Context, req *protobuf.DeleteRequest) (*empty.Empty, error) { resp := &empty.Empty{} if s.raftServer.raft.State() != raft.Leader { clusterResp, err := s.Cluster(ctx, &empty.Empty{}) if err != nil { s.logger.Error("failed to get cluster info", zap.Error(err)) return resp, status.Error(codes.Internal, err.Error()) } c := s.peerClients[clusterResp.Cluster.Leader] err = c.Delete(req) if err != nil { s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) return resp, status.Error(codes.Internal, err.Error()) } return resp, nil } err := s.raftServer.Delete(req) if err != nil { s.logger.Error("failed to delete data", zap.String("key", req.Key), zap.Error(err)) return resp, status.Error(codes.Internal, err.Error()) } return resp, nil } func (s *GRPCService) Watch(req *empty.Empty, server protobuf.KVS_WatchServer) error { chans := make(chan protobuf.WatchResponse) s.watchMutex.Lock() s.watchChans[chans] = struct{}{} s.watchMutex.Unlock() defer func() { s.watchMutex.Lock() delete(s.watchChans, chans) s.watchMutex.Unlock() close(chans) }() for resp := range chans { if err := server.Send(&resp); err != nil { s.logger.Error("failed to send watch data", zap.String("event", resp.Event.String()), zap.Error(err)) return status.Error(codes.Internal, err.Error()) } } return nil } func (s *GRPCService) Metrics(ctx context.Context, req *empty.Empty) (*protobuf.MetricsResponse, error) { resp := &protobuf.MetricsResponse{} var err error gather, err := metric.Registry.Gather() if err != nil { s.logger.Error("failed to get gather", zap.Error(err)) } out := &bytes.Buffer{} for _, mf := range gather { if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { s.logger.Error("failed to parse metric family", zap.Error(err)) } } resp.Metrics = out.Bytes() return resp, nil } 
================================================ FILE: server/raft_fsm.go ================================================ package server import ( "context" "errors" "io" "io/ioutil" "os" "sync" "time" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" "github.com/mosuka/cete/marshaler" "github.com/mosuka/cete/protobuf" "github.com/mosuka/cete/storage" "go.uber.org/zap" ) type RaftFSM struct { logger *zap.Logger kvs *storage.KVS metadata map[string]*protobuf.Metadata nodesMutex sync.RWMutex applyCh chan *protobuf.Event } func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { err := os.MkdirAll(path, 0755) if err != nil && !os.IsExist(err) { logger.Error("failed to make directories", zap.String("path", path), zap.Error(err)) return nil, err } kvs, err := storage.NewKVS(path, path, logger) if err != nil { logger.Error("failed to create key value store", zap.String("path", path), zap.Error(err)) return nil, err } // TODO: Context should be passed down to allow for cascade cancellation. // TODO: GC should have its own flags for both the interval (--gc-interval=5m) and ratio (--gc-discard-ratio=0.5). 
kvs.RunGC(context.Background(), 5*time.Minute, 0.5) return &RaftFSM{ logger: logger, kvs: kvs, metadata: make(map[string]*protobuf.Metadata, 0), applyCh: make(chan *protobuf.Event, 1024), }, nil } func (f *RaftFSM) Close() error { f.applyCh <- nil f.logger.Info("apply channel has closed") err := f.kvs.Close() if err != nil { f.logger.Error("failed to close key value store", zap.Error(err)) return err } f.logger.Info("KVS has closed") return nil } func (f *RaftFSM) Get(key string) ([]byte, error) { value, err := f.kvs.Get(key) if err != nil { f.logger.Error("failed to get value", zap.String("key", key), zap.Error(err)) return nil, err } return value, nil } func (f *RaftFSM) Scan(prefix string) ([][]byte, error) { values, err := f.kvs.Scan(prefix) if err != nil { f.logger.Error("failed to scan values", zap.String("prefix", prefix), zap.Error(err)) return nil, err } return values, nil } func (f *RaftFSM) applySet(key string, value []byte) interface{} { err := f.kvs.Set(key, value) if err != nil { f.logger.Error("failed to set value", zap.String("key", key), zap.Error(err)) return err } return nil } func (f *RaftFSM) applyDelete(key string) interface{} { err := f.kvs.Delete(key) if err != nil { f.logger.Error("failed to delete value", zap.String("key", key), zap.Error(err)) return err } return nil } func (f *RaftFSM) getMetadata(id string) *protobuf.Metadata { if metadata, exists := f.metadata[id]; exists { return metadata } else { f.logger.Warn("metadata not found", zap.String("id", id)) return nil } } func (f *RaftFSM) setMetadata(id string, metadata *protobuf.Metadata) { f.nodesMutex.Lock() f.metadata[id] = metadata f.nodesMutex.Unlock() } func (f *RaftFSM) deleteMetadata(id string) { f.nodesMutex.Lock() if _, exists := f.metadata[id]; exists { delete(f.metadata, id) } else { f.logger.Warn("metadata not found", zap.String("id", id)) } f.nodesMutex.Unlock() } func (f *RaftFSM) applySetMetadata(id string, metadata *protobuf.Metadata) interface{} { f.logger.Debug("set 
metadata", zap.String("id", id), zap.Any("metadata", metadata)) f.setMetadata(id, metadata) return nil } func (f *RaftFSM) applyDeleteMetadata(nodeId string) interface{} { f.deleteMetadata(nodeId) return nil } func (f *RaftFSM) Apply(l *raft.Log) interface{} { var event protobuf.Event err := proto.Unmarshal(l.Data, &event) if err != nil { f.logger.Error("failed to unmarshal message bytes to KVS command", zap.Error(err)) return err } switch event.Type { case protobuf.Event_Join: data, err := marshaler.MarshalAny(event.Data) if err != nil { f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) return err } if data == nil { err = errors.New("nil") f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) return err } req := data.(*protobuf.SetMetadataRequest) ret := f.applySetMetadata(req.Id, req.Metadata) if ret == nil { f.applyCh <- &event } return ret case protobuf.Event_Leave: data, err := marshaler.MarshalAny(event.Data) if err != nil { f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) return err } if data == nil { err = errors.New("nil") f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) return err } req := *data.(*protobuf.DeleteMetadataRequest) ret := f.applyDeleteMetadata(req.Id) if ret == nil { f.applyCh <- &event } return ret case protobuf.Event_Set: data, err := marshaler.MarshalAny(event.Data) if err != nil { f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) return err } if data == nil { err = errors.New("nil") f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) return err } req := *data.(*protobuf.SetRequest) ret := f.applySet(req.Key, req.Value) if ret == nil { f.applyCh <- &event } return ret case 
protobuf.Event_Delete: data, err := marshaler.MarshalAny(event.Data) if err != nil { f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) return err } if data == nil { err = errors.New("nil") f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) return err } req := *data.(*protobuf.DeleteRequest) ret := f.applyDelete(req.Key) if ret == nil { f.applyCh <- &event } return ret default: err = errors.New("command type not support") f.logger.Error("unsupported command", zap.String("type", event.Type.String()), zap.Error(err)) return err } } func (f *RaftFSM) Stats() map[string]string { return f.kvs.Stats() } func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { return &KVSFSMSnapshot{ kvs: f.kvs, logger: f.logger, }, nil } func (f *RaftFSM) Restore(rc io.ReadCloser) error { start := time.Now() f.logger.Info("start to restore items") defer func() { err := rc.Close() if err != nil { f.logger.Error("failed to close reader", zap.Error(err)) } }() data, err := ioutil.ReadAll(rc) if err != nil { f.logger.Error("failed to open reader", zap.Error(err)) return err } keyCount := uint64(0) buff := proto.NewBuffer(data) for { kvp := &protobuf.KeyValuePair{} err = buff.DecodeMessage(kvp) if err == io.ErrUnexpectedEOF { f.logger.Debug("reached the EOF", zap.Error(err)) break } if err != nil { f.logger.Error("failed to read key value pair", zap.Error(err)) return err } // apply item to store err = f.kvs.Set(kvp.Key, kvp.Value) if err != nil { f.logger.Error("failed to set key value pair to key value store", zap.Error(err)) return err } f.logger.Debug("restore", zap.String("key", kvp.Key)) keyCount = keyCount + 1 } f.logger.Info("finished to restore items", zap.Uint64("count", keyCount), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) return nil } // --------------------- type KVSFSMSnapshot struct { kvs *storage.KVS logger *zap.Logger } func (f 
*KVSFSMSnapshot) Persist(sink raft.SnapshotSink) error { start := time.Now() f.logger.Info("start to persist items") defer func() { err := sink.Close() if err != nil { f.logger.Error("failed to close sink", zap.Error(err)) } }() ch := f.kvs.SnapshotItems() kvpCount := uint64(0) for { kvp := <-ch if kvp == nil { f.logger.Debug("channel closed") break } kvpCount = kvpCount + 1 buff := proto.NewBuffer([]byte{}) err := buff.EncodeMessage(kvp) if err != nil { f.logger.Error("failed to encode key value pair", zap.Error(err)) return err } _, err = sink.Write(buff.Bytes()) if err != nil { f.logger.Error("failed to write key value pair", zap.Error(err)) return err } } f.logger.Info("finished to persist items", zap.Uint64("count", kvpCount), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) return nil } func (f *KVSFSMSnapshot) Release() { f.logger.Info("release") } ================================================ FILE: server/raft_server.go ================================================ package server import ( "encoding/json" "io/ioutil" "net" "os" "path/filepath" "strconv" "time" raftbadgerdb "github.com/BBVA/raft-badger" "github.com/dgraph-io/badger/v2" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/raft" "github.com/mosuka/cete/errors" "github.com/mosuka/cete/marshaler" "github.com/mosuka/cete/metric" "github.com/mosuka/cete/protobuf" "go.uber.org/zap" ) type RaftServer struct { id string raftAddress string dataDirectory string bootstrap bool logger *zap.Logger fsm *RaftFSM transport *raft.NetworkTransport raft *raft.Raft watchClusterStopCh chan struct{} watchClusterDoneCh chan struct{} applyCh chan *protobuf.Event } func NewRaftServer(id string, raftAddress string, dataDirectory string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { fsmPath := filepath.Join(dataDirectory, "kvs") fsm, err := NewRaftFSM(fsmPath, logger) if err != nil { logger.Error("failed to create FSM", 
zap.String("path", fsmPath), zap.Error(err)) return nil, err } return &RaftServer{ id: id, raftAddress: raftAddress, dataDirectory: dataDirectory, bootstrap: bootstrap, fsm: fsm, logger: logger, watchClusterStopCh: make(chan struct{}), watchClusterDoneCh: make(chan struct{}), applyCh: make(chan *protobuf.Event, 1024), }, nil } func (s *RaftServer) Start() error { config := raft.DefaultConfig() config.LocalID = raft.ServerID(s.id) config.SnapshotThreshold = 1024 config.LogOutput = ioutil.Discard addr, err := net.ResolveTCPAddr("tcp", s.raftAddress) if err != nil { s.logger.Error("failed to resolve TCP address", zap.String("raft_address", s.raftAddress), zap.Error(err)) return err } s.transport, err = raft.NewTCPTransport(s.raftAddress, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { s.logger.Error("failed to create TCP transport", zap.String("raft_address", s.raftAddress), zap.Error(err)) return err } // create snapshot store snapshotStore, err := raft.NewFileSnapshotStore(s.dataDirectory, 2, ioutil.Discard) if err != nil { s.logger.Error("failed to create file snapshot store", zap.String("path", s.dataDirectory), zap.Error(err)) return err } logStorePath := filepath.Join(s.dataDirectory, "raft", "log") err = os.MkdirAll(logStorePath, 0755) if err != nil { s.logger.Fatal(err.Error()) return err } logStoreBadgerOpts := badger.DefaultOptions(logStorePath) logStoreBadgerOpts.ValueDir = logStorePath logStoreBadgerOpts.SyncWrites = false logStoreBadgerOpts.Logger = nil logStoreOpts := raftbadgerdb.Options{ Path: logStorePath, BadgerOptions: &logStoreBadgerOpts, } raftLogStore, err := raftbadgerdb.New(logStoreOpts) if err != nil { s.logger.Fatal(err.Error()) return err } stableStorePath := filepath.Join(s.dataDirectory, "raft", "stable") err = os.MkdirAll(stableStorePath, 0755) if err != nil { s.logger.Fatal(err.Error()) return err } stableStoreBadgerOpts := badger.DefaultOptions(stableStorePath) stableStoreBadgerOpts.ValueDir = stableStorePath 
stableStoreBadgerOpts.SyncWrites = false
	stableStoreBadgerOpts.Logger = nil
	stableStoreOpts := raftbadgerdb.Options{
		Path:          stableStorePath,
		BadgerOptions: &stableStoreBadgerOpts,
	}
	raftStableStore, err := raftbadgerdb.New(stableStoreOpts)
	if err != nil {
		// BUG FIX: logger.Fatal calls os.Exit, making the following return
		// unreachable and skipping all cleanup; log and return instead.
		s.logger.Error(err.Error())
		return err
	}

	// Create raft.
	s.raft, err = raft.NewRaft(config, s.fsm, raftLogStore, raftStableStore, snapshotStore, s.transport)
	if err != nil {
		s.logger.Error("failed to create raft", zap.Any("config", config), zap.Error(err))
		return err
	}

	if s.bootstrap {
		// First node of a new cluster: bootstrap with itself as the only voter.
		configuration := raft.Configuration{
			Servers: []raft.Server{
				{
					ID:      config.LocalID,
					Address: s.transport.LocalAddr(),
				},
			},
		}
		s.raft.BootstrapCluster(configuration)
	}

	go func() {
		s.startWatchCluster(500 * time.Millisecond)
	}()

	s.logger.Info("Raft server started", zap.String("raft_address", s.raftAddress))
	return nil
}

// Stop signals the end of the event stream, stops the watcher, closes
// the FSM and shuts Raft down.
func (s *RaftServer) Stop() error {
	// NOTE(review): a nil sentinel is sent instead of close()ing applyCh —
	// downstream readers appear to rely on this; confirm before changing.
	s.applyCh <- nil
	s.logger.Info("apply channel has closed")

	s.stopWatchCluster()

	if err := s.fsm.Close(); err != nil {
		s.logger.Error("failed to close FSM", zap.Error(err))
	}
	s.logger.Info("Raft FSM Closed")

	if future := s.raft.Shutdown(); future.Error() != nil {
		s.logger.Info("failed to shutdown Raft", zap.Error(future.Error()))
	}
	s.logger.Info("Raft has shutdown", zap.String("raft_address", s.raftAddress))

	return nil
}

// startWatchCluster forwards FSM events to applyCh and exports Raft and
// KVS statistics as Prometheus metrics every checkInterval. It runs
// until watchClusterStopCh is closed and signals completion by closing
// watchClusterDoneCh.
func (s *RaftServer) startWatchCluster(checkInterval time.Duration) {
	s.logger.Info("start to update cluster info")

	defer func() {
		close(s.watchClusterDoneCh)
	}()

	ticker := time.NewTicker(checkInterval)
	defer ticker.Stop()

	timeout := 60 * time.Second
	if err := s.WaitForDetectLeader(timeout); err != nil {
		if err == errors.ErrTimeout {
			s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err))
		} else {
			s.logger.Error("failed to detect leader", zap.Error(err))
		}
	}

	for {
		select {
		case <-s.watchClusterStopCh:
			s.logger.Info("received a request to stop updating a cluster")
			return
		case <-s.raft.LeaderCh():
			s.logger.Info("became a leader", zap.String("leaderAddr", string(s.raft.Leader())))
		case event := <-s.fsm.applyCh:
			// Relay applied events to downstream watchers.
			s.applyCh <- event
		case <-ticker.C:
			raftStats := s.raft.Stats()

			switch raftStats["state"] {
			case "Follower":
				metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Follower))
			case "Candidate":
				metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Candidate))
			case "Leader":
				metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Leader))
			case "Shutdown":
				metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Shutdown))
			}

			// Raft numeric stats: silently skip values that fail to parse
			// (preserved behavior).
			if term, err := strconv.ParseFloat(raftStats["term"], 64); err == nil {
				metric.RaftTermMetric.WithLabelValues(s.id).Set(term)
			}
			if lastLogIndex, err := strconv.ParseFloat(raftStats["last_log_index"], 64); err == nil {
				metric.RaftLastLogIndexMetric.WithLabelValues(s.id).Set(lastLogIndex)
			}
			if lastLogTerm, err := strconv.ParseFloat(raftStats["last_log_term"], 64); err == nil {
				metric.RaftLastLogTermMetric.WithLabelValues(s.id).Set(lastLogTerm)
			}
			if commitIndex, err := strconv.ParseFloat(raftStats["commit_index"], 64); err == nil {
				metric.RaftCommitIndexMetric.WithLabelValues(s.id).Set(commitIndex)
			}
			if appliedIndex, err := strconv.ParseFloat(raftStats["applied_index"], 64); err == nil {
				metric.RaftAppliedIndexMetric.WithLabelValues(s.id).Set(appliedIndex)
			}
			if fsmPending, err := strconv.ParseFloat(raftStats["fsm_pending"], 64); err == nil {
				metric.RaftFsmPendingMetric.WithLabelValues(s.id).Set(fsmPending)
			}
			if lastSnapshotIndex, err := strconv.ParseFloat(raftStats["last_snapshot_index"], 64); err == nil {
				metric.RaftLastSnapshotIndexMetric.WithLabelValues(s.id).Set(lastSnapshotIndex)
			}
			if lastSnapshotTerm, err := strconv.ParseFloat(raftStats["last_snapshot_term"], 64); err == nil {
				metric.RaftLastSnapshotTermMetric.WithLabelValues(s.id).Set(lastSnapshotTerm)
			}
			if latestConfigurationIndex, err := strconv.ParseFloat(raftStats["latest_configuration_index"], 64); err == nil {
				metric.RaftLatestConfigurationIndexMetric.WithLabelValues(s.id).Set(latestConfigurationIndex)
			}
			if numPeers, err := strconv.ParseFloat(raftStats["num_peers"], 64); err == nil {
				metric.RaftNumPeersMetric.WithLabelValues(s.id).Set(numPeers)
			}
			if lastContact, err := strconv.ParseFloat(raftStats["last_contact"], 64); err == nil {
				metric.RaftLastContactMetric.WithLabelValues(s.id).Set(lastContact)
			}
			if nodes, err := s.Nodes(); err == nil {
				metric.RaftNumNodesMetric.WithLabelValues(s.id).Set(float64(len(nodes)))
			}

			kvsStats := s.fsm.Stats()
			if numReads, err := strconv.ParseFloat(kvsStats["num_reads"], 64); err == nil {
				metric.KvsNumReadsMetric.WithLabelValues(s.id).Set(numReads)
			}
			if numWrites, err := strconv.ParseFloat(kvsStats["num_writes"], 64); err == nil {
				metric.KvsNumWritesMetric.WithLabelValues(s.id).Set(numWrites)
			}
			if numBytesRead, err := strconv.ParseFloat(kvsStats["num_bytes_read"], 64); err == nil {
				metric.KvsNumBytesReadMetric.WithLabelValues(s.id).Set(numBytesRead)
			}
			if numBytesWritten, err := strconv.ParseFloat(kvsStats["num_bytes_written"], 64); err == nil {
				metric.KvsNumBytesWrittenMetric.WithLabelValues(s.id).Set(numBytesWritten)
			}
			// These two are only logged, not exported as metrics
			// (preserved behavior).
			var numLsmGets map[string]interface{}
			if err := json.Unmarshal([]byte(kvsStats["num_lsm_gets"]), &numLsmGets); err == nil {
				for key, value := range numLsmGets {
					s.logger.Info("", zap.String("key", key), zap.Any("value", value))
				}
			}
			var numLsmBloomHits map[string]interface{}
			if err := json.Unmarshal([]byte(kvsStats["num_lsm_bloom_Hits"]), &numLsmBloomHits); err == nil {
				for key, value := range numLsmBloomHits {
					s.logger.Info("", zap.String("key", key), zap.Any("value", value))
				}
			}
			if numGets, err := strconv.ParseFloat(kvsStats["num_gets"], 64); err == nil {
				metric.KvsNumGetsMetric.WithLabelValues(s.id).Set(numGets)
			}
			if numPuts, err := strconv.ParseFloat(kvsStats["num_puts"], 64); err == nil {
				metric.KvsNumPutsMetric.WithLabelValues(s.id).Set(numPuts)
			}
			if numBlockedPuts, err := strconv.ParseFloat(kvsStats["num_blocked_puts"], 64); err == nil {
				metric.KvsNumBlockedPutsMetric.WithLabelValues(s.id).Set(numBlockedPuts)
			}
			if numMemtablesGets, err := strconv.ParseFloat(kvsStats["num_memtables_gets"], 64); err == nil {
				metric.KvsNumMemtablesGetsMetric.WithLabelValues(s.id).Set(numMemtablesGets)
			}
			// BUG FIX: the original used unchecked value.(float64) assertions
			// below, which panic the whole watcher goroutine if badger ever
			// reports a non-float value; use the comma-ok form.
			var lsmSize map[string]interface{}
			if err := json.Unmarshal([]byte(kvsStats["lsm_size"]), &lsmSize); err == nil {
				for key, value := range lsmSize {
					if v, ok := value.(float64); ok {
						metric.KvsLSMSizeMetric.WithLabelValues(s.id, key).Set(v)
					}
				}
			}
			var vlogSize map[string]interface{}
			if err := json.Unmarshal([]byte(kvsStats["vlog_size"]), &vlogSize); err == nil {
				for key, value := range vlogSize {
					if v, ok := value.(float64); ok {
						metric.KvsVlogSizeMetric.WithLabelValues(s.id, key).Set(v)
					}
				}
			}
			var pendingWrites map[string]interface{}
			if err := json.Unmarshal([]byte(kvsStats["pending_writes"]), &pendingWrites); err == nil {
				for key, value := range pendingWrites {
					if v, ok := value.(float64); ok {
						metric.KvsPendingWritesMetric.WithLabelValues(s.id, key).Set(v)
					}
				}
			}
		}
	}
}

// stopWatchCluster signals the watcher to stop and waits for it.
func (s *RaftServer) stopWatchCluster() {
	if s.watchClusterStopCh != nil {
		s.logger.Info("send a request to stop updating a cluster")
		close(s.watchClusterStopCh)
	}

	s.logger.Info("wait for the cluster update to stop")
	<-s.watchClusterDoneCh
	s.logger.Info("the cluster update has been stopped")
}

// LeaderAddress polls until a leader address is known or timeout elapses.
func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			leaderAddr := s.raft.Leader()
			if leaderAddr != "" {
				s.logger.Debug("detected a leader address", zap.String("raft_address", string(leaderAddr)))
				return leaderAddr, nil
			}
		case <-timer.C:
			err := errors.ErrTimeout
			s.logger.Error("failed to detect leader address", zap.Error(err))
			return "", err
		}
	}
}

// LeaderID resolves the leader address and maps it to a server ID via
// the current Raft configuration.
func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) {
	leaderAddr, err := s.LeaderAddress(timeout)
	if err != nil {
		s.logger.Error("failed to get leader address",
		zap.Error(err))
		return "", err
	}

	cf := s.raft.GetConfiguration()
	if err := cf.Error(); err != nil {
		s.logger.Error("failed to get Raft configuration", zap.Error(err))
		return "", err
	}

	// Match the leader's address against the configured servers to find its ID.
	for _, server := range cf.Configuration().Servers {
		if server.Address == leaderAddr {
			s.logger.Info("detected a leader ID", zap.String("id", string(server.ID)))
			return server.ID, nil
		}
	}

	err = errors.ErrNotFoundLeader
	s.logger.Error("failed to detect leader ID", zap.Error(err))
	return "", err
}

// WaitForDetectLeader blocks until a cluster leader address can be
// resolved or the given timeout elapses.
func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error {
	if _, err := s.LeaderAddress(timeout); err != nil {
		s.logger.Error("failed to wait for detect leader", zap.Error(err))
		return err
	}
	return nil
}

// State returns this node's current Raft state.
func (s *RaftServer) State() raft.RaftState {
	return s.raft.State()
}

// StateStr returns this node's current Raft state as a string.
func (s *RaftServer) StateStr() string {
	return s.State().String()
}

// Exist reports whether a node with the given ID is already part of the
// Raft cluster configuration.
func (s *RaftServer) Exist(id string) (bool, error) {
	exist := false
	cf := s.raft.GetConfiguration()
	err := cf.Error()
	if err != nil {
		s.logger.Error("failed to get Raft configuration", zap.Error(err))
		return false, err
	}
	for _, server := range cf.Configuration().Servers {
		if server.ID == raft.ServerID(id) {
			s.logger.Debug("node already joined the cluster", zap.String("id", id))
			exist = true
			break
		}
	}
	return exist, nil
}

// join replicates the node's metadata to the cluster by applying a Join
// event through the Raft log.
func (s *RaftServer) join(id string, metadata *protobuf.Metadata) error {
	data := &protobuf.SetMetadataRequest{
		Id:       id,
		Metadata: metadata,
	}
	dataAny := &any.Any{}
	err := marshaler.UnmarshalAny(data, dataAny)
	if err != nil {
		s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err))
		return err
	}
	c := &protobuf.Event{
		Type: protobuf.Event_Join,
		Data: dataAny,
	}
	msg, err := proto.Marshal(c)
	if err != nil {
		s.logger.Error("failed to marshal the command into the bytes as message", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err))
		return err
	}
	f := s.raft.Apply(msg, 10*time.Second)
	if err = f.Error(); err != nil {
		s.logger.Error("failed to apply message", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err))
		return err
	}
	return nil
}

// Join adds the given node to the cluster as a voter (unless it is
// already a member) and then replicates its metadata via join. When the
// node was already in the configuration, the metadata is still refreshed
// and errors.ErrNodeAlreadyExists is returned so callers can tell.
func (s *RaftServer) Join(id string, node *protobuf.Node) error {
	nodeExists, err := s.Exist(id)
	if err != nil {
		return err
	}
	if nodeExists {
		s.logger.Debug("node already exists", zap.String("id", id), zap.String("raft_address", node.RaftAddress))
	} else {
		if future := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(node.RaftAddress), 0, 0); future.Error() != nil {
			s.logger.Error("failed to add voter", zap.String("id", id), zap.String("raft_address", node.RaftAddress), zap.Error(future.Error()))
			return future.Error()
		}
		s.logger.Info("node has successfully joined", zap.String("id", id), zap.String("raft_address", node.RaftAddress))
	}
	if err := s.join(id, node.Metadata); err != nil {
		s.logger.Error("failed to set node metadata", zap.String("id", id), zap.Any("metadata", node.Metadata), zap.Error(err))
		return err
	}
	s.logger.Info("node metadata has successfully set", zap.String("id", id), zap.Any("metadata", node.Metadata))
	if nodeExists {
		return errors.ErrNodeAlreadyExists
	} else {
		return nil
	}
}

// leave removes the node's metadata from the cluster by applying a Leave
// event through the Raft log.
func (s *RaftServer) leave(id string) error {
	data := &protobuf.DeleteMetadataRequest{
		Id: id,
	}
	dataAny := &any.Any{}
	err := marshaler.UnmarshalAny(data, dataAny)
	if err != nil {
		s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Error(err))
		return err
	}
	c := &protobuf.Event{
		Type: protobuf.Event_Leave,
		Data: dataAny,
	}
	msg, err := proto.Marshal(c)
	if err != nil {
		s.logger.Error("failed to marshal the command into the bytes as the message", zap.String("id", id), zap.Error(err))
		return err
	}
	f := s.raft.Apply(msg, 10*time.Second)
	if err = f.Error(); err != nil {
		s.logger.Error("failed to apply the message", zap.String("id", id), zap.Error(err))
		return err
	}
	return nil
}

// Leave removes the given node from the Raft configuration (if present)
// and deletes its replicated metadata.
func (s *RaftServer) Leave(id string) error {
	nodeExists, err := s.Exist(id)
	if err != nil {
		return err
	}
	if nodeExists {
		if future :=
s.raft.RemoveServer(raft.ServerID(id), 0, 0); future.Error() != nil { s.logger.Error("failed to remove server", zap.String("id", id), zap.Error(future.Error())) return future.Error() } s.logger.Info("node has successfully left", zap.String("id", id)) } else { s.logger.Debug("node does not exists", zap.String("id", id)) } if err = s.leave(id); err != nil { s.logger.Error("failed to join node", zap.String("id", id), zap.Error(err)) return err } return nil } func (s *RaftServer) Node() (*protobuf.Node, error) { nodes, err := s.Nodes() if err != nil { return nil, err } node, ok := nodes[s.id] if !ok { return nil, errors.ErrNotFound } node.State = s.StateStr() return node, nil } func (s *RaftServer) Nodes() (map[string]*protobuf.Node, error) { cf := s.raft.GetConfiguration() if err := cf.Error(); err != nil { s.logger.Error("failed to get Raft configuration", zap.Error(err)) return nil, err } nodes := make(map[string]*protobuf.Node, 0) for _, server := range cf.Configuration().Servers { nodes[string(server.ID)] = &protobuf.Node{ RaftAddress: string(server.Address), Metadata: s.fsm.getMetadata(string(server.ID)), } } return nodes, nil } func (s *RaftServer) Snapshot() error { if future := s.raft.Snapshot(); future.Error() != nil { s.logger.Error("failed to snapshot", zap.Error(future.Error())) return future.Error() } return nil } func (s *RaftServer) Get(req *protobuf.GetRequest) (*protobuf.GetResponse, error) { value, err := s.fsm.Get(req.Key) if err != nil { s.logger.Error("failed to get", zap.Any("key", req.Key), zap.Error(err)) return nil, err } resp := &protobuf.GetResponse{ Value: value, } return resp, nil } func (s *RaftServer) Scan(req *protobuf.ScanRequest) (*protobuf.ScanResponse, error) { values, err := s.fsm.Scan(req.Prefix) if err != nil { s.logger.Error("failed to scan", zap.Any("prefix", req.Prefix), zap.Error(err)) return nil, err } resp := &protobuf.ScanResponse{ Values: values, } return resp, nil } func (s *RaftServer) Set(req *protobuf.SetRequest) 
error { kvpAny := &any.Any{} if err := marshaler.UnmarshalAny(req, kvpAny); err != nil { s.logger.Error("failed to unmarshal request to the command data", zap.String("key", req.Key), zap.Error(err)) return err } c := &protobuf.Event{ Type: protobuf.Event_Set, Data: kvpAny, } msg, err := proto.Marshal(c) if err != nil { s.logger.Error("failed to marshal the command into the bytes as the message", zap.String("key", req.Key), zap.Error(err)) return err } if future := s.raft.Apply(msg, 10*time.Second); future.Error() != nil { s.logger.Error("failed to apply the message", zap.Error(future.Error())) return future.Error() } return nil } func (s *RaftServer) Delete(req *protobuf.DeleteRequest) error { kvpAny := &any.Any{} if err := marshaler.UnmarshalAny(req, kvpAny); err != nil { s.logger.Error("failed to unmarshal request to the command data", zap.String("key", req.Key), zap.Error(err)) return err } c := &protobuf.Event{ Type: protobuf.Event_Delete, Data: kvpAny, } msg, err := proto.Marshal(c) if err != nil { s.logger.Error("failed to marshal the command into the bytes as the message", zap.String("key", req.Key), zap.Error(err)) return err } if future := s.raft.Apply(msg, 10*time.Second); future.Error() != nil { s.logger.Error("failed to unmarshal request to the command data", zap.String("key", req.Key), zap.Error(future.Error())) return future.Error() } return nil } ================================================ FILE: storage/kvs.go ================================================ package storage import ( "context" "time" "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/badger/v2/y" "github.com/mosuka/cete/errors" "github.com/mosuka/cete/protobuf" "go.uber.org/zap" ) type KVS struct { dir string valueDir string db *badger.DB logger *zap.Logger } func NewKVS(dir string, valueDir string, logger *zap.Logger) (*KVS, error) { opts := badger.DefaultOptions(dir) opts.ValueDir = valueDir opts.SyncWrites = false opts.Logger = nil db, err := badger.Open(opts) if err != 
nil {
		logger.Error("failed to open database", zap.Any("opts", opts), zap.Error(err))
		return nil, err
	}
	return &KVS{
		dir:      dir,
		valueDir: valueDir,
		db:       db,
		logger:   logger,
	}, nil
}

// Close closes the underlying Badger database.
func (k *KVS) Close() error {
	if err := k.db.Close(); err != nil {
		k.logger.Error("failed to close database", zap.Error(err))
		return err
	}
	return nil
}

// RunGC starts a background goroutine that runs Badger's value-log
// garbage collection every interval until ctx is cancelled. Each tick,
// RunValueLogGC is retried until it reports ErrNoRewrite (nothing left to
// collect) or some other error, which is logged.
func (k *KVS) RunGC(ctx context.Context, interval time.Duration, discardRatio float64) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				start := time.Now()
				for {
					err := k.db.RunValueLogGC(discardRatio)
					if err != nil {
						if err == badger.ErrNoRewrite {
							break
						}
						k.logger.Error("garbage collection failed", zap.Error(err))
						break
					}
				}
				k.logger.Info("garbage collection finished", zap.Float64("time", float64(time.Since(start))/float64(time.Second)))
			case <-ctx.Done():
				return
			}
		}
	}()
}

// Get returns the value stored under key. A missing key is reported as
// errors.ErrNotFound; any other Badger error is returned as-is.
func (k *KVS) Get(key string) ([]byte, error) {
	start := time.Now()
	var value []byte
	if err := k.db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte(key))
		if err != nil {
			k.logger.Error("failed to get item", zap.String("key", key), zap.Error(err))
			return err
		}
		// Copy the value out; val is only valid inside the callback.
		err = item.Value(func(val []byte) error {
			value = append([]byte{}, val...)
			return nil
		})
		if err != nil {
			k.logger.Error("failed to get item value", zap.String("key", key), zap.Error(err))
			return err
		}
		return nil
	}); err == badger.ErrKeyNotFound {
		k.logger.Debug("not found", zap.String("key", key), zap.Error(err))
		return nil, errors.ErrNotFound
	} else if err != nil {
		k.logger.Error("failed to get value", zap.String("key", key), zap.Error(err))
		return nil, err
	}
	k.logger.Debug("get", zap.String("key", key), zap.Float64("time", float64(time.Since(start))/float64(time.Second)))
	return value, nil
}

// Scan returns the values of all keys that start with prefix, in the
// iterator's key order.
func (k *KVS) Scan(prefix string) ([][]byte, error) {
	start := time.Now()
	var value [][]byte
	if err := k.db.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()
		prefixBytes := []byte(prefix)
		for it.Seek(prefixBytes); it.ValidForPrefix(prefixBytes); it.Next() {
			item := it.Item()
			// Copy each value out of the transaction before appending.
			err := item.Value(func(val []byte) error {
				value = append(value, append([]byte{}, val...))
				return nil
			})
			if err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		k.logger.Error("failed to scan value", zap.String("prefix", prefix), zap.Error(err))
		return nil, err
	}
	k.logger.Debug("scan", zap.String("prefix", prefix), zap.Float64("time", float64(time.Since(start))/float64(time.Second)))
	return value, nil
}

// Set stores value under key in a read-write transaction.
func (k *KVS) Set(key string, value []byte) error {
	start := time.Now()
	if err := k.db.Update(func(txn *badger.Txn) error {
		err := txn.Set([]byte(key), value)
		if err != nil {
			k.logger.Error("failed to set item", zap.String("key", key), zap.Error(err))
			return err
		}
		return nil
	}); err != nil {
		k.logger.Error("failed to set value", zap.String("key", key), zap.Error(err))
		return err
	}
	k.logger.Debug("set", zap.String("key", key), zap.Float64("time", float64(time.Since(start))/float64(time.Second)))
	return nil
}

// Delete removes key in a read-write transaction.
func (k *KVS) Delete(key string) error {
	start := time.Now()
	if err := k.db.Update(func(txn *badger.Txn) error {
		err := txn.Delete([]byte(key))
		if err != nil {
			k.logger.Error("failed to delete item", zap.String("key",
key), zap.Error(err)) return err } return nil }); err != nil { k.logger.Error("failed to delete value", zap.String("key", key), zap.Error(err)) return err } k.logger.Debug("delete", zap.String("key", key), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) return nil } func (k *KVS) Stats() map[string]string { stats := map[string]string{} stats["num_reads"] = y.NumReads.String() stats["num_writes"] = y.NumWrites.String() stats["num_bytes_read"] = y.NumBytesRead.String() stats["num_bytes_written"] = y.NumBytesWritten.String() stats["num_lsm_gets"] = y.NumLSMGets.String() stats["num_lsm_bloom_Hits"] = y.NumLSMBloomHits.String() stats["num_gets"] = y.NumGets.String() stats["num_puts"] = y.NumPuts.String() stats["num_blocked_puts"] = y.NumBlockedPuts.String() stats["num_memtables_gets"] = y.NumMemtableGets.String() stats["lsm_size"] = y.LSMSize.String() stats["vlog_size"] = y.VlogSize.String() stats["pending_writes"] = y.PendingWrites.String() return stats } func (k *KVS) SnapshotItems() <-chan *protobuf.KeyValuePair { ch := make(chan *protobuf.KeyValuePair, 1024) go func() { start := time.Now() k.logger.Info("start to snapshot items") keyCount := uint64(0) if err := k.db.View(func(txn *badger.Txn) error { opts := badger.DefaultIteratorOptions opts.PrefetchSize = 10 it := txn.NewIterator(opts) defer it.Close() for it.Rewind(); it.Valid(); it.Next() { item := it.Item() key := string(item.Key()) var value []byte if err := item.Value(func(val []byte) error { value = append([]byte{}, val...) 
return nil }); err != nil { k.logger.Error("failed to get item value", zap.String("key", key), zap.Error(err)) return err } ch <- &protobuf.KeyValuePair{ Key: key, Value: append([]byte{}, value...), } keyCount = keyCount + 1 } ch <- nil return nil }); err != nil { k.logger.Error("failed to snapshot items", zap.Error(err)) return } k.logger.Info("finished to snapshot items", zap.Uint64("count", keyCount), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) }() return ch } ================================================ FILE: version/version.go ================================================ package version var ( Version = "latest" )