Repository: tidwall/finn
Branch: master
Commit: e430290cc478
Files: 7
Total size: 61.2 KB
Directory structure:
gitextract_cfjio72r/
├── LICENSE
├── README.md
├── example/
│ └── clone.go
├── finn.go
├── finn_test.go
├── go.mod
└── go.sum
================================================
FILE CONTENTS
================================================
================================================
FILE: LICENSE
================================================
The MIT License (MIT)
Copyright (c) 2016 Josh Baker
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================
FILE: README.md
================================================
**This project has been archived. Please check out [Uhaha](https://github.com/tidwall/uhaha) for a fitter, happier, more productive Raft framework.**
Finn is a fast and simple framework for building [Raft](https://raft.github.io/) implementations in Go. It uses [Redcon](https://github.com/tidwall/redcon) for the network transport and [Hashicorp Raft](https://github.com/hashicorp/raft). There is also the option to use [LevelDB](https://github.com/syndtr/goleveldb), [BoltDB](https://github.com/boltdb/bolt) or [FastLog](https://github.com/tidwall/raft-fastlog) for log persistence.
Features
--------
- Simple API for quickly creating a [fault-tolerant](https://en.wikipedia.org/wiki/Fault_tolerance) cluster
- Fast network protocol using the [raft-redcon](https://github.com/tidwall/raft-redcon) transport
- Optional [backends](#log-backends) for log persistence. [LevelDB](https://github.com/syndtr/goleveldb), [BoltDB](https://github.com/boltdb/bolt), or [FastLog](https://github.com/tidwall/raft-fastlog)
- Adjustable [consistency and durability](#consistency-and-durability) levels
- A [full-featured example](#full-featured-example) to help jumpstart integration
- [Built-in raft commands](#built-in-raft-commands) for monitoring and managing the cluster
- Supports the [Redis log format](http://build47.com/redis-log-format-levels/)
- Works with clients such as [redigo](https://github.com/garyburd/redigo), [redis-py](https://github.com/andymccurdy/redis-py), [node_redis](https://github.com/NodeRedis/node_redis), [jedis](https://github.com/xetorthio/jedis), and [redis-cli](http://redis.io/topics/rediscli)
Getting Started
---------------
### Installing
To start using Finn, install Go and run `go get`:
```sh
$ go get -u github.com/tidwall/finn
```
This will retrieve the library.
### Example
Here's an example of a Redis clone that accepts the GET, SET, DEL, and KEYS commands.
You can run a [full-featured version](#full-featured-example) of this example from a terminal:
```
go run example/clone.go
```
```go
package main
import (
"encoding/json"
"io"
"io/ioutil"
"log"
"sort"
"strings"
"sync"
"github.com/tidwall/finn"
"github.com/tidwall/match"
"github.com/tidwall/redcon"
)
func main() {
n, err := finn.Open("data", ":7481", "", NewClone(), nil)
if err != nil {
log.Fatal(err)
}
defer n.Close()
select {}
}
type Clone struct {
mu sync.RWMutex
keys map[string][]byte
}
func NewClone() *Clone {
return &Clone{keys: make(map[string][]byte)}
}
func (kvm *Clone) Command(m finn.Applier, conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
switch strings.ToLower(string(cmd.Args[0])) {
default:
return nil, finn.ErrUnknownCommand
case "set":
if len(cmd.Args) != 3 {
return nil, finn.ErrWrongNumberOfArguments
}
return m.Apply(conn, cmd,
func() (interface{}, error) {
kvm.mu.Lock()
kvm.keys[string(cmd.Args[1])] = cmd.Args[2]
kvm.mu.Unlock()
return nil, nil
},
func(v interface{}) (interface{}, error) {
conn.WriteString("OK")
return nil, nil
},
)
case "get":
if len(cmd.Args) != 2 {
return nil, finn.ErrWrongNumberOfArguments
}
return m.Apply(conn, cmd, nil,
func(interface{}) (interface{}, error) {
kvm.mu.RLock()
val, ok := kvm.keys[string(cmd.Args[1])]
kvm.mu.RUnlock()
if !ok {
conn.WriteNull()
} else {
conn.WriteBulk(val)
}
return nil, nil
},
)
case "del":
if len(cmd.Args) < 2 {
return nil, finn.ErrWrongNumberOfArguments
}
return m.Apply(conn, cmd,
func() (interface{}, error) {
var n int
kvm.mu.Lock()
for i := 1; i < len(cmd.Args); i++ {
key := string(cmd.Args[i])
if _, ok := kvm.keys[key]; ok {
delete(kvm.keys, key)
n++
}
}
kvm.mu.Unlock()
return n, nil
},
func(v interface{}) (interface{}, error) {
n := v.(int)
conn.WriteInt(n)
return nil, nil
},
)
case "keys":
if len(cmd.Args) != 2 {
return nil, finn.ErrWrongNumberOfArguments
}
pattern := string(cmd.Args[1])
return m.Apply(conn, cmd, nil,
func(v interface{}) (interface{}, error) {
var keys []string
kvm.mu.RLock()
for key := range kvm.keys {
if match.Match(key, pattern) {
keys = append(keys, key)
}
}
kvm.mu.RUnlock()
sort.Strings(keys)
conn.WriteArray(len(keys))
for _, key := range keys {
conn.WriteBulkString(key)
}
return nil, nil
},
)
}
}
func (kvm *Clone) Restore(rd io.Reader) error {
kvm.mu.Lock()
defer kvm.mu.Unlock()
data, err := ioutil.ReadAll(rd)
if err != nil {
return err
}
var keys map[string][]byte
if err := json.Unmarshal(data, &keys); err != nil {
return err
}
kvm.keys = keys
return nil
}
func (kvm *Clone) Snapshot(wr io.Writer) error {
kvm.mu.RLock()
defer kvm.mu.RUnlock()
data, err := json.Marshal(kvm.keys)
if err != nil {
return err
}
if _, err := wr.Write(data); err != nil {
return err
}
return nil
}
```
The Applier Type
----------------
Every `Command()` call provides an `Applier` type which is responsible for handling all read and write operations. In the above example you will see one `m.Apply(conn, cmd, ...)` for each command.
The signature for the `Apply()` function is:
```go
func Apply(
conn redcon.Conn,
cmd redcon.Command,
mutate func() (interface{}, error),
respond func(interface{}) (interface{}, error),
) (interface{}, error)
```
- `conn` is the client connection making the call. It's possible that this value may be `nil` for commands that are being replicated on Follower nodes.
- `cmd` is the command to process.
- `mutate` is the function that handles modifying the node's data.
Passing `nil` indicates that the operation is read-only.
The `interface{}` return value will be passed to the `respond` func.
Returning an error will cancel the operation and the error will be returned to the client.
- `respond` is used for responding to the client connection. It's also used for read-only operations. The `interface{}` param is what was passed from the `mutate` function and may be `nil`.
Returning an error will cancel the operation and the error will be returned to the client.
*Please note that the `Apply()` command is required for modifying or accessing data that is shared on all of the nodes.
Optionally you can forgo the call altogether for operations that are unique to the node.*
Snapshots
---------
All Raft commands are stored in one big log file that will continue to grow. The log is stored on disk, in memory, or both. At some point the server will run out of memory or disk space.
Snapshots allows for truncating the log so that it does not take up all of the server's resources.
The two functions `Snapshot` and `Restore` are used to create a snapshot and restore a snapshot, respectively.
The `Snapshot()` function passes a writer that you can write your snapshot to.
Return `nil` to indicate that you are done writing. Returning an error will cancel the snapshot. If you want to disable snapshots altogether:
```go
func (kvm *Clone) Snapshot(wr io.Writer) error {
return finn.ErrDisabled
}
```
The `Restore()` function passes a reader that you can use to restore your snapshot from.
*Please note that the Raft cluster is active during a snapshot operation.
In the example above we use a read-lock that will force the cluster to delay all writes until the snapshot is complete.
This may not be ideal for your scenario.*
Full-featured Example
---------------------
There's a command line Redis clone that supports all of Finn's features. Print the help options:
```
go run example/clone.go -h
```
First start a single-member cluster:
```
go run example/clone.go
```
This will start the clone listening on port 7481 for client and server-to-server communication.
Next, let's set a single key, and then retrieve it:
```
$ redis-cli -p 7481 SET mykey "my value"
OK
$ redis-cli -p 7481 GET mykey
"my value"
```
Adding members:
```
go run example/clone.go -p 7482 -dir data2 -join :7481
go run example/clone.go -p 7483 -dir data3 -join :7481
```
That's it. Now if node1 goes down, node2 and node3 will continue to operate.
Built-in Raft Commands
----------------------
Here are a few commands for monitoring and managing the cluster:
- **RAFTADDPEER addr**
Adds a new member to the Raft cluster
- **RAFTREMOVEPEER addr**
Removes an existing member
- **RAFTPEERS**
Lists known peers and their status
- **RAFTLEADER**
Returns the Raft leader, if known
- **RAFTSNAPSHOT**
Triggers a snapshot operation
- **RAFTSTATE**
Returns the state of the node
- **RAFTSTATS**
Returns information and statistics for the node and cluster
Consistency and Durability
--------------------------
### Write Durability
The `Options.Durability` field has the following options:
- `Low` - fsync is managed by the operating system, less safe
- `Medium` - fsync every second, fast and safer
- `High` - fsync after every write, very durable, slower
### Read Consistency
The `Options.Consistency` field has the following options:
- `Low` - all nodes accept reads, small risk of [stale](http://stackoverflow.com/questions/1563319/what-is-stale-state) data
- `Medium` - only the leader accepts reads, itty-bitty risk of stale data during a leadership change
- `High` - only the leader accepts reads, the raft log index is incremented to guarantee no stale data
For example, setting the following options:
```go
opts := finn.Options{
	Consistency: finn.High,
	Durability:  finn.High,
}
n, err := finn.Open("data", ":7481", "", NewClone(), &opts)
```
Provides the highest level of durability and consistency.
Log Backends
------------
Finn supports the following log databases.
- [FastLog](https://github.com/tidwall/raft-fastlog) - log is stored in memory and persists to disk, very fast reads and writes, log is limited to the amount of server memory.
- [LevelDB](https://github.com/syndtr/goleveldb) - log is stored only to disk, supports large logs.
- [Bolt](https://github.com/boltdb/bolt) - log is stored only to disk, supports large logs.
Contact
-------
Josh Baker [@tidwall](http://twitter.com/tidwall)
License
-------
Finn source code is available under the MIT [License](/LICENSE).
================================================
FILE: example/clone.go
================================================
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"sort"
"strings"
"sync"
"github.com/tidwall/finn"
"github.com/tidwall/match"
"github.com/tidwall/redcon"
)
// main parses the command-line flags, builds the finn.Options, opens
// the Raft node, and then blocks forever serving client connections.
func main() {
	var port int
	var backend string
	var durability string
	var consistency string
	var loglevel string
	var join string
	var dir string
	flag.IntVar(&port, "p", 7481, "Bind port")
	flag.StringVar(&backend, "backend", "fastlog", "Raft log backend [fastlog,bolt,inmem]")
	flag.StringVar(&durability, "durability", "medium", "Log durability [low,medium,high]")
	flag.StringVar(&consistency, "consistency", "medium", "Raft consistency [low,medium,high]")
	flag.StringVar(&loglevel, "loglevel", "notice", "Log level [quiet,warning,notice,verbose,debug]")
	flag.StringVar(&dir, "dir", "data", "Data directory")
	flag.StringVar(&join, "join", "", "Join a cluster by providing an address")
	flag.Parse()

	// translate the string flags into finn.Options values; any
	// unrecognized value is a fatal usage error.
	var opts finn.Options
	switch strings.ToLower(backend) {
	default:
		log.Fatalf("invalid backend '%v'", backend)
	case "fastlog":
		opts.Backend = finn.FastLog
	case "bolt":
		opts.Backend = finn.Bolt
	case "inmem":
		opts.Backend = finn.InMem
	}
	switch strings.ToLower(durability) {
	default:
		log.Fatalf("invalid durability '%v'", durability)
	case "low":
		opts.Durability = finn.Low
	case "medium":
		opts.Durability = finn.Medium
	case "high":
		opts.Durability = finn.High
	}
	switch strings.ToLower(consistency) {
	default:
		log.Fatalf("invalid consistency '%v'", consistency)
	case "low":
		opts.Consistency = finn.Low
	case "medium":
		opts.Consistency = finn.Medium
	case "high":
		opts.Consistency = finn.High
	}
	switch strings.ToLower(loglevel) {
	default:
		log.Fatalf("invalid loglevel '%v'", loglevel)
	case "quiet":
		opts.LogOutput = ioutil.Discard
	case "warning":
		opts.LogLevel = finn.Warning
	case "notice":
		opts.LogLevel = finn.Notice
	case "verbose":
		opts.LogLevel = finn.Verbose
	case "debug":
		opts.LogLevel = finn.Debug
	}
	n, err := finn.Open(dir, fmt.Sprintf(":%d", port), join, NewClone(), &opts)
	if err != nil {
		// finn.Open already wrote the error to the log output unless
		// it was discarded; print it ourselves in that case.
		if opts.LogOutput == ioutil.Discard {
			log.Fatal(err)
		}
		// BUGFIX: previously execution fell through to n.Close() with
		// a nil Node, panicking. Always exit on an open failure.
		os.Exit(1)
	}
	defer n.Close()
	select {}
}
// Clone represents a Redis clone state machine: an in-memory
// key/value store replicated through the Raft log.
type Clone struct {
	mu   sync.RWMutex      // guards keys
	keys map[string][]byte // key -> value
}
// NewClone returns an empty, ready-to-use Clone machine.
func NewClone() *Clone {
	c := &Clone{}
	c.keys = map[string][]byte{}
	return c
}
// Command processes a single client command. Writes (SET, DEL) pass a
// mutate function to the Applier so they replicate through the Raft
// log; reads (GET, KEYS) pass a nil mutate and only a respond function.
// Unrecognized commands return finn.ErrUnknownCommand.
func (kvm *Clone) Command(m finn.Applier, conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	switch strings.ToLower(string(cmd.Args[0])) {
	default:
		return nil, finn.ErrUnknownCommand
	case "set":
		// SET key value
		if len(cmd.Args) != 3 {
			return nil, finn.ErrWrongNumberOfArguments
		}
		return m.Apply(conn, cmd,
			func() (interface{}, error) {
				// mutate: store the value under the key
				kvm.mu.Lock()
				kvm.keys[string(cmd.Args[1])] = cmd.Args[2]
				kvm.mu.Unlock()
				return nil, nil
			},
			func(v interface{}) (interface{}, error) {
				conn.WriteString("OK")
				return nil, nil
			},
		)
	case "get":
		// GET key
		if len(cmd.Args) != 2 {
			return nil, finn.ErrWrongNumberOfArguments
		}
		return m.Apply(conn, cmd, nil,
			func(interface{}) (interface{}, error) {
				kvm.mu.RLock()
				val, ok := kvm.keys[string(cmd.Args[1])]
				kvm.mu.RUnlock()
				if !ok {
					conn.WriteNull()
				} else {
					conn.WriteBulk(val)
				}
				return nil, nil
			},
		)
	case "del":
		// DEL key [key ...]
		if len(cmd.Args) < 2 {
			return nil, finn.ErrWrongNumberOfArguments
		}
		return m.Apply(conn, cmd,
			func() (interface{}, error) {
				// mutate: delete each existing key, counting removals
				var n int
				kvm.mu.Lock()
				for i := 1; i < len(cmd.Args); i++ {
					key := string(cmd.Args[i])
					if _, ok := kvm.keys[key]; ok {
						delete(kvm.keys, key)
						n++
					}
				}
				kvm.mu.Unlock()
				return n, nil
			},
			func(v interface{}) (interface{}, error) {
				// respond with the count passed from mutate
				n := v.(int)
				conn.WriteInt(n)
				return nil, nil
			},
		)
	case "keys":
		// KEYS pattern -- glob-style match, results sorted
		if len(cmd.Args) != 2 {
			return nil, finn.ErrWrongNumberOfArguments
		}
		pattern := string(cmd.Args[1])
		return m.Apply(conn, cmd, nil,
			func(v interface{}) (interface{}, error) {
				var keys []string
				kvm.mu.RLock()
				for key := range kvm.keys {
					if match.Match(key, pattern) {
						keys = append(keys, key)
					}
				}
				kvm.mu.RUnlock()
				sort.Strings(keys)
				conn.WriteArray(len(keys))
				for _, key := range keys {
					conn.WriteBulkString(key)
				}
				return nil, nil
			},
		)
	}
}
// Restore replaces the machine's key/value state with the contents of
// a snapshot previously written by Snapshot. It holds the write lock
// for the duration so commands cannot observe a partial state.
func (kvm *Clone) Restore(rd io.Reader) error {
	kvm.mu.Lock()
	defer kvm.mu.Unlock()
	data, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}
	var keys map[string][]byte
	if err := json.Unmarshal(data, &keys); err != nil {
		return err
	}
	if keys == nil {
		// BUGFIX: a snapshot containing JSON "null" (e.g. a marshaled
		// nil map) leaves keys nil, and a later SET would panic when
		// writing to the nil map. Always install a usable map.
		keys = make(map[string][]byte)
	}
	kvm.keys = keys
	return nil
}
// Snapshot writes the machine's current key/value state to wr as a
// single JSON object. A read lock is held while marshaling and
// writing, so writes are delayed until the snapshot completes.
func (kvm *Clone) Snapshot(wr io.Writer) error {
	kvm.mu.RLock()
	defer kvm.mu.RUnlock()
	data, err := json.Marshal(kvm.keys)
	if err != nil {
		return err
	}
	_, err = wr.Write(data)
	return err
}
================================================
FILE: finn.go
================================================
// Package finn provide a fast and simple Raft implementation.
package finn
import (
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/raft"
raftboltdb "github.com/tidwall/raft-boltdb"
raftfastlog "github.com/tidwall/raft-fastlog"
raftleveldb "github.com/tidwall/raft-leveldb"
raftredcon "github.com/tidwall/raft-redcon"
"github.com/tidwall/redcon"
"github.com/tidwall/redlog"
)
// Exported sentinel errors that Machine implementations may return
// from Command and Snapshot.
var (
	// ErrUnknownCommand is returned when the command is not known.
	ErrUnknownCommand = errors.New("unknown command")
	// ErrWrongNumberOfArguments is returned when the number of arguments is wrong.
	ErrWrongNumberOfArguments = errors.New("wrong number of arguments")
	// ErrDisabled is returned when a feature is disabled, e.g. from a
	// Machine.Snapshot implementation that opts out of snapshotting.
	ErrDisabled = errors.New("disabled")
)
// Internal sentinel errors; never returned to package users directly.
var (
	errInvalidCommand          = errors.New("invalid command")
	errInvalidConsistencyLevel = errors.New("invalid consistency level")
	errSyntaxError             = errors.New("syntax error")
	errInvalidResponse         = errors.New("invalid response")
)
const (
	// retainSnapshotCount is how many file snapshots are kept on disk.
	retainSnapshotCount = 2
	// raftTimeout is the timeout for raft operations; used by code
	// elsewhere in the package (not visible in this chunk).
	raftTimeout = 10 * time.Second
)
// Level defines the raft consistency level for reads; the same type
// is reused for the fsync durability level in Options.Durability.
type Level int

// String returns the human-readable name of the level, or "unknown"
// for any value outside Low/Medium/High.
func (l Level) String() string {
	switch l {
	case Low:
		return "low"
	case Medium:
		return "medium"
	case High:
		return "high"
	}
	return "unknown"
}

const (
	// Low is "low" consistency. Read-only commands can be processed by
	// any node. Very fast but may serve stale reads.
	Low Level = -1
	// Medium is "medium" consistency. Read-only commands can only be
	// processed by the leader. The command does not pass through the
	// raft log, so there is a very small (microseconds) window for a
	// stale read during a leadership change. Fast, but the leader
	// handles all reads and writes.
	Medium Level = 0
	// High is "high" consistency. All commands go through the raft
	// log. Not as fast because every command must be replicated.
	High Level = 1
)
// Backend selects the raft log database type.
type Backend int

const (
	// FastLog is a persistent in-memory raft log.
	// This is the default.
	FastLog Backend = iota
	// Bolt is a persistent disk raft log.
	Bolt
	// InMem is a non-persistent in-memory raft log.
	InMem
	// LevelDB is a persistent disk raft log.
	LevelDB
)

// String returns the lowercase name of the backend, or "unknown" for
// any unrecognized value.
func (b Backend) String() string {
	switch b {
	case FastLog:
		return "fastlog"
	case Bolt:
		return "bolt"
	case InMem:
		return "inmem"
	case LevelDB:
		return "leveldb"
	}
	return "unknown"
}
// LogLevel is used to define the verbosity of the log outputs.
// Lower values print more; see the mapping in Open.
type LogLevel int

const (
	// Debug prints everything.
	Debug LogLevel = -2
	// Verbose prints extra detail.
	Verbose LogLevel = -1
	// Notice is the standard level (the zero value, so the default).
	Notice LogLevel = 0
	// Warning only prints warnings.
	Warning LogLevel = 1
)
// Options are used to provide a Node with optional functionality.
// The zero value is usable; Open fills in defaults via fillOptions.
type Options struct {
	// Consistency is the raft consistency level for reads.
	// Default is Medium
	Consistency Level
	// Durability is the fsync durability for disk writes.
	// Default is Medium
	Durability Level
	// Backend is the database backend.
	// Default is FastLog
	Backend Backend
	// LogLevel is the log verbosity
	// Default is Notice
	LogLevel LogLevel
	// LogOutput is the log writer
	// Default is os.Stderr
	LogOutput io.Writer
	// ConnAccept is an optional function that can be used to
	// accept or deny a connection. It fires when new client
	// connections are created.
	// Return false to deny the connection.
	ConnAccept func(redcon.Conn) bool
	// ConnClosed is an optional function that fires
	// when client connections are closed.
	// If there was a network error, then the error will be
	// passed in as an argument.
	ConnClosed func(redcon.Conn, error)
}
// fillOptions returns a copy of opts with default values applied.
// A nil opts is treated as the zero Options. The caller's struct is
// never mutated.
func fillOptions(opts *Options) *Options {
	var filled Options
	if opts != nil {
		filled = *opts
	}
	if filled.LogOutput == nil {
		filled.LogOutput = os.Stderr
	}
	return &filled
}
// Logger is the leveled logging interface exposed to Machine
// implementations via Applier.Log.
type Logger interface {
	// Printf writes notice messages.
	Printf(format string, args ...interface{})
	// Verbosef writes verbose messages.
	Verbosef(format string, args ...interface{})
	// Noticef writes notice messages.
	Noticef(format string, args ...interface{})
	// Warningf writes warning messages.
	Warningf(format string, args ...interface{})
	// Debugf writes debug messages.
	Debugf(format string, args ...interface{})
}
// Applier is used to apply raft commands.
type Applier interface {
	// Apply applies a command. mutate handles modifying the node's
	// data (nil for read-only commands); its return value is passed
	// to respond, which writes the reply to conn.
	Apply(conn redcon.Conn, cmd redcon.Command,
		mutate func() (interface{}, error),
		respond func(interface{}) (interface{}, error),
	) (interface{}, error)
	// Log returns the active logger for printing messages.
	Log() Logger
}
// Machine handles raft commands and raft snapshotting. User code
// implements this interface and passes it to Open.
type Machine interface {
	// Command is called by the Node for incoming commands.
	Command(a Applier, conn redcon.Conn, cmd redcon.Command) (interface{}, error)
	// Restore is used to restore data from a snapshot.
	Restore(rd io.Reader) error
	// Snapshot is used to support log compaction. This call should write a
	// snapshot to the provided writer. Return ErrDisabled to opt out.
	Snapshot(wr io.Writer) error
}
// Node represents a Raft server node.
type Node struct {
	mu       sync.RWMutex                 // guards closed and peers
	addr     string                       // resolved TCP bind address
	snapshot raft.SnapshotStore           // file-based snapshot store
	trans    *raftredcon.RedconTransport  // redcon network transport
	raft     *raft.Raft                   // the raft instance
	log      *redlog.Logger               // the node logger
	mlog     *redlog.Logger               // the machine logger
	closed   bool                         // set by Close
	opts     *Options                     // filled options from Open
	level    Level                        // read consistency level
	handler  Machine                      // the user state machine
	store    bigStore                     // raft log/stable/peer store
	peers    map[string]string            // peer addr -> last seen state (maintained by watchPeers)
}
// bigStore represents a raft store that conforms to
// raft.PeerStore, raft.LogStore, and raft.StableStore combined,
// so one object can be passed for all three roles in raft.NewRaft.
type bigStore interface {
	Close() error
	FirstIndex() (uint64, error)
	LastIndex() (uint64, error)
	GetLog(idx uint64, log *raft.Log) error
	StoreLog(log *raft.Log) error
	StoreLogs(logs []*raft.Log) error
	DeleteRange(min, max uint64) error
	Set(k, v []byte) error
	Get(k []byte) ([]byte, error)
	SetUint64(key []byte, val uint64) error
	GetUint64(key []byte) (uint64, error)
	Peers() ([]string, error)
	SetPeers(peers []string) error
}
// Open opens a Raft node rooted at dir, binds the combined
// client/raft transport to addr, and returns the Node. If join is
// non-empty the node asks the cluster member at that address to add
// it as a peer. handler receives all client commands. opts may be
// nil for defaults. On failure the error is also written to the
// configured logger by the deferred handler below.
func Open(dir, addr, join string, handler Machine, opts *Options) (node *Node, err error) {
	opts = fillOptions(opts)
	log := redlog.New(opts.LogOutput).Sub('N')
	log.SetFilter(redlog.HashicorpRaftFilter)
	log.SetIgnoreDups(true)
	// map the package LogLevel onto redlog's numeric levels
	switch opts.LogLevel {
	case Debug:
		log.SetLevel(0)
	case Verbose:
		log.SetLevel(1)
	case Notice:
		log.SetLevel(2)
	case Warning:
		log.SetLevel(3)
	}
	// if this function fails then write the error to the logger
	defer func() {
		if err != nil {
			log.Warningf("%v", err)
		}
	}()
	// create the data directory
	if err := os.MkdirAll(dir, 0700); err != nil {
		return nil, err
	}
	// create a node and assign it some fields
	n := &Node{
		log:     log,
		mlog:    log.Sub('C'),
		opts:    opts,
		level:   opts.Consistency,
		handler: handler,
		peers:   make(map[string]string),
	}
	// open the raft log store for the requested backend. Bolt always
	// forces High durability and InMem always Low; the other backends
	// map the requested durability, defaulting to Medium.
	var store bigStore
	if opts.Backend == Bolt {
		opts.Durability = High
		store, err = raftboltdb.NewBoltStore(filepath.Join(dir, "raft.db"))
		if err != nil {
			return nil, err
		}
	} else if opts.Backend == LevelDB {
		var dur raftleveldb.Level
		switch opts.Durability {
		default:
			dur = raftleveldb.Medium
			opts.Durability = Medium
		case High:
			dur = raftleveldb.High
		case Low:
			dur = raftleveldb.Low
		}
		store, err = raftleveldb.NewLevelDBStore(filepath.Join(dir, "raft.db"), dur)
		if err != nil {
			return nil, err
		}
	} else if opts.Backend == InMem {
		opts.Durability = Low
		store, err = raftfastlog.NewFastLogStore(":memory:", raftfastlog.Low, n.log.Sub('S'))
		if err != nil {
			return nil, err
		}
	} else {
		// unrecognized values fall back to the default FastLog backend
		opts.Backend = FastLog
		var dur raftfastlog.Level
		switch opts.Durability {
		default:
			dur = raftfastlog.Medium
			opts.Durability = Medium
		case High:
			dur = raftfastlog.High
		case Low:
			dur = raftfastlog.Low
		}
		store, err = raftfastlog.NewFastLogStore(filepath.Join(dir, "raft.db"), dur, n.log.Sub('S'))
		if err != nil {
			return nil, err
		}
	}
	n.store = store
	n.log.Debugf("Consistency: %s, Durability: %s, Backend: %s", opts.Consistency, opts.Durability, opts.Backend)
	// get the peer list
	peers, err := n.store.Peers()
	if err != nil {
		n.Close()
		return nil, err
	}
	// Setup Raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = n.log
	// Allow the node to enter single-mode, potentially electing itself, if
	// explicitly enabled and there is only 1 node in the cluster already.
	if join == "" && len(peers) <= 1 {
		n.log.Noticef("Enable single node")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}
	// create the snapshot store. This allows the Raft to truncate the log.
	n.snapshot, err = raft.NewFileSnapshotStore(dir, retainSnapshotCount, n.log)
	if err != nil {
		n.Close()
		return nil, err
	}
	// verify the syntax of the address.
	taddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		n.Close()
		return nil, err
	}
	// Set the atomic flag which indicates that we can accept Redcon commands.
	var doReady uint64
	// start the raft server
	n.addr = taddr.String()
	n.trans, err = raftredcon.NewRedconTransport(
		n.addr,
		func(conn redcon.Conn, cmd redcon.Command) {
			// reject client commands until the raft instance below exists
			if atomic.LoadUint64(&doReady) != 0 {
				n.doCommand(conn, cmd)
			} else {
				conn.WriteError("ERR raft not ready")
			}
		}, opts.ConnAccept, opts.ConnClosed,
		n.log.Sub('L'),
	)
	if err != nil {
		n.Close()
		return nil, err
	}
	// Instantiate the Raft systems.
	n.raft, err = raft.NewRaft(config, (*nodeFSM)(n),
		n.store, n.store, n.snapshot, n.store, n.trans)
	if err != nil {
		n.Close()
		return nil, err
	}
	// set the atomic flag which indicates that we can accept Redcon commands.
	atomic.AddUint64(&doReady, 1)
	// if --join was specified, make the join request.
	for {
		if join != "" && len(peers) == 0 {
			if err := reqRaftJoin(join, n.addr); err != nil {
				if strings.HasPrefix(err.Error(), "TRY ") {
					// we received a "TRY addr" response. forward the
					// join request to the specified address.
					join = strings.Split(err.Error(), " ")[1]
					continue
				}
				// BUGFIX: shut down the raft, transport, and store that
				// were started above; previously they leaked when the
				// join request failed.
				n.Close()
				return nil, fmt.Errorf("failed to join node at %v: %v", join, err)
			}
		}
		break
	}
	go n.watchPeers()
	return n, nil
}
// Close closes the node: shuts down the raft instance, then the
// network transport, then the log store, and finally marks the node
// closed so watchPeers exits. Safe to call on a partially-opened
// node (each field is nil-checked). Always returns nil.
func (n *Node) Close() error {
	n.mu.Lock()
	defer n.mu.Unlock()
	// shutdown the raft, but do not handle the future error. :PPA:
	if n.raft != nil {
		n.raft.Shutdown().Error()
	}
	if n.trans != nil {
		n.trans.Close()
	}
	// close the raft database
	if n.store != nil {
		n.store.Close()
	}
	n.closed = true
	return nil
}
// Store returns the underlying storage object. Its concrete type
// depends on Options.Backend (bolt, leveldb, or fastlog store).
func (n *Node) Store() interface{} {
	return n.store
}
// watchPeers polls every known peer once per second with a RAFTSTATE
// request and caches the reported state in n.peers (read by
// doRaftPeers). Peers that time out are recorded as "Timeout"; any
// other failure, including a malformed reply, is recorded as
// "Invalid". The loop exits once the node is closed.
func (n *Node) watchPeers() {
	buf := make([]byte, 1024)
	for {
		var peers []string
		var err error
		// snapshot the peer list under lock; stop when closed.
		if !func() bool {
			n.mu.Lock()
			defer n.mu.Unlock()
			if n.closed {
				return false
			}
			peers, err = n.store.Peers()
			return true
		}() {
			return
		}
		func() {
			if err != nil {
				return
			}
			peersState := make(map[string]string)
			for _, peer := range peers {
				state, err := func() (string, error) {
					conn, err := net.DialTimeout("tcp", peer, time.Second)
					if err != nil {
						return "", err
					}
					defer conn.Close()
					if err := conn.SetDeadline(time.Now().Add(time.Second)); err != nil {
						return "", err
					}
					if _, err := conn.Write([]byte("RAFTSTATE\r\n")); err != nil {
						return "", err
					}
					n, err := conn.Read(buf)
					if err != nil {
						return "", err
					}
					// expect a bulk-string reply: "$N\r\n<state>\r\n"
					parts := strings.Split(string(buf[:n]), "\r\n")
					if len(parts) != 3 || buf[0] != '$' {
						// BUGFIX: this previously returned a nil error,
						// which silently recorded an empty state string
						// for the peer instead of "Invalid".
						return "", errors.New("invalid response")
					}
					return parts[1], nil
				}()
				if err, ok := err.(net.Error); ok && err.Timeout() {
					state = "Timeout"
				} else if err != nil {
					state = "Invalid"
				}
				peersState[peer] = state
			}
			// publish the refreshed states unless the node closed meanwhile
			n.mu.Lock()
			if !n.closed {
				n.peers = peersState
			}
			n.mu.Unlock()
		}()
		time.Sleep(time.Second)
	}
}
// Log returns the active logger for printing messages. This is the
// machine sub-logger ('C'), satisfying the Applier interface.
func (n *Node) Log() Logger {
	return n.mlog
}
// leader returns the address of the current raft leader, or an empty
// string if no leader is known.
func (n *Node) leader() string {
	return n.raft.Leader()
}
// reqRaftJoin sends a RAFTADDPEER command for raftAddr to the node
// at the join address and reports whether the peer was accepted.
// A non-"OK" reply is treated as a failure.
func reqRaftJoin(join, raftAddr string) error {
	resp, _, err := raftredcon.Do(join, nil, []byte("raftaddpeer"), []byte(raftAddr))
	if err != nil {
		return err
	}
	if string(resp) == "OK" {
		return nil
	}
	return errors.New("invalid response")
}
// scanForErrors parses a RESP pipeline buffer and returns the message
// text of each error reply ("-msg\r\n"), without the leading '-' or
// trailing CRLF. If any reply in the buffer is not an error, or a
// final error is unterminated, nil is returned.
func scanForErrors(buf []byte) [][]byte {
	var msgs [][]byte
	rest := buf
	for len(rest) > 0 {
		if rest[0] != '-' {
			// a non-error reply invalidates the whole scan
			return nil
		}
		rest = rest[1:]
		// advance past the next CRLF, capturing the message body
		for j := 1; j < len(rest); j++ {
			if rest[j] == '\n' && rest[j-1] == '\r' {
				msgs = append(msgs, rest[:j-1])
				rest = rest[j+1:]
				break
			}
		}
	}
	return msgs
}
// translateError converts an internal error into the client-facing
// message for command cmd. Not-leader errors become a "TRY addr"
// redirect when the leader is known; anything unrecognized is
// trimmed to its first line.
func (n *Node) translateError(err error, cmd string) string {
	msg := err.Error()
	switch msg {
	case ErrDisabled.Error(), ErrUnknownCommand.Error():
		return "ERR unknown command '" + cmd + "'"
	case ErrWrongNumberOfArguments.Error():
		return "ERR wrong number of arguments for '" + cmd + "' command"
	case raft.ErrNotLeader.Error():
		leader := n.raft.Leader()
		if leader == "" {
			return "ERR leader not known"
		}
		return "TRY " + leader
	}
	return strings.TrimSpace(strings.Split(msg, "\n")[0])
}
// doCommand executes a client command which is processed through the raft pipeline.
// Built-in RAFT* commands, QUIT, and PING are dispatched here; every
// other command goes to the user Machine. On error the translated
// message is written to conn (when conn is non-nil), after first
// re-translating any error replies already buffered by a pipeline.
func (n *Node) doCommand(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) == 0 {
		return nil, nil
	}
	var val interface{}
	var err error
	switch strings.ToLower(string(cmd.Args[0])) {
	default:
		// user-defined command; a disabled feature reads as unknown
		val, err = n.handler.Command((*nodeApplier)(n), conn, cmd)
		if err == ErrDisabled {
			err = ErrUnknownCommand
		}
	case "raftaddpeer":
		val, err = n.doRaftAddPeer(conn, cmd)
	case "raftremovepeer":
		val, err = n.doRaftRemovePeer(conn, cmd)
	case "raftleader":
		val, err = n.doRaftLeader(conn, cmd)
	case "raftsnapshot":
		val, err = n.doRaftSnapshot(conn, cmd)
	case "raftshrinklog":
		val, err = n.doRaftShrinkLog(conn, cmd)
	case "raftstate":
		val, err = n.doRaftState(conn, cmd)
	case "raftstats":
		val, err = n.doRaftStats(conn, cmd)
	case "raftpeers":
		val, err = n.doRaftPeers(conn, cmd)
	case "quit":
		val, err = n.doQuit(conn, cmd)
	case "ping":
		val, err = n.doPing(conn, cmd)
	}
	if err != nil && conn != nil {
		// it's possible that this was a pipelined response. If the
		// write buffer holds only error replies, rewrite each of them
		// through translateError so leader redirects are preserved.
		wr := redcon.BaseWriter(conn)
		if wr != nil {
			buf := wr.Buffer()
			rerrs := scanForErrors(buf)
			if len(rerrs) > 0 {
				wr.SetBuffer(nil)
				for _, rerr := range rerrs {
					conn.WriteError(n.translateError(errors.New(string(rerr)), string(cmd.Args[0])))
				}
			}
		}
		conn.WriteError(n.translateError(err, string(cmd.Args[0])))
	}
	return val, err
}
// doPing handles a "PING" client command. Bare PING replies with
// PONG; "PING msg" echoes msg back as a bulk string.
func (n *Node) doPing(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) == 1 {
		conn.WriteString("PONG")
		return nil, nil
	}
	if len(cmd.Args) == 2 {
		conn.WriteBulk(cmd.Args[1])
		return nil, nil
	}
	return nil, ErrWrongNumberOfArguments
}
// doRaftLeader handles a "RAFTLEADER" client command, replying with
// the leader address as a bulk string, or null when unknown.
func (n *Node) doRaftLeader(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 1 {
		return nil, ErrWrongNumberOfArguments
	}
	if leader := n.raft.Leader(); leader != "" {
		conn.WriteBulkString(leader)
	} else {
		conn.WriteNull()
	}
	return nil, nil
}
// doRaftSnapshot handles a "RAFTSNAPSHOT" client command by
// triggering a raft snapshot. Failures are reported to the client
// as an error reply rather than returned to the caller.
func (n *Node) doRaftSnapshot(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 1 {
		return nil, ErrWrongNumberOfArguments
	}
	if err := n.raft.Snapshot().Error(); err != nil {
		conn.WriteError("ERR " + err.Error())
		return nil, nil
	}
	conn.WriteString("OK")
	return nil, nil
}
// shrinkable is implemented by log stores that can compact their
// on-disk representation (used by doRaftShrinkLog).
type shrinkable interface {
	Shrink() error
}
// doRaftShrinkLog handles a "RAFTSHRINKLOG" client command. It only
// succeeds when the configured store implements the shrinkable
// interface; all failures are written to the client as error replies.
func (n *Node) doRaftShrinkLog(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 1 {
		return nil, ErrWrongNumberOfArguments
	}
	s, ok := n.store.(shrinkable)
	if !ok {
		conn.WriteError("ERR log is not shrinkable")
		return nil, nil
	}
	if err := s.Shrink(); err != nil {
		conn.WriteError("ERR " + err.Error())
		return nil, nil
	}
	conn.WriteString("OK")
	return nil, nil
}
// doRaftState handles a "RAFTSTATE" client command, replying with the
// node's current raft state (e.g. Leader/Follower) as a bulk string.
func (n *Node) doRaftState(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 1 {
		return nil, ErrWrongNumberOfArguments
	}
	state := n.raft.State()
	conn.WriteBulkString(state.String())
	return nil, nil
}
// doRaftStats handles a "RAFTSTATS" client command by replying with the
// raft stats map as a flat array of alternating key/value bulk strings,
// sorted by key for deterministic output.
func (n *Node) doRaftStats(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 1 {
		return nil, ErrWrongNumberOfArguments
	}
	n.mu.RLock()
	defer n.mu.RUnlock()
	stats := n.raft.Stats()
	keys := make([]string, 0, len(stats))
	for k := range stats {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	conn.WriteArray(2 * len(keys))
	for _, k := range keys {
		conn.WriteBulkString(k)
		conn.WriteBulkString(stats[k])
	}
	return nil, nil
}
// doRaftPeers handles a "RAFTPEERS" client command by replying with the
// known peers and their states as a flat array of alternating
// address/state bulk strings, sorted by address for deterministic
// output.
func (n *Node) doRaftPeers(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 1 {
		return nil, ErrWrongNumberOfArguments
	}
	// snapshot the peer map while holding the read lock, then release
	// it before writing the response.
	states := make(map[string]string)
	n.mu.RLock()
	for peer, state := range n.peers {
		states[peer] = state
	}
	n.mu.RUnlock()
	addrs := make([]string, 0, len(states))
	for peer := range states {
		addrs = append(addrs, peer)
	}
	sort.Strings(addrs)
	conn.WriteArray(2 * len(addrs))
	for _, peer := range addrs {
		conn.WriteBulkString(peer)
		conn.WriteBulkString(states[peer])
	}
	return nil, nil
}
// doQuit handles a "QUIT" client command: acknowledge with OK, then
// close the connection.
func (n *Node) doQuit(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	defer conn.Close()
	conn.WriteString("OK")
	return nil, nil
}
// doRaftAddPeer handles a "RAFTADDPEER address" client command by
// asking raft to add the given peer address to the cluster. Any raft
// failure is returned to the caller for translation into a client
// error reply.
func (n *Node) doRaftAddPeer(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 2 {
		return nil, ErrWrongNumberOfArguments
	}
	addr := string(cmd.Args[1])
	n.log.Noticef("Received add peer request from %v", addr)
	// evaluate the future's error once instead of calling Error()
	// twice (once for the check and once for the return).
	if err := n.raft.AddPeer(addr).Error(); err != nil {
		return nil, err
	}
	n.log.Noticef("Node %v added successfully", addr)
	conn.WriteString("OK")
	return nil, nil
}
// doRaftRemovePeer handles a "RAFTREMOVEPEER address" client command by
// asking raft to remove the given peer address from the cluster. Any
// raft failure is returned to the caller for translation into a client
// error reply.
func (n *Node) doRaftRemovePeer(conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	if len(cmd.Args) != 2 {
		return nil, ErrWrongNumberOfArguments
	}
	addr := string(cmd.Args[1])
	n.log.Noticef("Received remove peer request from %v", addr)
	// evaluate the future's error once instead of calling Error()
	// twice (once for the check and once for the return).
	if err := n.raft.RemovePeer(addr).Error(); err != nil {
		return nil, err
	}
	n.log.Noticef("Node %v detached successfully", addr)
	conn.WriteString("OK")
	return nil, nil
}
// raftApplyCommand encodes a series of args into a raft command and
// applies it to the index. When the FSM responded with an error value,
// that error is returned rather than the response.
func (n *Node) raftApplyCommand(cmd redcon.Command) (interface{}, error) {
	f := n.raft.Apply(cmd.Raw, raftTimeout)
	if err := f.Error(); err != nil {
		return nil, err
	}
	resp := f.Response()
	if rerr, ok := resp.(error); ok {
		return nil, rerr
	}
	return resp, nil
}
// raftLevelGuard gates readonly commands on the configured consistency
// level. It either:
//   - low consistency: processes the command without concern about
//     leadership or cluster state.
//   - medium consistency: requires that this node is the leader.
//   - high consistency: sends a blank command through the raft
//     pipeline to ensure that the node is the leader, the raft index
//     is incremented, and that the cluster is sane before processing
//     the readonly command.
func (n *Node) raftLevelGuard() error {
	switch n.level {
	case Low:
		// anything goes.
		return nil
	case Medium:
		// must be the leader.
		if n.raft.State() == raft.Leader {
			return nil
		}
		return raft.ErrNotLeader
	case High:
		// process a blank command. this will update the raft log index
		// and allow for readonly commands to process in order without
		// serializing the actual command.
		f := n.raft.Apply(nil, raftTimeout)
		if err := f.Error(); err != nil {
			return err
		}
		// the blank command succeeded; check whether the FSM response
		// was an error and return that.
		switch v := f.Response().(type) {
		case nil:
			return nil
		case error:
			return v
		default:
			return errInvalidResponse
		}
	default:
		// a valid level is required.
		return errInvalidConsistencyLevel
	}
}
// nodeApplier exposes the Applier interface of the Node type.
type nodeApplier Node
// Apply executes a command through raft.
// The mutate param should be set to nil for readonly commands.
// The respond param is required and any response to conn happens there.
// The return value from mutate is passed into the respond param.
func (m *nodeApplier) Apply(
	conn redcon.Conn,
	cmd redcon.Command,
	mutate func() (interface{}, error),
	respond func(interface{}) (interface{}, error),
) (interface{}, error) {
	switch {
	case mutate == nil:
		// readonly command: no apply, just do a level guard.
		if err := (*Node)(m).raftLevelGuard(); err != nil {
			return nil, err
		}
		return respond(nil)
	case conn == nil:
		// this is happening on a follower node.
		return mutate()
	default:
		// this is happening on the leader node:
		// apply the command to the raft log.
		val, err := (*Node)(m).raftApplyCommand(cmd)
		if err != nil {
			return nil, err
		}
		return respond(val)
	}
}
// Log returns the active logger for printing messages by delegating to
// the underlying Node.
func (m *nodeApplier) Log() Logger {
	return (*Node)(m).Log()
}
// nodeFSM exposes the raft.FSM interface of the Node type.
type nodeFSM Node
// Apply applies a Raft log entry to the key-value store. Blank entries
// are a no-op. Errors from parsing or command execution are returned
// as the FSM response value.
func (m *nodeFSM) Apply(l *raft.Log) interface{} {
	if len(l.Data) == 0 {
		// blank data, nothing to apply.
		return nil
	}
	cmd, perr := redcon.Parse(l.Data)
	if perr != nil {
		return perr
	}
	val, cerr := (*Node)(m).doCommand(nil, cmd)
	if cerr != nil {
		return cerr
	}
	return val
}
// Restore restores the key-value store to a previous state by handing
// the snapshot reader to the application handler. The reader is always
// closed before returning.
func (m *nodeFSM) Restore(rc io.ReadCloser) error {
	defer rc.Close()
	node := (*Node)(m)
	return node.handler.Restore(rc)
}
// Persist writes the snapshot to the given sink by delegating to the
// application handler. On handler failure the sink is cancelled;
// otherwise the sink is closed and any close error is reported.
func (m *nodeFSM) Persist(sink raft.SnapshotSink) error {
	if err := (*Node)(m).handler.Snapshot(sink); err != nil {
		sink.Cancel()
		return err
	}
	// the Close error was previously discarded; a failed close can
	// mean an incomplete snapshot, so surface it to raft.
	return sink.Close()
}
// Release is a no-op; there is no per-snapshot state to free.
func (m *nodeFSM) Release() {}
// Snapshot returns a snapshot of the key-value store. The nodeFSM
// itself serves as the raft.FSMSnapshot (see Persist/Release), so no
// state is copied here; serialization happens in Persist.
func (m *nodeFSM) Snapshot() (raft.FSMSnapshot, error) {
	return m, nil
}
================================================
FILE: finn_test.go
================================================
package finn
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/tidwall/raft-redcon"
"github.com/tidwall/redcon"
)
// KVM is a minimal in-memory key-value machine used as the test
// application state driven by the raft log.
type KVM struct {
	mu   sync.RWMutex      // guards keys
	keys map[string][]byte // the key-value data
}
// NewKVM returns an empty, ready-to-use key-value machine.
func NewKVM() *KVM {
	kvm := new(KVM)
	kvm.keys = make(map[string][]byte)
	return kvm
}
// Command implements the finn command handler for the test machine.
// SET key value is a mutating command; GET key is a readonly command;
// anything else is unknown.
func (kvm *KVM) Command(m Applier, conn redcon.Conn, cmd redcon.Command) (interface{}, error) {
	name := strings.ToLower(string(cmd.Args[0]))
	if name == "set" {
		if len(cmd.Args) != 3 {
			return nil, ErrWrongNumberOfArguments
		}
		mutate := func() (interface{}, error) {
			kvm.mu.Lock()
			defer kvm.mu.Unlock()
			kvm.keys[string(cmd.Args[1])] = cmd.Args[2]
			return nil, nil
		}
		respond := func(interface{}) (interface{}, error) {
			conn.WriteString("OK")
			return nil, nil
		}
		return m.Apply(conn, cmd, mutate, respond)
	}
	if name == "get" {
		if len(cmd.Args) != 2 {
			return nil, ErrWrongNumberOfArguments
		}
		respond := func(interface{}) (interface{}, error) {
			kvm.mu.RLock()
			defer kvm.mu.RUnlock()
			val, ok := kvm.keys[string(cmd.Args[1])]
			if ok {
				conn.WriteBulk(val)
			} else {
				conn.WriteNull()
			}
			return nil, nil
		}
		return m.Apply(conn, cmd, nil, respond)
	}
	return nil, ErrUnknownCommand
}
// Restore replaces the machine's contents with the JSON snapshot read
// from rd.
func (kvm *KVM) Restore(rd io.Reader) error {
	kvm.mu.Lock()
	defer kvm.mu.Unlock()
	data, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}
	var snapshot map[string][]byte
	if err = json.Unmarshal(data, &snapshot); err != nil {
		return err
	}
	kvm.keys = snapshot
	return nil
}
// Snapshot writes the machine's contents to wr as JSON.
func (kvm *KVM) Snapshot(wr io.Writer) error {
	kvm.mu.RLock()
	defer kvm.mu.RUnlock()
	data, err := json.Marshal(kvm.keys)
	if err == nil {
		_, err = wr.Write(data)
	}
	return err
}
// killed tracks which test clusters (keyed by base port) have been
// asked to shut down; killCond signals waiting nodes of changes.
var killed = make(map[int]bool)
var killCond = sync.NewCond(&sync.Mutex{})
// killNodes signals every node of the cluster at basePort to shut down.
func killNodes(basePort int) {
	killCond.L.Lock()
	defer killCond.L.Unlock()
	killed[basePort] = true
	killCond.Broadcast()
}
// startTestNode starts cluster node number num for the cluster rooted
// at basePort and blocks until killNodes(basePort) is called. Node 0
// bootstraps the cluster; other nodes join through the node at
// basePort.
func startTestNode(t testing.TB, basePort int, num int, opts *Options) {
	node := fmt.Sprintf("%d", num)
	if err := os.MkdirAll("data/"+node, 0700); err != nil {
		t.Fatal(err)
	}
	// node is fmt.Sprintf("%d", num) and therefore never empty; the
	// previous `if node == "" { node = "0" }` branch was dead code and
	// has been removed.
	addr := fmt.Sprintf(":%d", basePort/10*10+num)
	join := ""
	if node != "0" {
		join = fmt.Sprintf(":%d", basePort)
	}
	n, err := Open("data/"+node, addr, join, NewKVM(), opts)
	if err != nil {
		t.Fatal(err)
	}
	defer n.Close()
	// standard condition-variable wait: block until this cluster is
	// marked killed.
	killCond.L.Lock()
	defer killCond.L.Unlock()
	for !killed[basePort] {
		killCond.Wait()
	}
}
// waitFor blocks until the given node responds to the RAFTLEADER
// command with a non-empty leader, failing the test after 10 seconds.
func waitFor(t testing.TB, basePort, node int) {
	target := fmt.Sprintf(":%d", basePort/10*10+node)
	start := time.Now()
	for {
		// idiomatic time.Since instead of time.Now().Sub(start).
		if time.Since(start) > 10*time.Second {
			t.Fatal("timeout looking for leader")
		}
		time.Sleep(time.Second / 4)
		resp, _, err := raftredcon.Do(target, nil, []byte("raftleader"))
		if err != nil {
			// node not reachable yet; keep polling.
			continue
		}
		if len(resp) != 0 {
			return
		}
	}
}
// testDo issues a redis-style command to the given node and verifies
// the reply. An error reply is accepted when its text equals expect.
// The special expect value "???" accepts any reply, which is returned
// for further inspection by the caller.
func testDo(t testing.TB, basePort, node int, expect string, args ...string) string {
	bargs := make([][]byte, 0, len(args))
	for _, a := range args {
		bargs = append(bargs, []byte(a))
	}
	target := fmt.Sprintf(":%d", basePort/10*10+node)
	resp, _, err := raftredcon.Do(target, nil, bargs...)
	if err != nil {
		if err.Error() == expect {
			return ""
		}
		t.Fatalf("node %d: %v", node, err)
	}
	got := string(resp)
	if expect != "???" && got != expect {
		t.Fatalf("node %d: expected '%v', got '%v'", node, expect, got)
	}
	return got
}
// TestVarious runs the small unit subtests that do not need a cluster.
func TestVarious(t *testing.T) {
	t.Run("Level", SubTestLevel)
	t.Run("Backend", SubTestBackend)
}
// SubTestLevel verifies the string form of each consistency level,
// including an out-of-range value.
func SubTestLevel(t *testing.T) {
	cases := []struct {
		level Level
		want  string
	}{
		{Level(-99), "unknown"},
		{Low, "low"},
		{Medium, "medium"},
		{High, "high"},
	}
	for _, c := range cases {
		if got := c.level.String(); got != c.want {
			t.Fatalf("expecting '%v', got '%v'", c.want, got)
		}
	}
}
// SubTestBackend verifies the string form of each backend choice,
// including an out-of-range value.
func SubTestBackend(t *testing.T) {
	cases := []struct {
		backend Backend
		want    string
	}{
		{Backend(-99), "unknown"},
		{FastLog, "fastlog"},
		{Bolt, "bolt"},
		{LevelDB, "leveldb"},
		{InMem, "inmem"},
	}
	for _, c := range cases {
		if got := c.backend.String(); got != c.want {
			t.Fatalf("expecting '%v', got '%v'", c.want, got)
		}
	}
}
// TestCluster runs the full integration suite once for every
// combination of backend and consistency level. Each combination gets
// its own base port block (7480, 7490, ...) and a fresh 3-node cluster
// that is torn down via killNodes when the subtest finishes.
func TestCluster(t *testing.T) {
	var optsArr []Options
	for _, backend := range []Backend{LevelDB, Bolt, FastLog, InMem} {
		for _, consistency := range []Level{Low, Medium, High} {
			optsArr = append(optsArr, Options{
				Backend:     backend,
				Consistency: consistency,
			})
		}
	}
	for i := 0; i < len(optsArr); i++ {
		func() {
			opts := optsArr[i]
			// set LOG=1 in the environment to see node output.
			if os.Getenv("LOG") != "1" {
				opts.LogOutput = ioutil.Discard
			}
			// each combination gets a distinct block of 10 ports.
			basePort := (7480/10 + i) * 10
			tag := fmt.Sprintf("%v-%v-%d", opts.Backend, opts.Consistency, basePort)
			t.Logf("%s", tag)
			t.Run(tag, func(t *testing.T) {
				os.RemoveAll("data")
				defer os.RemoveAll("data")
				defer killNodes(basePort)
				// boot a three node cluster, waiting for each node to
				// respond before starting the next.
				for i := 0; i < 3; i++ {
					go startTestNode(t, basePort, i, &opts)
					waitFor(t, basePort, i)
				}
				t.Run("Leader", func(t *testing.T) { SubTestLeader(t, basePort, &opts) })
				t.Run("Set", func(t *testing.T) { SubTestSet(t, basePort, &opts) })
				t.Run("Get", func(t *testing.T) { SubTestGet(t, basePort, &opts) })
				t.Run("Snapshot", func(t *testing.T) { SubTestSnapshot(t, basePort, &opts) })
				t.Run("Ping", func(t *testing.T) { SubTestPing(t, basePort, &opts) })
				t.Run("RaftShrinkLog", func(t *testing.T) { SubTestRaftShrinkLog(t, basePort, &opts) })
				t.Run("RaftStats", func(t *testing.T) { SubTestRaftStats(t, basePort, &opts) })
				t.Run("RaftState", func(t *testing.T) { SubTestRaftState(t, basePort, &opts) })
				t.Run("AddPeer", func(t *testing.T) { SubTestAddPeer(t, basePort, &opts) })
				t.Run("RemovePeer", func(t *testing.T) { SubTestRemovePeer(t, basePort, &opts) })
			})
		}()
	}
}
// SubTestLeader checks that all three nodes agree the leader is the
// node at basePort.
func SubTestLeader(t *testing.T, basePort int, opts *Options) {
	leader := fmt.Sprintf(":%d", basePort)
	for node := 0; node < 3; node++ {
		testDo(t, basePort, node, leader, "raftleader")
	}
}
// SubTestSet checks that SET succeeds on the leader and that followers
// redirect the write with a TRY response.
func SubTestSet(t *testing.T, basePort int, opts *Options) {
	leader := fmt.Sprintf(":%d", basePort)
	testDo(t, basePort, 0, "OK", "set", "hello", "world")
	for node := 1; node < 3; node++ {
		testDo(t, basePort, node, "TRY "+leader, "set", "hello", "world")
	}
}
// SubTestGet checks that GET returns the previously written value on
// the leader and that followers redirect writes with TRY.
func SubTestGet(t *testing.T, basePort int, opts *Options) {
	baseAddr := fmt.Sprintf(":%d", basePort)
	testDo(t, basePort, 0, "world", "get", "hello")
	// NOTE(review): these follower calls issue "set", not "get" —
	// possibly intentional so the TRY redirect is expected at every
	// consistency level, but it looks like a copy-paste from
	// SubTestSet; verify.
	testDo(t, basePort, 1, "TRY "+baseAddr, "set", "hello", "world")
	testDo(t, basePort, 2, "TRY "+baseAddr, "set", "hello", "world")
}
// SubTestPing checks PING on every node: bare PING, the one-argument
// echo form, and the arity error with two arguments.
func SubTestPing(t *testing.T, basePort int, opts *Options) {
	for node := 0; node < 3; node++ {
		testDo(t, basePort, node, "PONG", "ping")
		testDo(t, basePort, node, "HELLO", "ping", "HELLO")
		testDo(t, basePort, node, "ERR wrong number of arguments for 'ping' command", "ping", "HELLO", "WORLD")
	}
}
// SubTestRaftShrinkLog checks RAFTSHRINKLOG on every node. Bolt and
// LevelDB backends do not support shrinking and report an error.
func SubTestRaftShrinkLog(t *testing.T, basePort int, opts *Options) {
	canShrink := opts.Backend != Bolt && opts.Backend != LevelDB
	for node := 0; node < 3; node++ {
		if canShrink {
			testDo(t, basePort, node, "OK", "raftshrinklog")
		} else {
			testDo(t, basePort, node, "ERR log is not shrinkable", "raftshrinklog")
		}
		testDo(t, basePort, node, "ERR wrong number of arguments for 'raftshrinklog' command", "raftshrinklog", "abc")
	}
}
// SubTestRaftStats checks that RAFTSTATS includes expected stat keys on
// every node and that extra arguments are rejected.
func SubTestRaftStats(t *testing.T, basePort int, opts *Options) {
	for node := 0; node < 3; node++ {
		resp := testDo(t, basePort, node, "???", "raftstats")
		hasKeys := strings.Contains(resp, "applied_index") && strings.Contains(resp, "num_peers")
		if !hasKeys {
			t.Fatal("expected values")
		}
		testDo(t, basePort, node, "ERR wrong number of arguments for 'raftstats' command", "raftstats", "abc")
	}
}
// SubTestRaftState checks that node 0 reports Leader and the others
// report Follower, and that extra arguments are rejected.
func SubTestRaftState(t *testing.T, basePort int, opts *Options) {
	for node := 0; node < 3; node++ {
		want := "Follower"
		if node == 0 {
			want = "Leader"
		}
		testDo(t, basePort, node, want, "raftstate")
		testDo(t, basePort, node, "ERR wrong number of arguments for 'raftstate' command", "raftstate", "abc")
	}
}
// SubTestSnapshot loads 1000 keys through the leader and then asks
// every node to take a snapshot.
func SubTestSnapshot(t *testing.T, basePort int, opts *Options) {
	for i := 0; i < 1000; i++ {
		key := fmt.Sprintf("key:%d", i)
		val := fmt.Sprintf("val:%d", i)
		testDo(t, basePort, 0, "OK", "set", key, val)
	}
	for node := 0; node < 3; node++ {
		testDo(t, basePort, node, "OK", "raftsnapshot")
	}
}
// SubTestAddPeer starts a fourth node, waits for it to join the
// cluster, and verifies that it behaves like a follower.
func SubTestAddPeer(t *testing.T, basePort int, opts *Options) {
	leader := fmt.Sprintf(":%d", basePort)
	go startTestNode(t, basePort, 3, opts)
	waitFor(t, basePort, 3)
	testDo(t, basePort, 3, leader, "raftleader")
	testDo(t, basePort, 3, "TRY "+leader, "set", "hello", "world")
	testDo(t, basePort, 3, "OK", "raftsnapshot")
}
// SubTestRemovePeer removes the fourth node (started by SubTestAddPeer)
// from the cluster: a follower redirects the request with TRY, the
// leader performs it, and a repeated removal reports the peer unknown.
func SubTestRemovePeer(t *testing.T, basePort int, opts *Options) {
	baseAddr := fmt.Sprintf(":%d", basePort)
	// ":%d3" appends the digit 3 to basePort/10, producing the fourth
	// node's address (e.g. base 7490 -> ":7493"); assumes basePort is
	// a multiple of 10 — TODO confirm this stays true for all ports.
	testDo(t, basePort, 1, "TRY "+baseAddr, "raftremovepeer", fmt.Sprintf(":%d3", basePort/10))
	testDo(t, basePort, 0, "OK", "raftremovepeer", fmt.Sprintf(":%d3", basePort/10))
	testDo(t, basePort, 0, "peer is unknown", "raftremovepeer", fmt.Sprintf(":%d3", basePort/10))
}
// BenchmarkCluster boots a three node cluster on the 7480 port block
// and runs the pipelined benchmarks at increasing pipeline depths.
// NOTE(review): unlike TestCluster there is no killNodes call here, so
// the node goroutines run until the benchmark binary exits.
func BenchmarkCluster(t *testing.B) {
	os.RemoveAll("data")
	defer os.RemoveAll("data")
	for i := 0; i < 3; i++ {
		go startTestNode(t, 7480, i, &Options{LogOutput: ioutil.Discard})
		waitFor(t, 7480, i)
	}
	t.Run("PL", func(t *testing.B) {
		// pipeline depths to benchmark.
		pl := []int{1, 4, 16, 64}
		for i := 0; i < len(pl); i++ {
			func(pl int) {
				t.Run(fmt.Sprintf("%d", pl), func(t *testing.B) {
					t.Run("Ping", func(t *testing.B) { SubBenchmarkPing(t, pl) })
					t.Run("Set", func(t *testing.B) { SubBenchmarkSet(t, pl) })
					t.Run("Get", func(t *testing.B) { SubBenchmarkGet(t, pl) })
				})
			}(pl[i])
		}
	})
}
// testDial opens a raw TCP connection to benchmark node number node on
// the 748x port block and wraps it in a buffered reader/writer pair.
func testDial(t testing.TB, node int) (net.Conn, *bufio.ReadWriter) {
	addr := fmt.Sprintf(":748%d", node)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		t.Fatal(err)
	}
	rd := bufio.NewReader(conn)
	wr := bufio.NewWriter(conn)
	return conn, bufio.NewReadWriter(rd, wr)
}
// buildCommand encodes args as a RESP multi-bulk command, e.g.
// ("set","k","v") -> "*3\r\n$3\r\nset\r\n$1\r\nk\r\n$1\r\nv\r\n".
func buildCommand(args ...string) []byte {
	out := make([]byte, 0, 16)
	out = append(out, '*')
	out = strconv.AppendInt(out, int64(len(args)), 10)
	out = append(out, "\r\n"...)
	for _, arg := range args {
		out = append(out, '$')
		out = strconv.AppendInt(out, int64(len(arg)), 10)
		out = append(out, "\r\n"...)
		out = append(out, arg...)
		out = append(out, "\r\n"...)
	}
	return out
}
// testConnDo writes cmd to the pipeline pl times, flushes, and then
// verifies that each of the pl replies exactly equals expect.
func testConnDo(t testing.TB, rw *bufio.ReadWriter, pl int, expect string, cmd []byte) {
	for i := 0; i < pl; i++ {
		rw.Write(cmd)
	}
	if err := rw.Flush(); err != nil {
		t.Fatal(err)
	}
	reply := make([]byte, len(expect))
	for i := 0; i < pl; i++ {
		if _, err := io.ReadFull(rw, reply); err != nil {
			t.Fatal(err)
		}
		if string(reply) != expect {
			t.Fatalf("expected '%v', got '%v'", expect, string(reply))
		}
	}
}
// SubBenchmarkPing measures PING throughput at the given pipeline
// depth against node 0.
func SubBenchmarkPing(t *testing.B, pipeline int) {
	conn, rw := testDial(t, 0)
	defer conn.Close()
	t.ResetTimer()
	for i := 0; i < t.N; i += pipeline {
		// don't overshoot t.N on the final batch.
		batch := t.N - i
		if batch > pipeline {
			batch = pipeline
		}
		testConnDo(t, rw, batch, "+PONG\r\n", []byte("*1\r\n$4\r\nPING\r\n"))
	}
}
// SubBenchmarkSet measures SET throughput at the given pipeline depth
// against node 0.
func SubBenchmarkSet(t *testing.B, pipeline int) {
	conn, rw := testDial(t, 0)
	defer conn.Close()
	t.ResetTimer()
	for i := 0; i < t.N; i += pipeline {
		// don't overshoot t.N on the final batch.
		batch := t.N - i
		if batch > pipeline {
			batch = pipeline
		}
		testConnDo(t, rw, batch, "+OK\r\n", buildCommand("set", fmt.Sprintf("key:%d", i), fmt.Sprintf("val:%d", i)))
	}
}
// SubBenchmarkGet measures GET throughput (of a missing key, so the
// reply is a null bulk) at the given pipeline depth against node 0.
func SubBenchmarkGet(t *testing.B, pipeline int) {
	conn, rw := testDial(t, 0)
	defer conn.Close()
	t.ResetTimer()
	for i := 0; i < t.N; i += pipeline {
		// don't overshoot t.N on the final batch.
		batch := t.N - i
		if batch > pipeline {
			batch = pipeline
		}
		testConnDo(t, rw, batch, "$-1\r\n", buildCommand("get", "key:na"))
	}
}
================================================
FILE: go.mod
================================================
module github.com/tidwall/finn
go 1.13
require (
github.com/armon/go-metrics v0.3.0 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/garyburd/redigo v1.6.0 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/raft v0.1.0
github.com/syndtr/goleveldb v1.0.0 // indirect
github.com/tidwall/match v1.0.1 // indirect
github.com/tidwall/raft-boltdb v0.0.0-20160909211738-25b87f2c5677
github.com/tidwall/raft-fastlog v0.0.0-20160922202426-2f0d0a0ce558
github.com/tidwall/raft-leveldb v0.0.0-20170127185243-ada471496dc9
github.com/tidwall/raft-redcon v0.1.0
github.com/tidwall/redcon v1.0.0
github.com/tidwall/redlog v0.0.0-20180507234857-bbed90f29893
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
)
================================================
FILE: go.sum
================================================
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/raft v0.0.0-20160824023112-5f09c4ffdbcd h1:gN6xm3iAclW5DKJWYiXO8tZN25Zy7UsB6Wh/85OB8Bg=
github.com/hashicorp/raft v0.0.0-20160824023112-5f09c4ffdbcd/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
github.com/hashicorp/raft v0.1.0 h1:OC+j7LWkv7x8s9c5wnXCEgtP1J0LDw2fKNxUiYCZFNo=
github.com/hashicorp/raft v0.1.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/raft v1.0.0 h1:XuXkumePQBVcBYtfC57f7dXRM4WBZei3lZkK06+0D8I=
github.com/tidwall/raft v1.0.0/go.mod h1:uMALL7ToL5LoHWCGwLE1uROh9W0ormbwrdxg2uSo7Oo=
github.com/tidwall/raft-boltdb v0.0.0-20160909211738-25b87f2c5677 h1:8FkXr+GCV4wb8WAct/V1vKB/Ivy11Y+fm919EHgdfWA=
github.com/tidwall/raft-boltdb v0.0.0-20160909211738-25b87f2c5677/go.mod h1:O7b2tvwZmC+IFu8djLOZj0jc/tjssDPiJ8xIt+U2jTU=
github.com/tidwall/raft-boltdb v0.0.0-20180905173017-ae4e25b230d8 h1:D9uqhiFILz+qx8y2LtX70pDvjCYXEghnUTZ934M+fuY=
github.com/tidwall/raft-boltdb v0.0.0-20180905173017-ae4e25b230d8/go.mod h1:O7b2tvwZmC+IFu8djLOZj0jc/tjssDPiJ8xIt+U2jTU=
github.com/tidwall/raft-fastlog v0.0.0-20160922202426-2f0d0a0ce558 h1:hQYEIfMzrH6LRzjz7Jp5Rv8jrty1bAR5M0DjOYSxxks=
github.com/tidwall/raft-fastlog v0.0.0-20160922202426-2f0d0a0ce558/go.mod h1:KNwBhka/a5Ucw5bfEzKHTEKuCO2Do1tKs+kDdu3Sbb4=
github.com/tidwall/raft-fastlog v0.0.0-20190329194628-f798a12ed2b3 h1:Km24Wbatpk4a0cQlmW1lGvyjzDD2biQlaqtqR1G7Cic=
github.com/tidwall/raft-fastlog v0.0.0-20190329194628-f798a12ed2b3/go.mod h1:KNwBhka/a5Ucw5bfEzKHTEKuCO2Do1tKs+kDdu3Sbb4=
github.com/tidwall/raft-leveldb v0.0.0-20170127185243-ada471496dc9 h1:Z5QMqF/MSuvnrTibHqs/xx+ZE5gypLV02YU8Ry4kJ7A=
github.com/tidwall/raft-leveldb v0.0.0-20170127185243-ada471496dc9/go.mod h1:KNAMyK8s/oUOTbIL/T07fTL6/EgJfHhK8XeeEPq35eU=
github.com/tidwall/raft-leveldb v0.0.0-20180905172604-d81b19dd795a h1:wSOV25XXv0kdoWUEqCYEgaPAgWm5mdi3c1wkisYdQaM=
github.com/tidwall/raft-leveldb v0.0.0-20180905172604-d81b19dd795a/go.mod h1:KNAMyK8s/oUOTbIL/T07fTL6/EgJfHhK8XeeEPq35eU=
github.com/tidwall/raft-leveldb v0.0.0-20190319171839-8607dc18110d h1:DypM2TD6Pdev1QH5WrwLO09jB1oq7KvWpYvnqCS3Vow=
github.com/tidwall/raft-leveldb v0.0.0-20190319171839-8607dc18110d/go.mod h1:KNAMyK8s/oUOTbIL/T07fTL6/EgJfHhK8XeeEPq35eU=
github.com/tidwall/raft-redcon v0.1.0 h1:qwYaFaAVNFleY2EFm0j7UK4vEpoNa19ohH7U4idbg+s=
github.com/tidwall/raft-redcon v0.1.0/go.mod h1:YhoECfJs8MXbrwak9H7wKYDMZ3rMaB7el7zZ7MRw9Xw=
github.com/tidwall/redcon v1.0.0 h1:D4AzzJ81Afeh144fgnj5H0aSVPBBJ5RI9Rzj0zThU+E=
github.com/tidwall/redcon v1.0.0/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
github.com/tidwall/redlog v0.0.0-20180507234857-bbed90f29893 h1:aGyVYs0o1pThR9i+SuYCG/VqWibHkUXl9kIMZGhAXDw=
github.com/tidwall/redlog v0.0.0-20180507234857-bbed90f29893/go.mod h1:NssoNA+Uwqd5WHKkVwAzO7AT6VuG3wiC8r5nBqds3Ao=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=