[lxc-devel] [lxd/master] vendor: Temporary Raft vendoring

stgraber on Github <lxc-bot@linuxcontainers.org>
Tue May 21 18:49:28 UTC 2019


From 196b526dbf3beb61e7d7298435b94010127fb8e7 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgraber@ubuntu.com>
Date: Tue, 21 May 2019 14:47:17 -0400
Subject: [PATCH] vendor: Temporary Raft vendoring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgraber@ubuntu.com>
---
 .../github.com/CanonicalLtd/go-dqlite/AUTHORS |    1 +
 .../github.com/CanonicalLtd/go-dqlite/LICENSE |  201 ++
 .../CanonicalLtd/go-dqlite/Makefile           |    4 +
 .../CanonicalLtd/go-dqlite/README.md          |   99 +
 .../CanonicalLtd/go-dqlite/cluster.go         |  123 +
 .../go-dqlite/cmd/dqlite/bench.go             |  130 +
 .../go-dqlite/cmd/dqlite/delete.go            |   40 +
 .../CanonicalLtd/go-dqlite/cmd/dqlite/dump.go |   78 +
 .../CanonicalLtd/go-dqlite/cmd/dqlite/main.go |   12 +
 .../CanonicalLtd/go-dqlite/cmd/dqlite/root.go |   26 +
 .../CanonicalLtd/go-dqlite/config.go          |   31 +
 .../github.com/CanonicalLtd/go-dqlite/doc.go  |   17 +
 .../CanonicalLtd/go-dqlite/driver.go          |  646 +++++
 .../github.com/CanonicalLtd/go-dqlite/fsm.go  |   17 +
 .../go-dqlite/internal/bindings/build.go      |    6 +
 .../go-dqlite/internal/bindings/cluster.go    |  249 ++
 .../go-dqlite/internal/bindings/config.go     |   39 +
 .../go-dqlite/internal/bindings/conn.go       |  132 +
 .../go-dqlite/internal/bindings/datatype.go   |   23 +
 .../go-dqlite/internal/bindings/errors.go     |   58 +
 .../go-dqlite/internal/bindings/logger.go     |  117 +
 .../go-dqlite/internal/bindings/server.go     |  223 ++
 .../go-dqlite/internal/bindings/status.go     |   43 +
 .../go-dqlite/internal/bindings/testing.go    |   24 +
 .../go-dqlite/internal/bindings/vfs.go        |  106 +
 .../go-dqlite/internal/bindings/wal.go        |   41 +
 .../internal/bindings/wal_replication.go      |  409 +++
 .../go-dqlite/internal/client/buffer.go       |   11 +
 .../go-dqlite/internal/client/client.go       |  322 +++
 .../go-dqlite/internal/client/config.go       |   14 +
 .../go-dqlite/internal/client/connector.go    |  231 ++
 .../go-dqlite/internal/client/dial.go         |   23 +
 .../go-dqlite/internal/client/errors.go       |   29 +
 .../go-dqlite/internal/client/message.go      |  585 +++++
 .../go-dqlite/internal/client/request.go      |   98 +
 .../go-dqlite/internal/client/response.go     |  213 ++
 .../go-dqlite/internal/client/schema.go       |   26 +
 .../go-dqlite/internal/client/schema.sh       |  150 ++
 .../go-dqlite/internal/client/store.go        |   48 +
 .../go-dqlite/internal/connection/open.go     |   76 +
 .../go-dqlite/internal/connection/snapshot.go |   38 +
 .../go-dqlite/internal/connection/uri.go      |   61 +
 .../go-dqlite/internal/logging/func.go        |   26 +
 .../go-dqlite/internal/logging/level.go       |   27 +
 .../go-dqlite/internal/protocol/commands.go   |  107 +
 .../internal/protocol/commands.pb.go          | 2253 +++++++++++++++++
 .../internal/protocol/commands.proto          |   87 +
 .../go-dqlite/internal/protocol/doc.go        |   17 +
 .../go-dqlite/internal/registry/conn.go       |  154 ++
 .../go-dqlite/internal/registry/fsm.go        |   29 +
 .../go-dqlite/internal/registry/hook.go       |  191 ++
 .../go-dqlite/internal/registry/registry.go   |  149 ++
 .../go-dqlite/internal/registry/trace.go      |   36 +
 .../go-dqlite/internal/registry/txn.go        |  218 ++
 .../go-dqlite/internal/replication/doc.go     |   17 +
 .../go-dqlite/internal/replication/fsm.go     |  809 ++++++
 .../go-dqlite/internal/replication/methods.go |  794 ++++++
 .../go-dqlite/internal/replication/trace.go   |   23 +
 .../go-dqlite/internal/store/iterate.go       |   29 +
 .../go-dqlite/internal/store/range.go         |   59 +
 .../go-dqlite/internal/store/replay.go        |   47 +
 .../go-dqlite/internal/trace/buffer.go        |   88 +
 .../go-dqlite/internal/trace/constants.go     |   21 +
 .../go-dqlite/internal/trace/cursor.go        |   40 +
 .../go-dqlite/internal/trace/doc.go           |   17 +
 .../go-dqlite/internal/trace/entry.go         |   65 +
 .../go-dqlite/internal/trace/field.go         |   56 +
 .../go-dqlite/internal/trace/set.go           |  131 +
 .../go-dqlite/internal/trace/time.go          |   23 +
 .../go-dqlite/internal/trace/tracer.go        |   96 +
 .../go-dqlite/internal/transaction/state.go   |   67 +
 .../go-dqlite/internal/transaction/txn.go     |  237 ++
 .../github.com/CanonicalLtd/go-dqlite/log.go  |   33 +
 .../go-dqlite/recover/delete/delete.go        |   36 +
 .../go-dqlite/recover/dump/dump.go            |  105 +
 .../go-dqlite/recover/dump/options.go         |   56 +
 .../CanonicalLtd/go-dqlite/recover/open.go    |   30 +
 .../CanonicalLtd/go-dqlite/registry.go        |   50 +
 .../CanonicalLtd/go-dqlite/run-demo           |   62 +
 .../CanonicalLtd/go-dqlite/server.go          |  233 ++
 .../CanonicalLtd/go-dqlite/store.go           |  141 ++
 .../github.com/CanonicalLtd/raft-http/AUTHORS |    1 +
 .../github.com/CanonicalLtd/raft-http/LICENSE |  201 ++
 .../CanonicalLtd/raft-http/README.md          |   14 +
 .../github.com/CanonicalLtd/raft-http/dial.go |   50 +
 .../github.com/CanonicalLtd/raft-http/doc.go  |   67 +
 .../CanonicalLtd/raft-http/handler.go         |  212 ++
 .../CanonicalLtd/raft-http/layer.go           |  132 +
 .../CanonicalLtd/raft-http/membership.go      |   95 +
 .../CanonicalLtd/raft-membership/AUTHORS      |    1 +
 .../CanonicalLtd/raft-membership/LICENSE      |  201 ++
 .../CanonicalLtd/raft-membership/README.md    |   11 +
 .../CanonicalLtd/raft-membership/changer.go   |   36 +
 .../CanonicalLtd/raft-membership/errors.go    |   49 +
 .../CanonicalLtd/raft-membership/handle.go    |   65 +
 .../CanonicalLtd/raft-membership/request.go   |  113 +
 .../github.com/CanonicalLtd/raft-test/AUTHORS |    1 +
 .../github.com/CanonicalLtd/raft-test/LICENSE |  201 ++
 .../CanonicalLtd/raft-test/README.md          |   11 +
 .../CanonicalLtd/raft-test/cluster.go         |  303 +++
 .../CanonicalLtd/raft-test/control.go         |  519 ++++
 .../CanonicalLtd/raft-test/duration.go        |   45 +
 .../github.com/CanonicalLtd/raft-test/fsm.go  |   60 +
 .../raft-test/internal/election/future.go     |   61 +
 .../raft-test/internal/election/leadership.go |   43 +
 .../raft-test/internal/election/notifier.go   |  149 ++
 .../raft-test/internal/election/tracker.go    |  112 +
 .../raft-test/internal/event/event.go         |   54 +
 .../raft-test/internal/fsms/watcher.go        |   77 +
 .../raft-test/internal/fsms/wrapper.go        |  188 ++
 .../raft-test/internal/logging/logger.go      |   50 +
 .../raft-test/internal/network/logs.go        |   76 +
 .../raft-test/internal/network/network.go     |  147 ++
 .../raft-test/internal/network/peers.go       |  307 +++
 .../raft-test/internal/network/pipeline.go    |  166 ++
 .../raft-test/internal/network/schedule.go    |  178 ++
 .../raft-test/internal/network/transport.go   |  268 ++
 .../CanonicalLtd/raft-test/options.go         |  107 +
 .../CanonicalLtd/raft-test/server.go          |   36 +
 .../github.com/CanonicalLtd/raft-test/term.go |  219 ++
 .../github.com/hashicorp/raft-boltdb/LICENSE  |  362 +++
 .../github.com/hashicorp/raft-boltdb/Makefile |   11 +
 .../hashicorp/raft-boltdb/README.md           |   11 +
 .../hashicorp/raft-boltdb/bolt_store.go       |  268 ++
 .../github.com/hashicorp/raft-boltdb/util.go  |   37 +
 vendor/github.com/hashicorp/raft/CHANGELOG.md |   16 +
 vendor/github.com/hashicorp/raft/LICENSE      |  354 +++
 vendor/github.com/hashicorp/raft/Makefile     |   20 +
 vendor/github.com/hashicorp/raft/README.md    |  107 +
 vendor/github.com/hashicorp/raft/api.go       | 1013 ++++++++
 vendor/github.com/hashicorp/raft/commands.go  |  151 ++
 .../github.com/hashicorp/raft/commitment.go   |  101 +
 vendor/github.com/hashicorp/raft/config.go    |  265 ++
 .../hashicorp/raft/configuration.go           |  343 +++
 .../hashicorp/raft/discard_snapshot.go        |   49 +
 .../hashicorp/raft/file_snapshot.go           |  528 ++++
 vendor/github.com/hashicorp/raft/fsm.go       |  136 +
 vendor/github.com/hashicorp/raft/future.go    |  289 +++
 vendor/github.com/hashicorp/raft/go.mod       |   10 +
 vendor/github.com/hashicorp/raft/go.sum       |   37 +
 .../hashicorp/raft/inmem_snapshot.go          |  109 +
 .../github.com/hashicorp/raft/inmem_store.go  |  130 +
 .../hashicorp/raft/inmem_transport.go         |  335 +++
 vendor/github.com/hashicorp/raft/log.go       |   72 +
 vendor/github.com/hashicorp/raft/log_cache.go |   79 +
 .../github.com/hashicorp/raft/membership.md   |   83 +
 .../hashicorp/raft/net_transport.go           |  757 ++++++
 vendor/github.com/hashicorp/raft/observer.go  |  131 +
 vendor/github.com/hashicorp/raft/peersjson.go |   98 +
 vendor/github.com/hashicorp/raft/raft.go      | 1486 +++++++++++
 .../github.com/hashicorp/raft/replication.go  |  572 +++++
 vendor/github.com/hashicorp/raft/snapshot.go  |  239 ++
 vendor/github.com/hashicorp/raft/stable.go    |   15 +
 vendor/github.com/hashicorp/raft/state.go     |  171 ++
 vendor/github.com/hashicorp/raft/tag.sh       |   16 +
 .../hashicorp/raft/tcp_transport.go           |  116 +
 vendor/github.com/hashicorp/raft/transport.go |  124 +
 vendor/github.com/hashicorp/raft/util.go      |  133 +
 vendor/vendor.json                            |   45 +
 159 files changed, 24924 insertions(+)
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/AUTHORS
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/LICENSE
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/Makefile
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/README.md
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/cluster.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/bench.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/delete.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/dump.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/main.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/root.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/config.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/doc.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/driver.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/fsm.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/build.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/cluster.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/config.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/conn.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/datatype.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/errors.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/logger.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/server.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/status.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/testing.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/vfs.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal_replication.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/buffer.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/client.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/config.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/connector.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/dial.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/errors.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/message.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/request.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/response.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.go
 create mode 100755 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.sh
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/client/store.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/open.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/snapshot.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/uri.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/func.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/level.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.pb.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.proto
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/doc.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/conn.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/fsm.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/hook.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/registry.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/trace.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/txn.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/doc.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/fsm.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/methods.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/trace.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/store/iterate.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/store/range.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/store/replay.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/buffer.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/constants.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/cursor.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/doc.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/entry.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/field.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/set.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/time.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/tracer.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/state.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/txn.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/log.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/recover/delete/delete.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/dump.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/options.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/recover/open.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/registry.go
 create mode 100755 vendor/github.com/CanonicalLtd/go-dqlite/run-demo
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/server.go
 create mode 100644 vendor/github.com/CanonicalLtd/go-dqlite/store.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/AUTHORS
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/LICENSE
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/README.md
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/dial.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/doc.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/handler.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/layer.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-http/membership.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/AUTHORS
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/LICENSE
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/README.md
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/changer.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/errors.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/handle.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-membership/request.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/AUTHORS
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/LICENSE
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/README.md
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/cluster.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/control.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/duration.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/fsm.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/election/future.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/election/leadership.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/election/notifier.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/election/tracker.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/event/event.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/fsms/watcher.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/fsms/wrapper.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/logging/logger.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/network/logs.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/network/network.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/network/peers.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/network/pipeline.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/network/schedule.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/internal/network/transport.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/options.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/server.go
 create mode 100644 vendor/github.com/CanonicalLtd/raft-test/term.go
 create mode 100644 vendor/github.com/hashicorp/raft-boltdb/LICENSE
 create mode 100644 vendor/github.com/hashicorp/raft-boltdb/Makefile
 create mode 100644 vendor/github.com/hashicorp/raft-boltdb/README.md
 create mode 100644 vendor/github.com/hashicorp/raft-boltdb/bolt_store.go
 create mode 100644 vendor/github.com/hashicorp/raft-boltdb/util.go
 create mode 100644 vendor/github.com/hashicorp/raft/CHANGELOG.md
 create mode 100644 vendor/github.com/hashicorp/raft/LICENSE
 create mode 100644 vendor/github.com/hashicorp/raft/Makefile
 create mode 100644 vendor/github.com/hashicorp/raft/README.md
 create mode 100644 vendor/github.com/hashicorp/raft/api.go
 create mode 100644 vendor/github.com/hashicorp/raft/commands.go
 create mode 100644 vendor/github.com/hashicorp/raft/commitment.go
 create mode 100644 vendor/github.com/hashicorp/raft/config.go
 create mode 100644 vendor/github.com/hashicorp/raft/configuration.go
 create mode 100644 vendor/github.com/hashicorp/raft/discard_snapshot.go
 create mode 100644 vendor/github.com/hashicorp/raft/file_snapshot.go
 create mode 100644 vendor/github.com/hashicorp/raft/fsm.go
 create mode 100644 vendor/github.com/hashicorp/raft/future.go
 create mode 100644 vendor/github.com/hashicorp/raft/go.mod
 create mode 100644 vendor/github.com/hashicorp/raft/go.sum
 create mode 100644 vendor/github.com/hashicorp/raft/inmem_snapshot.go
 create mode 100644 vendor/github.com/hashicorp/raft/inmem_store.go
 create mode 100644 vendor/github.com/hashicorp/raft/inmem_transport.go
 create mode 100644 vendor/github.com/hashicorp/raft/log.go
 create mode 100644 vendor/github.com/hashicorp/raft/log_cache.go
 create mode 100644 vendor/github.com/hashicorp/raft/membership.md
 create mode 100644 vendor/github.com/hashicorp/raft/net_transport.go
 create mode 100644 vendor/github.com/hashicorp/raft/observer.go
 create mode 100644 vendor/github.com/hashicorp/raft/peersjson.go
 create mode 100644 vendor/github.com/hashicorp/raft/raft.go
 create mode 100644 vendor/github.com/hashicorp/raft/replication.go
 create mode 100644 vendor/github.com/hashicorp/raft/snapshot.go
 create mode 100644 vendor/github.com/hashicorp/raft/stable.go
 create mode 100644 vendor/github.com/hashicorp/raft/state.go
 create mode 100755 vendor/github.com/hashicorp/raft/tag.sh
 create mode 100644 vendor/github.com/hashicorp/raft/tcp_transport.go
 create mode 100644 vendor/github.com/hashicorp/raft/transport.go
 create mode 100644 vendor/github.com/hashicorp/raft/util.go
 create mode 100644 vendor/vendor.json

diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/AUTHORS b/vendor/github.com/CanonicalLtd/go-dqlite/AUTHORS
new file mode 100644
index 0000000000..6e13f86ebb
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/AUTHORS
@@ -0,0 +1 @@
+Free Ekanayaka <free.ekanayaka@canonical.com>
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/LICENSE b/vendor/github.com/CanonicalLtd/go-dqlite/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/Makefile b/vendor/github.com/CanonicalLtd/go-dqlite/Makefile
new file mode 100644
index 0000000000..edf4a49e21
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/Makefile
@@ -0,0 +1,4 @@
+proto:
+	protoc --gogofast_out=. internal/protocol/commands.proto
+
+.PHONY: proto
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/README.md b/vendor/github.com/CanonicalLtd/go-dqlite/README.md
new file mode 100644
index 0000000000..4c02176c50
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/README.md
@@ -0,0 +1,99 @@
+go-dqlite [![Build Status](https://travis-ci.org/CanonicalLtd/go-dqlite.png)](https://travis-ci.org/CanonicalLtd/go-dqlite) [![Coverage Status](https://coveralls.io/repos/github/CanonicalLtd/go-dqlite/badge.svg?branch=master)](https://coveralls.io/github/CanonicalLtd/go-dqlite?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/CanonicalLtd/go-dqlite)](https://goreportcard.com/report/github.com/CanonicalLtd/go-dqlite) [![GoDoc](https://godoc.org/github.com/CanonicalLtd/go-dqlite?status.svg)](https://godoc.org/github.com/CanonicalLtd/go-dqlite)
+======
+
+This repository provides the `dqlite` Go package, which can be used to
+replicate a SQLite database across a cluster, using the Raft
+algorithm.
+
+Design highlights
+----------------
+
+* No external processes needed: dqlite is just a Go library that you link
+  into your application exactly as you would SQLite.
+* Replication needs a [SQLite patch](https://github.com/CanonicalLtd/sqlite/commit/2a9aa8b056f37ae05f38835182a2856ffc95aee4)
+  which is not yet included upstream.
+* The Go [Raft package](https://github.com/hashicorp/raft) from Hashicorp
+  is used internally for replicating the write-ahead log frames of SQLite
+  across all nodes.
+
+How does it compare to rqlite?
+------------------------------
+
+The main differences from [rqlite](https://github.com/rqlite/rqlite) are:
+
+* Full support for transactions
+* No need for statements to be deterministic (e.g. you can use ```time()```)
+* Frame-based replication instead of statement-based replication: this
+  means more data flowing between nodes in dqlite, so expect lower
+  performance. This should not matter for most use cases.
+
+Status
+------
+
+This is **beta** software for now, but we'll get to rc/release soon.
+
+Demo
+----
+
+To see dqlite in action, make sure you have the following dependencies
+installed:
+
+* Go (tested on 1.8)
+* gcc
+* any dependency/header that SQLite needs to build from source
+* Python 3
+
+Then run:
+
+```
+go get -d github.com/CanonicalLtd/dqlite
+cd $GOPATH/src/github.com/CanonicalLtd/dqlite
+make dependencies
+./run-demo
+```
+
+This should spawn three dqlite-based nodes, each running the
+code in the [demo Go source](testdata/demo.go).
+
+Each node inserts data in a test table and then dies abruptly after a
+random timeout. Leftover transactions and failover to other nodes
+should be handled gracefully.
+
+While the demo is running, you can get more detail about what's going on
+behind the scenes by opening another terminal and running a command
+like:
+
+```
+watch ls -l /tmp/dqlite-demo-*/ /tmp/dqlite-demo-*/snapshots/
+```
+
+and see how the data directories of the three nodes evolve in terms of
+SQLite databases (```test.db```), write-ahead log files (```test.db-wal```),
+raft log stores (```raft.db```), and raft snapshots.
+
+
+Documentation
+-------------
+
+The documentation for this package can be found on [Godoc](http://godoc.org/github.com/CanonicalLtd/dqlite).
+
+FAQ
+---
+
+**Q**: How does dqlite behave during conflict situations? Does Raft
+select a winning WAL write and abort any others in flight?
+
+**A**: There can't be a conflict situation. Raft's model is that
+only the leader can append new log entries, which, translated to dqlite,
+means that only the leader can write new WAL frames. Any attempt
+to perform a write transaction on a non-leader node will therefore
+fail with a sqlite3x.ErrNotLeader error, and in that case clients are
+expected to retry against whoever the new leader is.
+
+**Q**: When not enough nodes are available, do writes hang until
+consensus is reached?
+
+**A**: Yes, but with a (configurable) timeout. This is a
+consequence of Raft sitting on the CP side of the CAP theorem: in
+case of a network partition it chooses consistency and sacrifices
+availability.
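
A minimal sketch of the retry pattern described in the FAQ above, assuming a driver built with `NewDriver` has been registered with `database/sql` under the name "dqlite" (the `bench` command further down does exactly that):

```
// Sketch only: retry a write until the driver's connector settles on the
// current leader. Assumes sql.Register("dqlite", driver) was called first.
package main

import (
	"database/sql"
	"log"
	"time"
)

func insertWithRetry(db *sql.DB, n int) error {
	var err error
	for attempt := 0; attempt < 10; attempt++ {
		if _, err = db.Exec("INSERT INTO test(n) VALUES(?)", n); err == nil {
			return nil
		}
		// Writes on a non-leader node fail with a not-leader error; back
		// off briefly and retry, letting the connector find the new leader.
		time.Sleep(100 * time.Millisecond)
	}
	return err
}

func main() {
	db, err := sql.Open("dqlite", "test.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := insertWithRetry(db, 42); err != nil {
		log.Fatal(err)
	}
}
```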
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/cluster.go b/vendor/github.com/CanonicalLtd/go-dqlite/cluster.go
new file mode 100644
index 0000000000..3c8d743980
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/cluster.go
@@ -0,0 +1,123 @@
+package dqlite
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/protocol"
+	"github.com/CanonicalLtd/go-dqlite/internal/registry"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+type cluster struct {
+	raft     *raft.Raft                 // Raft instance
+	registry *registry.Registry         // Connection registry
+	provider raft.ServerAddressProvider // Custom address provider
+}
+
+func (c *cluster) Leader() string {
+	return string(c.raft.Leader())
+}
+
+func (c *cluster) Servers() ([]bindings.ServerInfo, error) {
+	if c.raft.State() != raft.Leader {
+		return nil, raft.ErrNotLeader
+	}
+
+	future := c.raft.GetConfiguration()
+
+	if err := future.Error(); err != nil {
+		return nil, errors.Wrap(err, "failed to get raft configuration")
+	}
+
+	configuration := future.Configuration()
+
+	servers := make([]bindings.ServerInfo, len(configuration.Servers))
+
+	for i := range servers {
+		server := configuration.Servers[i]
+
+		id, err := strconv.Atoi(string(server.ID))
+		if err != nil {
+			return nil, errors.Wrap(err, "server ID is not a number")
+		}
+		servers[i].ID = uint64(id)
+
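+		// Prefer the address reported by the custom provider, when one
+		// is configured and it returns a non-empty address; otherwise
+		// fall back to the address in the raft configuration.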
+		if c.provider != nil {
+			address, err := c.provider.ServerAddr(server.ID)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to fetch raft server address")
+			}
+			if address != "" {
+				servers[i].Address = string(address)
+				continue
+			}
+		}
+		servers[i].Address = string(server.Address)
+	}
+
+	return servers, nil
+}
+
+func (c *cluster) Register(conn *bindings.Conn) {
+	filename := conn.Filename()
+	c.registry.Lock()
+	defer c.registry.Unlock()
+	c.registry.ConnLeaderAdd(filename, conn)
+}
+
+func (c *cluster) Unregister(conn *bindings.Conn) {
+	c.registry.Lock()
+	defer c.registry.Unlock()
+	c.registry.ConnLeaderDel(conn)
+}
+
+func (c *cluster) Barrier() error {
+	if c.raft.State() != raft.Leader {
+		return bindings.Error{Code: bindings.ErrIoErrNotLeader}
+	}
+
+	c.registry.Lock()
+	index := c.registry.Index()
+	c.registry.Unlock()
+
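+	// Fast path: the registry index already matches the last raft index,
+	// so the FSM has caught up and no barrier is needed.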
+	if index == c.raft.LastIndex() {
+		return nil
+	}
+
+	timeout := time.Minute // TODO: make this configurable
+	if err := c.raft.Barrier(timeout).Error(); err != nil {
+		if err == raft.ErrLeadershipLost {
+			return bindings.Error{Code: bindings.ErrIoErrNotLeader}
+		}
+
+		// TODO: add an out-of-sync error to SQLite?
+		return errors.Wrap(err, "FSM out of sync")
+	}
+	c.registry.Lock()
+	c.registry.IndexUpdate(c.raft.LastIndex())
+	c.registry.Unlock()
+
+	return nil
+}
+
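+// Recover is a no-op in this implementation: the token is ignored and no
+// recovery work is performed.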
+func (c *cluster) Recover(token uint64) error {
+	return nil
+}
+
+func (c *cluster) Checkpoint(conn *bindings.Conn) error {
+	command := protocol.NewCheckpoint(conn.Filename())
+
+	data, err := protocol.MarshalCommand(command)
+	if err != nil {
+		return err
+	}
+
+	if err := c.raft.Apply(data, time.Second).Error(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/bench.go b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/bench.go
new file mode 100644
index 0000000000..425f07ec46
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/bench.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite"
+	"github.com/CanonicalLtd/raft-test"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+// Return a new bench command.
+func newBench() *cobra.Command {
+	bench := &cobra.Command{
+		Use:   "bench [address]",
+		Short: "Bench all raft logs after the given index (included).",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			address := args[0]
+			role := args[1]
+
+			if role == "server" {
+				return runServer(address)
+			}
+
+			return runClient(address)
+		},
+	}
+
+	return bench
+}
+
+func runServer(address string) error {
+	registry := dqlite.NewRegistry("0")
+	fsm := dqlite.NewFSM(registry)
+
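+	// rafttest.Server expects a *testing.T; outside of a test run we can
+	// only hand it a zero value, which is crude but workable for a benchmark.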
+	t := &testing.T{}
+	r, cleanup := rafttest.Server(t, fsm, rafttest.Transport(func(i int) raft.Transport {
+		_, transport := raft.NewInmemTransport(raft.ServerAddress(address))
+		return transport
+	}))
+	defer cleanup()
+
+	listener, err := net.Listen("tcp", address)
+	if err != nil {
+		return errors.Wrap(err, "failed to listen")
+	}
+
+	server, err := dqlite.NewServer(r, registry, listener)
+	if err != nil {
+		return errors.Wrap(err, "failed to create server")
+	}
+
+	time.Sleep(time.Minute)
+
+	return server.Close()
+}
+
+func runClient(address string) error {
+	store, err := dqlite.DefaultServerStore(":memory:")
+	if err != nil {
+		return errors.Wrap(err, "failed to create server store")
+	}
+
+	if err := store.Set(context.Background(), []dqlite.ServerInfo{{Address: address}}); err != nil {
+		return errors.Wrap(err, "failed to set server address")
+	}
+
+	driver, err := dqlite.NewDriver(store)
+	if err != nil {
+		return errors.Wrap(err, "failed to create dqlite driver")
+	}
+
+	sql.Register("dqlite", driver)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
+	defer cancel()
+
+	db, err := sql.Open("dqlite", "test.db")
+	if err != nil {
+		return errors.Wrap(err, "failed to open database")
+	}
+	defer db.Close()
+
+	tx, err := db.Begin()
+	if err != nil {
+		return errors.Wrap(err, "failed to begin transaction")
+	}
+
+	start := time.Now()
+
+	if _, err := tx.ExecContext(ctx, "CREATE TABLE test (n INT, t TEXT)"); err != nil {
+		return errors.Wrapf(err, "failed to create test table")
+	}
+
+	for i := 0; i < 100; i++ {
+		if _, err := tx.ExecContext(ctx, "INSERT INTO test(n,t) VALUES(?, ?)", int64(i), "hello"); err != nil {
+			return errors.Wrapf(err, "failed to insert test value")
+		}
+	}
+
+	rows, err := tx.QueryContext(ctx, "SELECT n FROM test")
+	if err != nil {
+		return errors.Wrapf(err, "failed to query test table")
+	}
+
+	for rows.Next() {
+		var n int64
+		if err := rows.Scan(&n); err != nil {
+			return errors.Wrap(err, "failed to scan row")
+		}
+	}
+	if err := rows.Err(); err != nil {
+		return errors.Wrap(err, "result set failure")
+	}
+
+	if err := tx.Commit(); err != nil {
+		return errors.Wrap(err, "failed to commit transaction")
+	}
+
+	fmt.Printf("time %s\n", time.Since(start))
+
+	return nil
+}
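
Going by the argument handling in `newBench`, the benchmark runs as two processes: `dqlite bench <address> server` starts a raft node with an in-memory transport serving on the given TCP address for one minute, while `dqlite bench <address> client` (any role other than "server" selects the client) opens `test.db` against it and times a transaction that inserts and reads back 100 rows.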
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/delete.go b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/delete.go
new file mode 100644
index 0000000000..b5a1396929
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/delete.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+	"strconv"
+
+	"github.com/CanonicalLtd/go-dqlite/recover"
+	"github.com/CanonicalLtd/go-dqlite/recover/delete"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+// Return a new delete command.
+func newDelete() *cobra.Command {
+	delete := &cobra.Command{
+		Use:   "delete [path to raft data dir] [index]",
+		Short: "Delete all raft logs after the given index (included).",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			dir := args[0]
+
+			index, err := strconv.Atoi(args[1])
+			if err != nil {
+				return errors.Wrapf(err, "invalid index: %s", args[1])
+			}
+
+			logs, _, err := recover.Open(dir)
+			if err != nil {
+				return err
+			}
+
+			if err := delete.Delete(logs, uint64(index)); err != nil {
+				return err
+			}
+
+			return nil
+		},
+	}
+
+	return delete
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/dump.go b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/dump.go
new file mode 100644
index 0000000000..2a688907e8
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/dump.go
@@ -0,0 +1,78 @@
+package main
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/CanonicalLtd/go-dqlite/recover"
+	"github.com/CanonicalLtd/go-dqlite/recover/dump"
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/raft-boltdb"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+// Return a new dump command.
+func newDump() *cobra.Command {
+	var head int
+	var tail int
+	var replay string
+
+	dump := &cobra.Command{
+		Use:   "dump [path to raft data dir]",
+		Short: "Dump or replay the content of a dqlite raft store.",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			dir := args[0]
+
+			logs, snaps, err := recover.Open(dir)
+			if err != nil {
+				return err
+			}
+
+			options := make([]dump.Option, 0)
+
+			if head != 0 {
+				options = append(options, dump.Head(head))
+			}
+			if tail != 0 {
+				options = append(options, dump.Tail(tail))
+			}
+			if replay != "" {
+				options = append(options, dump.Replay(replay))
+			}
+
+			if err := dump.Dump(logs, snaps, os.Stdout, options...); err != nil {
+				return err
+			}
+
+			return nil
+		},
+	}
+
+	flags := dump.Flags()
+	flags.IntVarP(&head, "head", "H", 0, "limit the dump to the first N log commands")
+	flags.IntVarP(&tail, "tail", "t", 0, "limit the dump to the last N log commands")
+	flags.StringVarP(&replay, "replay", "r", "", "replay the logs to the given database dir")
+
+	return dump
+}
+
+func dumpOpen(dir string) (raft.LogStore, raft.SnapshotStore, error) {
+	if _, err := os.Stat(dir); err != nil {
+		return nil, nil, errors.Wrap(err, "invalid raft data dir")
+	}
+
+	logs, err := raftboltdb.NewBoltStore(filepath.Join(dir, "logs.db"))
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to open boltdb file")
+	}
+
+	snaps, err := raft.NewFileSnapshotStore(dir, 1, ioutil.Discard)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to open snapshot store")
+	}
+
+	return logs, snaps, nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/main.go b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/main.go
new file mode 100644
index 0000000000..1f9f41df45
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+	"os"
+)
+
+func main() {
+	root := newRoot()
+	if err := root.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/root.go b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/root.go
new file mode 100644
index 0000000000..87900c761e
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/cmd/dqlite/root.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// Return a new root command.
+func newRoot() *cobra.Command {
+	root := &cobra.Command{
+		Use:   "dqlite",
+		Short: "Distributed SQLite for Go applications",
+		Long: `Replicate a SQLite database across a cluster, using the Raft algorithm.
+
+Complete documentation is available at https://github.com/CanonicalLtd/go-dqlite`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return fmt.Errorf("not implemented")
+		},
+	}
+	root.AddCommand(newDump())
+	root.AddCommand(newDelete())
+	root.AddCommand(newBench())
+
+	return root
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/config.go b/vendor/github.com/CanonicalLtd/go-dqlite/config.go
new file mode 100644
index 0000000000..37c20fc6dc
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/config.go
@@ -0,0 +1,31 @@
+package dqlite
+
+/*
+import (
+	"fmt"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// Ensure that the configured directory exists and is accessible.
+func ensureDir(dir string) error {
+	if dir == "" {
+		return fmt.Errorf("no data dir provided in config")
+	}
+	info, err := os.Stat(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			if err := os.MkdirAll(dir, 0700); err != nil {
+				return errors.Wrap(err, "failed to create data dir")
+			}
+			return nil
+		}
+		return errors.Wrap(err, "failed to access data dir")
+	}
+	if !info.IsDir() {
+		return fmt.Errorf("data dir '%s' is not a directory", dir)
+	}
+	return nil
+}
+*/
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/doc.go b/vendor/github.com/CanonicalLtd/go-dqlite/doc.go
new file mode 100644
index 0000000000..98fc5d42b2
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dqlite implements a database/sql/driver with raft-based
+// SQLite replication.
+package dqlite
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/driver.go b/vendor/github.com/CanonicalLtd/go-dqlite/driver.go
new file mode 100644
index 0000000000..316c96d4b9
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/driver.go
@@ -0,0 +1,646 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dqlite
+
+import (
+	"context"
+	"database/sql/driver"
+	"io"
+	"net"
+	"reflect"
+	"time"
+
+	"github.com/Rican7/retry/backoff"
+	"github.com/Rican7/retry/strategy"
+	"github.com/pkg/errors"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/client"
+	"github.com/CanonicalLtd/go-dqlite/internal/connection"
+)
+
+// Driver performs queries against a dqlite server.
+type Driver struct {
+	log               LogFunc         // Log function to use
+	store             ServerStore     // Holds addresses of dqlite servers
+	context           context.Context // Global cancellation context
+	connectionTimeout time.Duration   // Max time to wait for a new connection
+	contextTimeout    time.Duration   // Default client context timeout.
+	clientConfig      client.Config   // Configuration for dqlite client instances
+}
+
+// DriverError is returned in case of database errors.
+type DriverError = bindings.Error
+
+// DriverOption can be used to tweak driver parameters.
+type DriverOption func(*driverOptions)
+
+// WithLogFunc sets a custom logging function.
+func WithLogFunc(log LogFunc) DriverOption {
+	return func(options *driverOptions) {
+		options.Log = log
+	}
+}
+
+// DialFunc is a function that can be used to establish a network connection.
+type DialFunc client.DialFunc
+
+// WithDialFunc sets a custom dial function.
+func WithDialFunc(dial DialFunc) DriverOption {
+	return func(options *driverOptions) {
+		options.Dial = client.DialFunc(dial)
+	}
+}
+
+// WithConnectionTimeout sets the connection timeout.
+//
+// If not used, the default is 5 seconds.
+func WithConnectionTimeout(timeout time.Duration) DriverOption {
+	return func(options *driverOptions) {
+		options.ConnectionTimeout = timeout
+	}
+}
+
+// WithContextTimeout sets the default client context timeout when no context
+// deadline is provided.
+//
+// If not used, the default is 5 seconds.
+func WithContextTimeout(timeout time.Duration) DriverOption {
+	return func(options *driverOptions) {
+		options.ContextTimeout = timeout
+	}
+}
+
+// WithConnectionBackoffFactor sets the exponential backoff factor for retrying
+// failed connection attempts.
+//
+// If not used, the default is 50 milliseconds.
+func WithConnectionBackoffFactor(factor time.Duration) DriverOption {
+	return func(options *driverOptions) {
+		options.ConnectionBackoffFactor = factor
+	}
+}
+
+// WithConnectionBackoffCap sets the maximum connection retry backoff value
+// (regardless of the backoff factor) for retrying failed connection attempts.
+//
+// If not used, the default is 1 second.
+func WithConnectionBackoffCap(cap time.Duration) DriverOption {
+	return func(options *driverOptions) {
+		options.ConnectionBackoffCap = cap
+	}
+}
+
+// WithContext sets a global cancellation context.
+func WithContext(context context.Context) DriverOption {
+	return func(options *driverOptions) {
+		options.Context = context
+	}
+}
+
+// NewDriver creates a new dqlite driver, which also implements the
+// driver.Driver interface.
+func NewDriver(store ServerStore, options ...DriverOption) (*Driver, error) {
+	o := defaultDriverOptions()
+
+	for _, option := range options {
+		option(o)
+	}
+
+	driver := &Driver{
+		log:               o.Log,
+		store:             store,
+		context:           o.Context,
+		connectionTimeout: o.ConnectionTimeout,
+		contextTimeout:    o.ContextTimeout,
+	}
+
+	driver.clientConfig.Dial = o.Dial
+	driver.clientConfig.AttemptTimeout = 5 * time.Second
+	driver.clientConfig.RetryStrategies = []strategy.Strategy{
+		driverConnectionRetryStrategy(
+			o.ConnectionBackoffFactor,
+			o.ConnectionBackoffCap,
+		),
+	}
+
+	return driver, nil
+}
+
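+// Example construction (a sketch, assuming a store obtained from
+// DefaultServerStore and registration through database/sql):
+//
+//	store, _ := dqlite.DefaultServerStore("servers.db")
+//	drv, _ := dqlite.NewDriver(store, dqlite.WithConnectionTimeout(10*time.Second))
+//	sql.Register("dqlite", drv)
+//	db, _ := sql.Open("dqlite", "app.db")
+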
+// Hold configuration options for a dqlite driver.
+type driverOptions struct {
+	Log                     LogFunc
+	Dial                    client.DialFunc
+	ConnectionTimeout       time.Duration
+	ContextTimeout          time.Duration
+	ConnectionBackoffFactor time.Duration
+	ConnectionBackoffCap    time.Duration
+	Context                 context.Context
+}
+
+// Create a driverOptions object with sane defaults.
+func defaultDriverOptions() *driverOptions {
+	return &driverOptions{
+		Log:                     defaultLogFunc(),
+		Dial:                    client.TCPDial,
+		ConnectionTimeout:       15 * time.Second,
+		ContextTimeout:          5 * time.Second,
+		ConnectionBackoffFactor: 50 * time.Millisecond,
+		ConnectionBackoffCap:    time.Second,
+		Context:                 context.Background(),
+	}
+}
+
+// Return a retry strategy with exponential backoff, capped at the
+// given amount of time.
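+//
+// For example, with the default factor of 50ms and cap of 1s, the sleep
+// sequence is roughly 100ms, 200ms, 400ms, 800ms, and then 1s for every
+// further attempt (backoff.BinaryExponential applies no jitter; it sleeps
+// factor * 2^attempt).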
+func driverConnectionRetryStrategy(factor, cap time.Duration) strategy.Strategy {
+	backoff := backoff.BinaryExponential(factor)
+
+	return func(attempt uint) bool {
+		if attempt > 0 {
+			duration := backoff(attempt)
+			if duration > cap {
+				duration = cap
+			}
+			time.Sleep(duration)
+		}
+
+		return true
+	}
+}
+
+// Open establishes a new connection to a SQLite database on the dqlite server.
+//
+// The given name must be a pure file name without any directory segment;
+// dqlite will connect to a database with that name in its data directory.
+//
+// Query parameters are always valid except for "mode=memory".
+//
+// If this node is not the leader, or the leader is unknown, an error whose
+// root cause is ErrNoAvailableLeader is returned.
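+//
+// A hedged usage sketch via database/sql (the registration name is
+// illustrative):
+//
+//	sql.Register("dqlite", driver)
+//	db, err := sql.Open("dqlite", "app.db")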
+func (d *Driver) Open(uri string) (driver.Conn, error) {
+	// Validate the given data source string.
+	filename, flags, err := connection.ParseURI(uri)
+	if err != nil {
+		return nil, errors.Wrapf(err, "invalid URI %s", uri)
+	}
+
+	ctx, cancel := context.WithTimeout(d.context, d.connectionTimeout)
+	defer cancel()
+
+	// TODO: generate a client ID.
+	connector := client.NewConnector(0, d.store, d.clientConfig, d.log)
+
+	conn := &Conn{
+		log:            d.log,
+		contextTimeout: d.contextTimeout,
+	}
+
+	conn.client, err = connector.Connect(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create dqlite connection")
+	}
+	conn.client.SetContextTimeout(d.contextTimeout)
+
+	conn.request.Init(4096)
+	conn.response.Init(4096)
+
+	defer conn.request.Reset()
+	defer conn.response.Reset()
+
+	client.EncodeOpen(&conn.request, filename, flags, "volatile")
+
+	if err := conn.client.Call(ctx, &conn.request, &conn.response); err != nil {
+		conn.client.Close()
+		return nil, errors.Wrap(err, "failed to open database")
+	}
+
+	conn.id, err = client.DecodeDb(&conn.response)
+	if err != nil {
+		conn.client.Close()
+		return nil, errors.Wrap(err, "failed to open database")
+	}
+
+	return conn, nil
+}
+
+// SetContextTimeout sets the default client timeout when no context deadline
+// is provided.
+func (d *Driver) SetContextTimeout(timeout time.Duration) {
+	d.contextTimeout = timeout
+}
+
+// ErrNoAvailableLeader is returned as root cause of Open() if there's no
+// leader available in the cluster.
+var ErrNoAvailableLeader = client.ErrNoAvailableLeader
+
+// Conn implements the sql.Conn interface.
+type Conn struct {
+	log            LogFunc
+	client         *client.Client
+	request        client.Message
+	response       client.Message
+	id             uint32 // Database ID.
+	contextTimeout time.Duration
+}
+
+// PrepareContext returns a prepared statement, bound to this connection.
+// The context is used only for the preparation of the statement; the
+// statement itself must not store the context.
+func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+	defer c.request.Reset()
+	defer c.response.Reset()
+
+	stmt := &Stmt{
+		client:   c.client,
+		request:  &c.request,
+		response: &c.response,
+	}
+
+	client.EncodePrepare(&c.request, uint64(c.id), query)
+
+	if err := c.client.Call(ctx, &c.request, &c.response); err != nil {
+		return nil, driverError(err)
+	}
+
+	var err error
+	stmt.db, stmt.id, stmt.params, err = client.DecodeStmt(&c.response)
+	if err != nil {
+		return nil, driverError(err)
+	}
+
+	return stmt, nil
+}
+
+// Prepare returns a prepared statement, bound to this connection.
+func (c *Conn) Prepare(query string) (driver.Stmt, error) {
+	return c.PrepareContext(context.Background(), query)
+}
+
+// ExecContext is an optional interface that may be implemented by a Conn.
+func (c *Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+	defer c.request.Reset()
+	defer c.response.Reset()
+
+	client.EncodeExecSQL(&c.request, uint64(c.id), query, args)
+
+	if err := c.client.Call(ctx, &c.request, &c.response); err != nil {
+		return nil, driverError(err)
+	}
+
+	result, err := client.DecodeResult(&c.response)
+	if err != nil {
+		return nil, driverError(err)
+	}
+
+	return &Result{result: result}, nil
+}
+
+// Query is an optional interface that may be implemented by a Conn.
+func (c *Conn) Query(query string, args []driver.Value) (driver.Rows, error) {
+	return c.QueryContext(context.Background(), query, valuesToNamedValues(args))
+}
+
+// QueryContext is an optional interface that may be implemented by a Conn.
+func (c *Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+	defer c.request.Reset()
+
+	client.EncodeQuerySQL(&c.request, uint64(c.id), query, args)
+
+	if err := c.client.Call(ctx, &c.request, &c.response); err != nil {
+		return nil, driverError(err)
+	}
+
+	rows, err := client.DecodeRows(&c.response)
+	if err != nil {
+		return nil, driverError(err)
+	}
+
+	return &Rows{ctx: ctx, request: &c.request, response: &c.response, client: c.client, rows: rows}, nil
+}
+
+// Exec is an optional interface that may be implemented by a Conn.
+func (c *Conn) Exec(query string, args []driver.Value) (driver.Result, error) {
+	return c.ExecContext(context.Background(), query, valuesToNamedValues(args))
+}
+
+// Close invalidates and potentially stops any current prepared statements and
+// transactions, marking this connection as no longer in use.
+//
+// Because the sql package maintains a free pool of connections and only calls
+// Close when there's a surplus of idle connections, it shouldn't be necessary
+// for drivers to do their own connection caching.
+func (c *Conn) Close() error {
+	return c.client.Close()
+}
+
+// BeginTx starts and returns a new transaction.  If the context is canceled by
+// the user the sql package will call Tx.Rollback before discarding and closing
+// the connection.
+//
+// This must check opts.Isolation to determine if there is a set isolation
+// level. If the driver does not support a non-default level and one is set or
+// if there is a non-default isolation level that is not supported, an error
+// must be returned.
+//
+// This must also check opts.ReadOnly to determine if the read-only value is
+// true to either set the read-only transaction property if supported or return
+// an error if it is not supported.
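+//
+// Note: this implementation currently issues a plain "BEGIN" statement and
+// does not inspect opts.Isolation or opts.ReadOnly.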
+func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+	if _, err := c.ExecContext(ctx, "BEGIN", nil); err != nil {
+		return nil, driverError(err)
+	}
+
+	tx := &Tx{
+		conn: c,
+	}
+
+	return tx, nil
+}
+
+// Begin starts and returns a new transaction.
+//
+// Deprecated: Drivers should implement ConnBeginTx instead (or additionally).
+func (c *Conn) Begin() (driver.Tx, error) {
+	return c.BeginTx(context.Background(), driver.TxOptions{})
+}
+
+// Tx is a transaction.
+type Tx struct {
+	conn *Conn
+}
+
+// Commit the transaction.
+func (tx *Tx) Commit() error {
+	ctx, cancel := context.WithTimeout(context.Background(), tx.conn.contextTimeout)
+	defer cancel()
+
+	if _, err := tx.conn.ExecContext(ctx, "COMMIT", nil); err != nil {
+		return driverError(err)
+	}
+
+	return nil
+}
+
+// Rollback the transaction.
+func (tx *Tx) Rollback() error {
+	ctx, cancel := context.WithTimeout(context.Background(), tx.conn.contextTimeout)
+	defer cancel()
+
+	if _, err := tx.conn.ExecContext(ctx, "ROLLBACK", nil); err != nil {
+		return driverError(err)
+	}
+
+	return nil
+}
+
+// Stmt is a prepared statement. It is bound to a Conn and not
+// used by multiple goroutines concurrently.
+type Stmt struct {
+	client   *client.Client
+	request  *client.Message
+	response *client.Message
+	db       uint32
+	id       uint32
+	params   uint64
+}
+
+// Close closes the statement.
+func (s *Stmt) Close() error {
+	defer s.request.Reset()
+	defer s.response.Reset()
+
+	client.EncodeFinalize(s.request, s.db, s.id)
+
+	ctx := context.Background()
+
+	if err := s.client.Call(ctx, s.request, s.response); err != nil {
+		return driverError(err)
+	}
+
+	if err := client.DecodeEmpty(s.response); err != nil {
+		return driverError(err)
+	}
+
+	return nil
+}
+
+// NumInput returns the number of placeholder parameters.
+func (s *Stmt) NumInput() int {
+	return int(s.params)
+}
+
+// ExecContext executes a query that doesn't return rows, such
+// as an INSERT or UPDATE.
+//
+// ExecContext must honor the context timeout and return when it is canceled.
+func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+	defer s.request.Reset()
+	defer s.response.Reset()
+
+	client.EncodeExec(s.request, s.db, s.id, args)
+
+	if err := s.client.Call(ctx, s.request, s.response); err != nil {
+		return nil, driverError(err)
+	}
+
+	result, err := client.DecodeResult(s.response)
+	if err != nil {
+		return nil, driverError(err)
+	}
+
+	return &Result{result: result}, nil
+}
+
+// Exec executes a query that doesn't return rows, such
+// as an INSERT or UPDATE.
+func (s *Stmt) Exec(args []driver.Value) (driver.Result, error) {
+	return s.ExecContext(context.Background(), valuesToNamedValues(args))
+}
+
+// QueryContext executes a query that may return rows, such as a
+// SELECT.
+//
+// QueryContext must honor the context timeout and return when it is canceled.
+func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+	defer s.request.Reset()
+
+	client.EncodeQuery(s.request, s.db, s.id, args)
+
+	if err := s.client.Call(ctx, s.request, s.response); err != nil {
+		return nil, driverError(err)
+	}
+
+	rows, err := client.DecodeRows(s.response)
+	if err != nil {
+		return nil, driverError(err)
+	}
+
+	return &Rows{ctx: ctx, request: s.request, response: s.response, client: s.client, rows: rows}, nil
+}
+
+// Query executes a query that may return rows, such as a
+// SELECT.
+func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) {
+	return s.QueryContext(context.Background(), valuesToNamedValues(args))
+}
+
+// Result is the result of a query execution.
+type Result struct {
+	result client.Result
+}
+
+// LastInsertId returns the database's auto-generated ID
+// after, for example, an INSERT into a table with primary
+// key.
+func (r *Result) LastInsertId() (int64, error) {
+	return int64(r.result.LastInsertID), nil
+}
+
+// RowsAffected returns the number of rows affected by the
+// query.
+func (r *Result) RowsAffected() (int64, error) {
+	return int64(r.result.RowsAffected), nil
+}
+
+// Rows is an iterator over an executed query's results.
+type Rows struct {
+	ctx      context.Context
+	client   *client.Client
+	request  *client.Message
+	response *client.Message
+	rows     client.Rows
+	consumed bool
+}
+
+// Columns returns the names of the columns. The number of
+// columns of the result is inferred from the length of the
+// slice. If a particular column name isn't known, an empty
+// string should be returned for that entry.
+func (r *Rows) Columns() []string {
+	return r.rows.Columns
+}
+
+// Close closes the rows iterator.
+func (r *Rows) Close() error {
+	r.rows.Close()
+
+	// If we consumed the whole result set, there's nothing to do as
+	// there's no pending response from the server.
+	if r.consumed {
+		return nil
+	}
+
+	// Let's issue an interrupt request and wait until we get an empty
+	// response, signalling that the query was interrupted.
+	if err := r.client.Interrupt(r.ctx, r.request, r.response); err != nil {
+		return driverError(err)
+	}
+
+	return nil
+}
+
+// Next is called to populate the next row of data into
+// the provided slice. The provided slice will be the same
+// size as the Columns() are wide.
+//
+// Next should return io.EOF when there are no more rows.
+func (r *Rows) Next(dest []driver.Value) error {
+	err := r.rows.Next(dest)
+
+	if err == client.ErrRowsPart {
+		r.rows.Close()
+		if err := r.client.More(r.ctx, r.response); err != nil {
+			return driverError(err)
+		}
+		rows, err := client.DecodeRows(r.response)
+		if err != nil {
+			return driverError(err)
+		}
+		r.rows = rows
+		return r.rows.Next(dest)
+	}
+
+	if err == io.EOF {
+		r.consumed = true
+	}
+
+	return err
+}
+
+// ColumnTypeScanType implements RowsColumnTypeScanType.
+func (r *Rows) ColumnTypeScanType(i int) reflect.Type {
+	// column := sql.NewColumn(r.rows, i)
+
+	// typ, err := r.client.ColumnTypeScanType(context.Background(), column)
+	// if err != nil {
+	// 	return nil
+	// }
+
+	// return typ.DriverType()
+	return nil
+}
+
+// ColumnTypeDatabaseTypeName implements RowsColumnTypeDatabaseTypeName.
+func (r *Rows) ColumnTypeDatabaseTypeName(i int) string {
+	// column := sql.NewColumn(r.rows, i)
+
+	// typeName, err := r.client.ColumnTypeDatabaseTypeName(context.Background(), column)
+	// if err != nil {
+	// 	return ""
+	// }
+
+	// return typeName.Value
+	return ""
+}
+
+// Convert a driver.Value slice into a driver.NamedValue slice.
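+// For example, []driver.Value{int64(1), "hello"} becomes two NamedValues with
+// Ordinals 1 and 2 and empty Names, matching the positional-parameter
+// convention of database/sql.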
+func valuesToNamedValues(args []driver.Value) []driver.NamedValue {
+	namedValues := make([]driver.NamedValue, len(args))
+	for i, value := range args {
+		namedValues[i] = driver.NamedValue{
+			Ordinal: i + 1,
+			Value:   value,
+		}
+	}
+	return namedValues
+}
+
+func driverError(err error) error {
+	switch err := errors.Cause(err).(type) {
+	case *net.OpError:
+		return driver.ErrBadConn
+	case client.ErrRequest:
+		switch err.Code {
+		case bindings.ErrIoErrNotLeader:
+			fallthrough
+		case bindings.ErrIoErrLeadershipLost:
+			return driver.ErrBadConn
+		default:
+			return DriverError{
+				Code:    int(err.Code),
+				Message: err.Description,
+			}
+		}
+	}
+	return err
+}
+
+func init() {
+	err := bindings.Init()
+	if err != nil {
+		panic(errors.Wrap(err, "failed to initialize dqlite"))
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/fsm.go b/vendor/github.com/CanonicalLtd/go-dqlite/fsm.go
new file mode 100644
index 0000000000..c1d15015e9
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/fsm.go
@@ -0,0 +1,17 @@
+package dqlite
+
+import (
+	"github.com/CanonicalLtd/go-dqlite/internal/replication"
+	"github.com/hashicorp/raft"
+)
+
+// NewFSM creates a new dqlite FSM, suitable to be passed to raft.NewRaft.
+//
+// It will handle replication of the SQLite write-ahead log.
+//
+// This is mostly an internal implementation detail of dqlite, but it needs to
+// be exposed since the raft.Raft parameter that NewDriver accepts doesn't
+// allow access to the FSM that it was passed when created with raft.NewRaft().
+func NewFSM(r *Registry) raft.FSM {
+	return replication.NewFSM(r.registry)
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/build.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/build.go
new file mode 100644
index 0000000000..e1ac1f9960
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/build.go
@@ -0,0 +1,6 @@
+package bindings
+
+/*
+#cgo linux LDFLAGS: -lsqlite3 -ldqlite
+*/
+import "C"
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/cluster.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/cluster.go
new file mode 100644
index 0000000000..797d3c8a07
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/cluster.go
@@ -0,0 +1,249 @@
+package bindings
+
+/*
+#include <assert.h>
+#include <stdlib.h>
+
+#include <dqlite.h>
+
+// Go land callbacks for dqlite_cluster methods.
+char *clusterLeaderCb(uintptr_t handle);
+int clusterServersCb(uintptr_t handle, dqlite_server_info **servers);
+void clusterRegisterCb(uintptr_t handle, sqlite3 *db);
+void clusterUnregisterCb(uintptr_t handle, sqlite3 *db);
+int clusterBarrierCb(uintptr_t handle);
+int clusterRecoverCb(uintptr_t handle, uint64_t txToken);
+int clusterCheckpointCb(uintptr_t handle, sqlite3 *db);
+
+// Implementation of xLeader.
+static const char* dqlite__cluster_leader(void *ctx) {
+  assert(ctx != NULL);
+
+  return clusterLeaderCb((uintptr_t)ctx);
+}
+
+// Implementation of xServers.
+static int dqlite__cluster_servers(void *ctx, dqlite_server_info *servers[]) {
+  assert(ctx != NULL);
+
+  return clusterServersCb((uintptr_t)ctx, servers);
+}
+
+// Implementation of xRegister.
+static void dqlite__cluster_register(void *ctx, sqlite3 *db) {
+  assert(ctx != NULL);
+
+  clusterRegisterCb((uintptr_t)ctx, db);
+}
+
+// Implementation of xUnregister.
+static void dqlite__cluster_unregister(void *ctx, sqlite3 *db) {
+  assert(ctx != NULL);
+
+  clusterUnregisterCb((uintptr_t)ctx, db);
+}
+
+// Implementation of xBarrier.
+static int dqlite__cluster_barrier(void *ctx) {
+  assert(ctx != NULL);
+
+  return clusterBarrierCb((uintptr_t)ctx);
+}
+
+// Implementation of xRecover.
+static int dqlite__cluster_recover(void *ctx, uint64_t tx_token) {
+  assert(ctx != NULL);
+
+  return clusterRecoverCb((uintptr_t)ctx, tx_token);
+}
+
+// Implementation of xCheckpoint.
+static int dqlite__cluster_checkpoint(void *ctx, sqlite3 *db) {
+  assert(ctx != NULL);
+
+  return clusterCheckpointCb((uintptr_t)ctx, db);
+}
+
+// Constructor.
+static dqlite_cluster *dqlite__cluster_create(uintptr_t handle)
+{
+  dqlite_cluster *c = sqlite3_malloc(sizeof *c);
+  if (c == NULL) {
+    return NULL;
+  }
+
+  c->ctx = (void*)handle;
+  c->xLeader = dqlite__cluster_leader;
+  c->xServers = dqlite__cluster_servers;
+  c->xRegister = dqlite__cluster_register;
+  c->xUnregister = dqlite__cluster_unregister;
+  c->xBarrier = dqlite__cluster_barrier;
+  c->xRecover = dqlite__cluster_recover;
+  c->xCheckpoint = dqlite__cluster_checkpoint;
+
+  return c;
+}
+*/
+import "C"
+import (
+	"unsafe"
+)
+
+// Cluster is a Go wrapper around the associated dqlite's C type.
+type Cluster C.dqlite_cluster
+
+// NewCluster creates a new Cluster object set with the given method hooks.
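+//
+// A hedged usage sketch (assuming "methods" implements ClusterMethods):
+//
+//	cluster, err := NewCluster(methods)
+//	if err != nil {
+//		return err
+//	}
+//	defer cluster.Close()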
+func NewCluster(methods ClusterMethods) (*Cluster, error) {
+	handle := clusterMethodsSerial
+	clusterMethodsIndex[handle] = methods
+	clusterMethodsSerial++
+
+	cluster := C.dqlite__cluster_create(handle)
+	if cluster == nil {
+		return nil, codeToError(C.SQLITE_NOMEM)
+	}
+
+	return (*Cluster)(unsafe.Pointer(cluster)), nil
+}
+
+// Close releases all memory associated with the cluster object.
+func (c *Cluster) Close() {
+	cluster := (*C.dqlite_cluster)(unsafe.Pointer(c))
+
+	handle := (C.uintptr_t)(uintptr(cluster.ctx))
+	delete(clusterMethodsIndex, handle)
+
+	C.sqlite3_free(unsafe.Pointer(cluster))
+}
+
+// ServerInfo is the Go equivalent of dqlite_server_info.
+type ServerInfo struct {
+	ID      uint64
+	Address string
+}
+
+// ClusterMethods implements the interface required by the various hooks of
+// dqlite_cluster.
+type ClusterMethods interface {
+	// Return the address of the current cluster leader, if any. If not
+	// empty, the address string must be a valid network IP or hostname
+	// that clients can use to connect to a dqlite service.
+	Leader() string
+
+	// If this driver is the current leader of the cluster, return the
+	// addresses of all other servers. Each address must be a valid IP or
+	// host name that clients can use to connect to the relevant dqlite
+	// service, in case the current leader is deposed and a new one
+	// is elected.
+	//
+	// If this driver is not the current leader of the cluster, an error
+	// implementing the Error interface below and returning true in
+	// NotLeader() must be returned.
+	Servers() ([]ServerInfo, error)
+
+	Register(*Conn)
+	Unregister(*Conn)
+
+	Barrier() error
+
+	Recover(token uint64) error
+
+	Checkpoint(*Conn) error
+}
+
+// Map uintptr to Cluster instances to avoid passing Go pointers to C.
+//
+// We do not protect this map with a lock since typically just one long-lived
+// Cluster instance should be registered (except for unit tests).
+var clusterMethodsSerial C.uintptr_t = 100
+var clusterMethodsIndex = map[C.uintptr_t]ClusterMethods{}
+
+//export clusterLeaderCb
+func clusterLeaderCb(handle C.uintptr_t) *C.char {
+	cluster := clusterMethodsIndex[handle]
+
+	// It's the responsibility of the calling code to free() this string.
+	return C.CString(cluster.Leader())
+}
+
+//export clusterServersCb
+func clusterServersCb(handle C.uintptr_t, out **C.dqlite_server_info) C.int {
+	cluster := clusterMethodsIndex[handle]
+
+	servers, err := cluster.Servers()
+	if err != nil {
+		*out = nil
+		return C.int(ErrorCode(err))
+	}
+
+	n := C.size_t(len(servers)) + 1
+
+	// It's the responsibility of the calling code to free() this array of servers.
+	size := unsafe.Sizeof(C.dqlite_server_info{})
+	*out = (*C.dqlite_server_info)(C.malloc(n * C.size_t(size)))
+
+	if *out == nil {
+		return C.SQLITE_NOMEM
+	}
+
+	for i := C.size_t(0); i < n; i++ {
+		server := (*C.dqlite_server_info)(unsafe.Pointer(uintptr(unsafe.Pointer(*out)) + size*uintptr(i)))
+
+		if i == n-1 {
+			server.id = 0
+			server.address = nil
+		} else {
+			server.id = C.uint64_t(servers[i].ID)
+			server.address = C.CString(servers[i].Address)
+		}
+	}
+
+	return C.int(0)
+}
+
+//export clusterRegisterCb
+func clusterRegisterCb(handle C.uintptr_t, db *C.sqlite3) {
+	cluster := clusterMethodsIndex[handle]
+	cluster.Register((*Conn)(unsafe.Pointer(db)))
+}
+
+//export clusterUnregisterCb
+func clusterUnregisterCb(handle C.uintptr_t, db *C.sqlite3) {
+	cluster := clusterMethodsIndex[handle]
+	cluster.Unregister((*Conn)(unsafe.Pointer(db)))
+}
+
+//export clusterBarrierCb
+func clusterBarrierCb(handle C.uintptr_t) C.int {
+	cluster := clusterMethodsIndex[handle]
+
+	if err := cluster.Barrier(); err != nil {
+		return C.int(ErrorCode(err))
+	}
+
+	return 0
+}
+
+//export clusterRecoverCb
+func clusterRecoverCb(handle C.uintptr_t, txToken C.uint64_t) C.int {
+	cluster := clusterMethodsIndex[handle]
+
+	err := cluster.Recover(uint64(txToken))
+	if err != nil {
+		return C.int(ErrorCode(err))
+	}
+
+	return C.int(0)
+}
+
+//export clusterCheckpointCb
+func clusterCheckpointCb(handle C.uintptr_t, db *C.sqlite3) C.int {
+	cluster := clusterMethodsIndex[handle]
+
+	err := cluster.Checkpoint((*Conn)(unsafe.Pointer(db)))
+	if err != nil {
+		return C.int(ErrorCode(err))
+	}
+
+	return C.int(0)
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/config.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/config.go
new file mode 100644
index 0000000000..42d0f3b1da
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/config.go
@@ -0,0 +1,39 @@
+package bindings
+
+/*
+#include <sqlite3.h>
+
+// Wrapper around sqlite3_db_config() for invoking the
+// SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE opcode, since there's no way to use C
+// varargs from Go.
+static int sqlite3__db_config_no_ckpt_on_close(sqlite3 *db, int value, int *pValue) {
+  return sqlite3_db_config(db, SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE, value, pValue);
+}
+*/
+import "C"
+import (
+	"unsafe"
+
+	"github.com/pkg/errors"
+)
+
+// ConfigNoCkptOnClose switches on or off the automatic WAL checkpoint when a
+// connection is closed.
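+//
+// A usage sketch; the returned flag reports the setting now in effect:
+//
+//	noCkpt, err := conn.ConfigNoCkptOnClose(true)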
+func (c *Conn) ConfigNoCkptOnClose(flag bool) (bool, error) {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	var in C.int
+	var out C.int
+
+	if flag {
+		in = 1
+	}
+
+	rc := C.sqlite3__db_config_no_ckpt_on_close(db, in, &out)
+	if rc != C.SQLITE_OK {
+		err := lastError(db)
+		return false, errors.Wrap(err, "failed to config checkpoint on close")
+	}
+
+	return out == 1, nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/conn.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/conn.go
new file mode 100644
index 0000000000..09197a35be
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/conn.go
@@ -0,0 +1,132 @@
+package bindings
+
+/*
+#include <stdlib.h>
+#include <sqlite3.h>
+*/
+import "C"
+import (
+	"database/sql/driver"
+	"io"
+	"unsafe"
+)
+
+// Open modes.
+const (
+	OpenReadWrite = C.SQLITE_OPEN_READWRITE
+	OpenReadOnly  = C.SQLITE_OPEN_READONLY
+	OpenCreate    = C.SQLITE_OPEN_CREATE
+)
+
+// Conn is a Go wrapper around a SQLite database handle.
+type Conn C.sqlite3
+
+// Open a SQLite connection.
+func Open(name string, vfs string) (*Conn, error) {
+	flags := OpenReadWrite | OpenCreate
+
+	// Open the database.
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+
+	cvfs := C.CString(vfs)
+	defer C.free(unsafe.Pointer(cvfs))
+
+	var db *C.sqlite3
+
+	rc := C.sqlite3_open_v2(cname, &db, C.int(flags), cvfs)
+	if rc != C.SQLITE_OK {
+		err := lastError(db)
+		C.sqlite3_close_v2(db)
+		return nil, err
+	}
+
+	return (*Conn)(unsafe.Pointer(db)), nil
+}
+
+// Close the connection.
+func (c *Conn) Close() error {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	rc := C.sqlite3_close(db)
+	if rc != C.SQLITE_OK {
+		return lastError(db)
+	}
+
+	return nil
+}
+
+// Filename of the underlying database file.
+func (c *Conn) Filename() string {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	return C.GoString(C.sqlite3_db_filename(db, walReplicationSchema))
+}
+
+// Exec executes a query.
+func (c *Conn) Exec(query string) error {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	sql := C.CString(query)
+	defer C.free(unsafe.Pointer(sql))
+
+	rc := C.sqlite3_exec(db, sql, nil, nil, nil)
+	if rc != C.SQLITE_OK {
+		return lastError(db)
+	}
+
+	return nil
+}
+
+// Query the database.
+func (c *Conn) Query(query string) (*Rows, error) {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	var stmt *C.sqlite3_stmt
+	var tail *C.char
+
+	sql := C.CString(query)
+	defer C.free(unsafe.Pointer(sql))
+
+	rc := C.sqlite3_prepare(db, sql, C.int(-1), &stmt, &tail)
+	if rc != C.SQLITE_OK {
+		return nil, lastError(db)
+	}
+
+	rows := &Rows{db: db, stmt: stmt}
+
+	return rows, nil
+}
+
+// Rows represents a result set.
+type Rows struct {
+	db   *C.sqlite3
+	stmt *C.sqlite3_stmt
+}
+
+// Next fetches the next row of a result set.
+func (r *Rows) Next(values []driver.Value) error {
+	rc := C.sqlite3_step(r.stmt)
+	if rc == C.SQLITE_DONE {
+		return io.EOF
+	}
+	if rc != C.SQLITE_ROW {
+		return lastError(r.db)
+	}
+
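+	// Note: this helper extracts every column as an integer; it is only
+	// meant for simple internal queries.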
+	for i := range values {
+		values[i] = int64(C.sqlite3_column_int64(r.stmt, C.int(i)))
+	}
+
+	return nil
+}
+
+// Close finalizes the underlying statement.
+func (r *Rows) Close() error {
+	rc := C.sqlite3_finalize(r.stmt)
+	if rc != C.SQLITE_OK {
+		return lastError(r.db)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/datatype.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/datatype.go
new file mode 100644
index 0000000000..9f4e4dcb77
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/datatype.go
@@ -0,0 +1,23 @@
+package bindings
+
+/*
+#include <sqlite3.h>
+#include <dqlite.h>
+*/
+import "C"
+
+// SQLite datatype codes
+const (
+	Integer = C.SQLITE_INTEGER
+	Float   = C.SQLITE_FLOAT
+	Text    = C.SQLITE_TEXT
+	Blob    = C.SQLITE_BLOB
+	Null    = C.SQLITE_NULL
+)
+
+// Special data types for time values.
+const (
+	UnixTime = C.DQLITE_UNIXTIME
+	ISO8601  = C.DQLITE_ISO8601
+	Boolean  = C.DQLITE_BOOLEAN
+)
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/errors.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/errors.go
new file mode 100644
index 0000000000..c80a19cfc5
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/errors.go
@@ -0,0 +1,58 @@
+package bindings
+
+/*
+#include <sqlite3.h>
+*/
+import "C"
+import (
+	"github.com/pkg/errors"
+)
+
+// Error holds information about a SQLite error.
+type Error struct {
+	Code    int
+	Message string
+}
+
+func (e Error) Error() string {
+	if e.Message != "" {
+		return e.Message
+	}
+	return C.GoString(C.sqlite3_errstr(C.int(e.Code)))
+}
+
+// Error codes.
+const (
+	ErrError               = C.SQLITE_ERROR
+	ErrInternal            = C.SQLITE_INTERNAL
+	ErrNoMem               = C.SQLITE_NOMEM
+	ErrInterrupt           = C.SQLITE_INTERRUPT
+	ErrBusy                = C.SQLITE_BUSY
+	ErrIoErr               = C.SQLITE_IOERR
+	ErrIoErrNotLeader      = C.SQLITE_IOERR_NOT_LEADER
+	ErrIoErrLeadershipLost = C.SQLITE_IOERR_LEADERSHIP_LOST
+)
+
+// ErrorCode extracts a SQLite error code from a Go error.
+func ErrorCode(err error) int {
+	if err, ok := errors.Cause(err).(Error); ok {
+		return err.Code
+	}
+
+	// Return a generic error.
+	return int(C.SQLITE_ERROR)
+}
+
+// Create a Go error with the code and message of the last error happened on
+// the given database.
+func lastError(db *C.sqlite3) Error {
+	return Error{
+		Code:    int(C.sqlite3_extended_errcode(db)),
+		Message: C.GoString(C.sqlite3_errmsg(db)),
+	}
+}
+
+// codeToError converts a SQLite error code to a Go error.
+func codeToError(rc C.int) error {
+	return Error{Code: int(rc)}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/logger.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/logger.go
new file mode 100644
index 0000000000..5493ff3db0
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/logger.go
@@ -0,0 +1,117 @@
+package bindings
+
+/*
+#include <assert.h>
+#include <stdlib.h>
+
+#include <dqlite.h>
+
+// Silence warnings.
+extern int vasprintf(char **strp, const char *fmt, va_list ap);
+
+// Go land callback for xLogf.
+void dqliteLoggerLogfCb(uintptr_t handle, int level, char *msg);
+
+// Implementation of xLogf.
+static void dqliteLoggerLogf(void *ctx, int level, const char *format, va_list args) {
+  uintptr_t handle;
+  char *msg;
+  int err;
+
+  assert(ctx != NULL);
+
+  handle = (uintptr_t)ctx;
+
+  err = vasprintf(&msg, format, args);
+  if (err < 0) {
+    // Ignore errors
+    return;
+  }
+
+  dqliteLoggerLogfCb(handle, level, (char*)msg);
+
+  free(msg);
+}
+
+// Constructor.
+static dqlite_logger *dqlite__logger_create(uintptr_t handle) {
+  dqlite_logger *logger = sqlite3_malloc(sizeof *logger);
+
+  if (logger == NULL) {
+    return NULL;
+  }
+
+  logger->data = (void*)handle;
+  logger->emit = dqliteLoggerLogf;
+
+  return logger;
+}
+*/
+import "C"
+import (
+	"unsafe"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/logging"
+)
+
+// Logger is a Go wrapper around the associated dqlite's C type.
+type Logger C.dqlite_logger
+
+// Logging levels.
+const (
+	LogDebug = C.DQLITE_LOG_DEBUG
+	LogInfo  = C.DQLITE_LOG_INFO
+	LogWarn  = C.DQLITE_LOG_WARN
+	LogError = C.DQLITE_LOG_ERROR
+)
+
+// NewLogger creates a new Logger object set with the given log function.
+func NewLogger(f logging.Func) *Logger {
+	// Register the logger implementation and pass its handle to
+	// dqliteLoggerInit.
+	handle := loggerFuncsSerial
+
+	loggerFuncsIndex[handle] = f
+	loggerFuncsSerial++
+
+	logger := C.dqlite__logger_create(C.uintptr_t(handle))
+	if logger == nil {
+		panic("out of memory")
+	}
+
+	return (*Logger)(unsafe.Pointer(logger))
+}
+
+// Close releases all memory associated with the logger object.
+func (l *Logger) Close() {
+	logger := (*C.dqlite_logger)(unsafe.Pointer(l))
+	handle := (C.uintptr_t)(uintptr(logger.data))
+
+	delete(loggerFuncsIndex, handle)
+
+	C.sqlite3_free(unsafe.Pointer(logger))
+}
+
+// Map uintptr to logging.Func instances to avoid passing Go pointers to C.
+//
+// We do not protect this map with a lock since typically just one long-lived
+// Logger instance should be registered (except for unit tests).
+var loggerFuncsSerial C.uintptr_t = 100
+var loggerFuncsIndex = map[C.uintptr_t]logging.Func{}
+
+//export dqliteLoggerLogfCb
+func dqliteLoggerLogfCb(handle C.uintptr_t, level C.int, msg *C.char) {
+	f := loggerFuncsIndex[handle]
+
+	message := C.GoString(msg)
+	switch level {
+	case LogDebug:
+		f(logging.Debug, message)
+	case LogInfo:
+		f(logging.Info, message)
+	case LogWarn:
+		f(logging.Warn, message)
+	case LogError:
+		f(logging.Error, message)
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/server.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/server.go
new file mode 100644
index 0000000000..e173e5a3b5
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/server.go
@@ -0,0 +1,223 @@
+package bindings
+
+/*
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <dqlite.h>
+#include <sqlite3.h>
+
+int dup_cloexec(int oldfd) {
+	int newfd = -1;
+
+	newfd = dup(oldfd);
+	if (newfd < 0) {
+		return -1;
+	}
+
+	if (fcntl(newfd, F_SETFD, FD_CLOEXEC) < 0) {
+		return -1;
+	}
+
+	return newfd;
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"unsafe"
+
+	"github.com/pkg/errors"
+)
+
+// ProtocolVersion is the latest dqlite server protocol version.
+const ProtocolVersion = uint64(C.DQLITE_PROTOCOL_VERSION)
+
+// Request types.
+const (
+	RequestLeader    = C.DQLITE_REQUEST_LEADER
+	RequestClient    = C.DQLITE_REQUEST_CLIENT
+	RequestHeartbeat = C.DQLITE_REQUEST_HEARTBEAT
+	RequestOpen      = C.DQLITE_REQUEST_OPEN
+	RequestPrepare   = C.DQLITE_REQUEST_PREPARE
+	RequestExec      = C.DQLITE_REQUEST_EXEC
+	RequestQuery     = C.DQLITE_REQUEST_QUERY
+	RequestFinalize  = C.DQLITE_REQUEST_FINALIZE
+	RequestExecSQL   = C.DQLITE_REQUEST_EXEC_SQL
+	RequestQuerySQL  = C.DQLITE_REQUEST_QUERY_SQL
+	RequestInterrupt = C.DQLITE_REQUEST_INTERRUPT
+)
+
+// Response types.
+const (
+	ResponseFailure = C.DQLITE_RESPONSE_FAILURE
+	ResponseServer  = C.DQLITE_RESPONSE_SERVER
+	ResponseWelcome = C.DQLITE_RESPONSE_WELCOME
+	ResponseServers = C.DQLITE_RESPONSE_SERVERS
+	ResponseDb      = C.DQLITE_RESPONSE_DB
+	ResponseStmt    = C.DQLITE_RESPONSE_STMT
+	ResponseResult  = C.DQLITE_RESPONSE_RESULT
+	ResponseRows    = C.DQLITE_RESPONSE_ROWS
+	ResponseEmpty   = C.DQLITE_RESPONSE_EMPTY
+)
+
+// Server is a Go wrapper around dqlite_server.
+type Server C.dqlite_server
+
+// Init initializes dqlite global state.
+func Init() error {
+	var errmsg *C.char
+
+	rc := C.dqlite_init(&errmsg)
+	if rc != 0 {
+		return fmt.Errorf("%s (%d)", C.GoString(errmsg), rc)
+	}
+	return nil
+}
+
+// NewServer creates a new Server instance.
+func NewServer(cluster *Cluster) (*Server, error) {
+	var server *C.dqlite_server
+
+	rc := C.dqlite_server_create((*C.dqlite_cluster)(unsafe.Pointer(cluster)), &server)
+	if rc != 0 {
+		err := codeToError(rc)
+		return nil, errors.Wrap(err, "failed to create server object")
+	}
+
+	return (*Server)(unsafe.Pointer(server)), nil
+}
+
+// Close the server releasing all used resources.
+func (s *Server) Close() {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	C.dqlite_server_destroy(server)
+}
+
+// SetLogger sets the server logger.
+func (s *Server) SetLogger(logger *Logger) {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	rc := C.dqlite_server_config(server, C.DQLITE_CONFIG_LOGGER, unsafe.Pointer(logger))
+	if rc != 0 {
+		// Setting the logger should never fail.
+		panic("failed to set logger")
+	}
+}
+
+// SetVfs sets the name of the VFS to use for new connections.
+func (s *Server) SetVfs(name string) {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+
+	rc := C.dqlite_server_config(server, C.DQLITE_CONFIG_VFS, unsafe.Pointer(cname))
+	if rc != 0 {
+		// Setting the VFS name should never fail.
+		panic("failed to set vfs")
+	}
+}
+
+// SetWalReplication sets the name of the WAL replication to use for new connections.
+func (s *Server) SetWalReplication(name string) {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+
+	rc := C.dqlite_server_config(server, C.DQLITE_CONFIG_WAL_REPLICATION, unsafe.Pointer(cname))
+	if rc != 0 {
+		// Setting the WAL replication name should never fail.
+		panic("failed to set WAL replication")
+	}
+}
+
+// Run the server.
+//
+// After this method is called it's possible to invoke Handle().
+func (s *Server) Run() error {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	rc := C.dqlite_server_run(server)
+	if rc != 0 {
+		// dqlite_server_run() reports no error message, only a code.
+		return fmt.Errorf("dqlite server run failed (%d)", rc)
+	}
+
+	return nil
+}
+
+// Ready waits for the server to be ready to handle connections.
+func (s *Server) Ready() bool {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	return C.dqlite_server_ready(server) == 1
+}
+
+// Handle a new connection.
+func (s *Server) Handle(conn net.Conn) error {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	file, err := conn.(fileConn).File()
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	fd1 := C.int(file.Fd())
+
+	// Duplicate the file descriptor, in order to prevent Go's finalizer
+	// from closing it.
+	fd2 := C.dup_cloexec(fd1)
+	if fd2 < 0 {
+		return fmt.Errorf("failed to dup socket fd")
+	}
+
+	conn.Close()
+
+	var errmsg *C.char
+
+	rc := C.dqlite_server_handle(server, fd2, &errmsg)
+	if rc != 0 {
+		C.close(fd2)
+		defer C.sqlite3_free(unsafe.Pointer(errmsg))
+		if rc == C.DQLITE_STOPPED {
+			return ErrServerStopped
+		}
+		return fmt.Errorf(C.GoString(errmsg))
+	}
+
+	return nil
+}
+
+// Interface that net.Conn must implement in order to extract the underlying
+// file descriptor.
+type fileConn interface {
+	File() (*os.File, error)
+}
+
+// Stop the server.
+func (s *Server) Stop() error {
+	server := (*C.dqlite_server)(unsafe.Pointer(s))
+
+	var errmsg *C.char
+
+	rc := C.dqlite_server_stop(server, &errmsg)
+	if rc != 0 {
+		return fmt.Errorf(C.GoString(errmsg))
+	}
+
+	return nil
+}
+
+// ErrServerStopped is returned by Server.Handle() if the server was stopped.
+var ErrServerStopped = fmt.Errorf("server was stopped")
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/status.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/status.go
new file mode 100644
index 0000000000..082e68731e
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/status.go
@@ -0,0 +1,43 @@
+package bindings
+
+/*
+#include <sqlite3.h>
+*/
+import "C"
+
+// StatusMallocCount returns the current and highest number of memory
+// allocations performed with sqlite3_malloc.
+func StatusMallocCount(reset bool) (int, int, error) {
+	var current C.int
+	var highest C.int
+	var flag C.int
+
+	if reset {
+		flag = 1
+	}
+
+	rc := C.sqlite3_status(C.SQLITE_STATUS_MALLOC_COUNT, &current, &highest, flag)
+	if rc != C.SQLITE_OK {
+		return -1, -1, codeToError(rc)
+	}
+
+	return int(current), int(highest), nil
+}
+
+// StatusMemoryUsed returns the current and highest amount of allocated memory.
+func StatusMemoryUsed(reset bool) (int, int, error) {
+	var current C.int
+	var highest C.int
+	var flag C.int
+
+	if reset {
+		flag = 1
+	}
+
+	rc := C.sqlite3_status(C.SQLITE_STATUS_MEMORY_USED, &current, &highest, flag)
+	if rc != C.SQLITE_OK {
+		return -1, -1, codeToError(rc)
+	}
+
+	return int(current), int(highest), nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/testing.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/testing.go
new file mode 100644
index 0000000000..9c8a207e1a
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/testing.go
@@ -0,0 +1,24 @@
+package bindings
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// AssertNoMemoryLeaks is a test helper asserting that current allocation count
+// and used memory are both zero.
+func AssertNoMemoryLeaks(t *testing.T) {
+	t.Helper()
+
+	current, _, err := StatusMallocCount(true)
+	require.NoError(t, err)
+
+	assert.Equal(t, 0, current, "malloc count leak")
+
+	current, _, err = StatusMemoryUsed(true)
+	require.NoError(t, err)
+
+	assert.Equal(t, 0, current, "memory leak")
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/vfs.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/vfs.go
new file mode 100644
index 0000000000..bcc500ebf6
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/vfs.go
@@ -0,0 +1,106 @@
+package bindings
+
+/*
+#include <stdlib.h>
+
+#include <sqlite3.h>
+#include <dqlite.h>
+*/
+import "C"
+import (
+	"unsafe"
+)
+
+// Vfs is a Go wrapper around dqlite's in-memory VFS implementation.
+type Vfs C.sqlite3_vfs
+
+// NewVfs registers an in-memory VFS instance under the given name.
+func NewVfs(name string, logger *Logger) (*Vfs, error) {
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+
+	if vfs := C.sqlite3_vfs_find(cname); vfs != nil {
+		err := Error{Code: C.SQLITE_ERROR, Message: "vfs name already registered"}
+		return nil, err
+	}
+
+	clogger := (*C.dqlite_logger)(unsafe.Pointer(logger))
+
+	vfs := C.dqlite_vfs_create(cname, clogger)
+	if vfs == nil {
+		return nil, codeToError(C.SQLITE_NOMEM)
+	}
+
+	rc := C.sqlite3_vfs_register(vfs, 0)
+	if rc != 0 {
+		return nil, codeToError(rc)
+	}
+
+	return (*Vfs)(unsafe.Pointer(vfs)), nil
+}
+
+// Close unregisters this in-memory VFS instance.
+func (v *Vfs) Close() error {
+	vfs := (*C.sqlite3_vfs)(unsafe.Pointer(v))
+
+	rc := C.sqlite3_vfs_unregister(vfs)
+	if rc != 0 {
+		return codeToError(rc)
+	}
+
+	C.dqlite_vfs_destroy(vfs)
+
+	return nil
+}
+
+// Name returns the registration name of the vfs.
+func (v *Vfs) Name() string {
+	vfs := (*C.sqlite3_vfs)(unsafe.Pointer(v))
+
+	return C.GoString(vfs.zName)
+}
+
+// ReadFile returns the content of the given filename.
+func (v *Vfs) ReadFile(filename string) ([]byte, error) {
+	vfs := (*C.sqlite3_vfs)(unsafe.Pointer(v))
+
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+
+	var buf *C.uint8_t
+	var n C.size_t
+
+	rc := C.dqlite_file_read(vfs.zName, cfilename, &buf, &n)
+	if rc != 0 {
+		return nil, Error{Code: int(rc)}
+	}
+
+	content := C.GoBytes(unsafe.Pointer(buf), C.int(n))
+
+	C.sqlite3_free(unsafe.Pointer(buf))
+
+	return content, nil
+}
+
+// WriteFile writes the content of the given filename, overwriting it if it
+// exists.
+func (v *Vfs) WriteFile(filename string, bytes []byte) error {
+	if len(bytes) == 0 {
+		return nil
+	}
+
+	vfs := (*C.sqlite3_vfs)(unsafe.Pointer(v))
+
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+
+	buf := (*C.uint8_t)(unsafe.Pointer(&bytes[0]))
+	n := C.size_t(len(bytes))
+
+	rc := C.dqlite_file_write(vfs.zName, cfilename, buf, n)
+	if rc != 0 {
+		return Error{Code: int(rc & 0xff)}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal.go
new file mode 100644
index 0000000000..cba970d7d6
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal.go
@@ -0,0 +1,41 @@
+package bindings
+
+/*
+#include <stdlib.h>
+#include <sqlite3.h>
+*/
+import "C"
+import "unsafe"
+
+// WalCheckpointMode defines all valid values for the "checkpoint mode" parameter
+// of the WalCheckpointV2 API. See https://sqlite.org/c3ref/wal_checkpoint_v2.html.
+type WalCheckpointMode int
+
+// WAL checkpoint modes
+const (
+	WalCheckpointPassive  = WalCheckpointMode(C.SQLITE_CHECKPOINT_PASSIVE)
+	WalCheckpointFull     = WalCheckpointMode(C.SQLITE_CHECKPOINT_FULL)
+	WalCheckpointRestart  = WalCheckpointMode(C.SQLITE_CHECKPOINT_RESTART)
+	WalCheckpointTruncate = WalCheckpointMode(C.SQLITE_CHECKPOINT_TRUNCATE)
+)
+
+// WalCheckpoint triggers a WAL checkpoint on the given database attached to the
+// connection. See https://sqlite.org/c3ref/wal_checkpoint_v2.html
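+//
+// A usage sketch; the returned values are the size of the WAL in frames and
+// the number of frames checkpointed:
+//
+//	size, checkpointed, err := conn.WalCheckpoint("main", WalCheckpointTruncate)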
+func (c *Conn) WalCheckpoint(schema string, mode WalCheckpointMode) (int, int, error) {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	var size C.int
+	var ckpt C.int
+	var err error
+
+	// Convert to C types
+	zDb := C.CString(schema)
+	defer C.free(unsafe.Pointer(zDb))
+
+	rc := C.sqlite3_wal_checkpoint_v2(db, zDb, C.int(mode), &size, &ckpt)
+	if rc != 0 {
+		return -1, -1, lastError(db)
+	}
+
+	return int(size), int(ckpt), err
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal_replication.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal_replication.go
new file mode 100644
index 0000000000..a3d677bc4f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/bindings/wal_replication.go
@@ -0,0 +1,409 @@
+package bindings
+
+/*
+#include <stdint.h>
+#include <stdlib.h>
+#include <sqlite3.h>
+#include <string.h>
+
+// WAL replication trampolines.
+int walReplicationBegin(uintptr_t handle, sqlite3 *db);
+int walReplicationAbort(uintptr_t handle, sqlite3 *db);
+int walReplicationFrames(uintptr_t handle, sqlite3 *db,
+      int, int, sqlite3_wal_replication_frame*, unsigned, int);
+int walReplicationUndo(uintptr_t handle, sqlite3 *db);
+int walReplicationEnd(uintptr_t handle, sqlite3 *db);
+
+// Wal replication methods.
+static int sqlite3__wal_replication_begin(sqlite3_wal_replication *r, void *arg)
+{
+  uintptr_t handle = (uintptr_t)(r->pAppData);
+  sqlite3 *db = (sqlite3*)(arg);
+  return walReplicationBegin(handle, db);
+}
+
+static int sqlite3__wal_replication_abort(sqlite3_wal_replication *r, void *arg)
+{
+  uintptr_t handle = (uintptr_t)(r->pAppData);
+  sqlite3 *db = (sqlite3*)(arg);
+  return walReplicationAbort(handle, db);
+}
+
+static int sqlite3__wal_replication_frames(sqlite3_wal_replication *r, void *arg,
+  int szPage, int nFrame, sqlite3_wal_replication_frame *aFrame,
+  unsigned nTruncate, int isCommit)
+{
+  uintptr_t handle = (uintptr_t)(r->pAppData);
+  sqlite3 *db = (sqlite3*)(arg);
+  return walReplicationFrames(handle, db, szPage, nFrame, aFrame, nTruncate, isCommit);
+}
+
+static int sqlite3__wal_replication_undo(sqlite3_wal_replication *r, void *arg)
+{
+  uintptr_t handle = (uintptr_t)(r->pAppData);
+  sqlite3 *db = (sqlite3*)(arg);
+  return walReplicationUndo(handle, db);
+}
+
+static int sqlite3__wal_replication_end(sqlite3_wal_replication *r, void *arg)
+{
+  uintptr_t handle = (uintptr_t)(r->pAppData);
+  sqlite3 *db = (sqlite3*)(arg);
+  return walReplicationEnd(handle, db);
+}
+
+// Constructor.
+static sqlite3_wal_replication *sqlite3__wal_replication_create(char *name, uintptr_t ctx){
+  sqlite3_wal_replication *replication;
+
+  replication = sqlite3_malloc(sizeof *replication);
+  if (replication == NULL) {
+    goto oom;
+  }
+
+  replication->iVersion = 1;
+
+  // Copy the name (including its NUL terminator) so the Go side can just
+  // free it.
+  replication->zName    = sqlite3_malloc(strlen(name) + 1);
+  if (replication->zName == NULL) {
+    goto oom_after_replication_malloc;
+  }
+  strcpy((char *)replication->zName, (const char*)name);
+
+  replication->pAppData = (void*)ctx;
+  replication->xBegin   = sqlite3__wal_replication_begin;
+  replication->xAbort   = sqlite3__wal_replication_abort;
+  replication->xFrames  = sqlite3__wal_replication_frames;
+  replication->xUndo    = sqlite3__wal_replication_undo;
+  replication->xEnd     = sqlite3__wal_replication_end;
+
+  return replication;
+
+oom_after_replication_malloc:
+  sqlite3_free(replication);
+
+oom:
+  return NULL;
+}
+
+// Destructor.
+static void sqlite3__wal_replication_destroy(sqlite3_wal_replication *replication) {
+  sqlite3_free((char *)replication->zName);
+  sqlite3_free(replication);
+}
+
+*/
+import "C"
+import (
+	"unsafe"
+)
+
+// WalReplication is a Go wrapper around the associated SQLite's C type.
+type WalReplication C.sqlite3_wal_replication
+
+// NewWalReplication registers a WAL replication instance under the given
+// name.
+func NewWalReplication(name string, methods WalReplicationMethods) (*WalReplication, error) {
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+
+	if r := C.sqlite3_wal_replication_find(cname); r != nil {
+		err := Error{Code: C.SQLITE_ERROR, Message: "WAL replication name already registered"}
+		return nil, err
+	}
+
+	handle := walReplicationMethodsSerial
+	walReplicationMethodsIndex[handle] = methods
+	walReplicationMethodsSerial++
+
+	replication := C.sqlite3__wal_replication_create(cname, handle)
+	if replication == nil {
+		return nil, codeToError(C.SQLITE_NOMEM)
+	}
+
+	rc := C.sqlite3_wal_replication_register(replication, 0)
+	if rc != 0 {
+		return nil, codeToError(rc)
+	}
+
+	return (*WalReplication)(unsafe.Pointer(replication)), nil
+}
+
+// Name returns the registration name of the WAL replication.
+func (r *WalReplication) Name() string {
+	replication := (*C.sqlite3_wal_replication)(unsafe.Pointer(r))
+
+	return C.GoString(replication.zName)
+}
+
+// Close unregisters and destroys this WAL replication instance.
+func (r *WalReplication) Close() error {
+	replication := (*C.sqlite3_wal_replication)(unsafe.Pointer(r))
+
+	rc := C.sqlite3_wal_replication_unregister(replication)
+	if rc != 0 {
+		return codeToError(rc)
+	}
+
+	handle := (C.uintptr_t)(uintptr(replication.pAppData))
+	delete(walReplicationMethodsIndex, handle)
+
+	C.sqlite3__wal_replication_destroy(replication)
+
+	return nil
+}
+
+// WalReplicationLeader switches the SQLite connection to leader WAL
+// replication mode.
+func (c *Conn) WalReplicationLeader(name string) error {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+
+	rc := C.sqlite3_wal_replication_leader(db, walReplicationSchema, cname, unsafe.Pointer(db))
+	if rc != C.SQLITE_OK {
+		return lastError(db)
+	}
+
+	return nil
+}
+
+// WalReplicationFollower switches the given SQLite connection to follower WAL
+// replication mode. In this mode no regular operation is possible, and the
+// connection should be driven with the WalReplicationFrames and
+// WalReplicationUndo APIs.
+func (c *Conn) WalReplicationFollower() error {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	rc := C.sqlite3_wal_replication_follower(db, walReplicationSchema)
+	if rc != C.SQLITE_OK {
+		return lastError(db)
+	}
+
+	return nil
+}
+
+// WalReplicationFrames writes the given batch of frames to the write-ahead log
+// linked to the given connection.
+//
+// This method must be called with a "follower" connection, meant to replicate
+// the "leader" one.
+func (c *Conn) WalReplicationFrames(info WalReplicationFrameInfo) error {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	rc := C.sqlite3_wal_replication_frames(
+		db, walReplicationSchema, info.isBegin, info.szPage, info.nFrame,
+		info.aPgno, info.aPage, info.nTruncate, info.isCommit)
+	if rc != C.SQLITE_OK {
+		return lastError(db)
+	}
+
+	return nil
+}
+
+// WalReplicationUndo rolls back a write transaction in the given SQLite
+// connection. This should be called with a "follower" connection, meant to
+// replicate the "leader" one.
+func (c *Conn) WalReplicationUndo() error {
+	db := (*C.sqlite3)(unsafe.Pointer(c))
+
+	rc := C.sqlite3_wal_replication_undo(db, walReplicationSchema)
+	if rc != C.SQLITE_OK {
+		return lastError(db)
+	}
+	return nil
+}
+
+// WalReplicationMethods implements the interface required by the various hooks
+// of sqlite3_wal_replication.
+type WalReplicationMethods interface {
+	// Begin a new write transaction. The implementation should check
+	// that the database is eligible for starting a replicated write
+	// transaction (e.g. this node is the leader), and perform internal
+	// state changes as appropriate.
+	Begin(*Conn) int
+
+	// Abort a write transaction. The implementation should clear any
+	// state previously set by the Begin hook.
+	Abort(*Conn) int
+
+	// Write new frames to the write-ahead log. The implementation should
+	// broadcast this write to other nodes and wait for a quorum.
+	Frames(*Conn, WalReplicationFrameList) int
+
+	// Undo a write transaction. The implementation should broadcast
+	// this event to other nodes and wait for a quorum. The return code
+	// is currently ignored by SQLite.
+	Undo(*Conn) int
+
+	// End a write transaction. The implementation should update its
+	// internal state and be ready for a new transaction.
+	End(*Conn) int
+}
+
+// PageNumber identifies a single database or WAL page.
+type PageNumber C.unsigned
+
+// FrameNumber identifies a single frame in the WAL.
+type FrameNumber C.unsigned
+
+// WalReplicationFrameList holds information about a single batch of WAL frames
+// that are being dispatched for replication by a leader connection.
+//
+// They map to the parameters of the sqlite3_wal_replication.xFrames API.
+type WalReplicationFrameList struct {
+	szPage    C.int
+	nFrame    C.int
+	aFrame    *C.sqlite3_wal_replication_frame
+	nTruncate C.uint
+	isCommit  C.int
+}
+
+// PageSize returns the page size of this batch of WAL frames.
+func (l *WalReplicationFrameList) PageSize() int {
+	return int(l.szPage)
+}
+
+// Len returns the number of WAL frames in this batch.
+func (l *WalReplicationFrameList) Len() int {
+	return int(l.nFrame)
+}
+
+// Truncate returns the size of the database in pages after this batch of WAL
+// frames is applied.
+func (l *WalReplicationFrameList) Truncate() uint {
+	return uint(l.nTruncate)
+}
+
+// Frame returns information about the i'th frame in the batch.
+func (l *WalReplicationFrameList) Frame(i int) (unsafe.Pointer, PageNumber, FrameNumber) {
+	pFrame := (*C.sqlite3_wal_replication_frame)(unsafe.Pointer(
+		uintptr(unsafe.Pointer(l.aFrame)) +
+			unsafe.Sizeof(*l.aFrame)*uintptr(i),
+	))
+	return pFrame.pBuf, PageNumber(pFrame.pgno), FrameNumber(pFrame.iPrev)
+}
+
+// IsCommit returns whether this batch of WAL frames concludes a transaction.
+func (l *WalReplicationFrameList) IsCommit() bool {
+	return l.isCommit > 0
+}
+
+// WalReplicationFrameInfo holds information about a single batch of WAL
+// frames that are being replicated by a follower connection.
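+//
+// A hedged construction sketch (values and variable names are illustrative):
+//
+//	var info WalReplicationFrameInfo
+//	info.IsBegin(true)
+//	info.PageSize(4096)
+//	info.Len(len(pgnos))
+//	info.Pages(pgnos, data)
+//	info.Truncate(newSize)
+//	info.IsCommit(true)
+//	err := conn.WalReplicationFrames(info)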
+type WalReplicationFrameInfo struct {
+	isBegin   C.int
+	szPage    C.int
+	nFrame    C.int
+	aPgno     *C.unsigned
+	aPage     unsafe.Pointer
+	nTruncate C.uint
+	isCommit  C.int
+}
+
+// IsBegin sets the C isBegin parameter for sqlite3_wal_replication_frames.
+func (i *WalReplicationFrameInfo) IsBegin(flag bool) {
+	if flag {
+		i.isBegin = C.int(1)
+	} else {
+		i.isBegin = C.int(0)
+	}
+}
+
+// PageSize sets the C szPage parameter for sqlite3_wal_replication_frames.
+func (i *WalReplicationFrameInfo) PageSize(size int) {
+	i.szPage = C.int(size)
+}
+
+// Len sets the C nFrame parameter for sqlite3_wal_replication_frames.
+func (i *WalReplicationFrameInfo) Len(n int) {
+	i.nFrame = C.int(n)
+}
+
+// Pages sets the C aPgno and aPage parameters for sqlite3_wal_replication_frames.
+func (i *WalReplicationFrameInfo) Pages(numbers []PageNumber, data unsafe.Pointer) {
+	i.aPgno = (*C.unsigned)(&numbers[0])
+	i.aPage = data
+}
+
+// Truncate sets the nTruncate parameter for sqlite3_wal_replication_frames.
+func (i *WalReplicationFrameInfo) Truncate(truncate uint) {
+	i.nTruncate = C.unsigned(truncate)
+}
+
+// IsCommit sets the isCommit parameter for sqlite3_wal_replication_frames.
+func (i *WalReplicationFrameInfo) IsCommit(flag bool) {
+	if flag {
+		i.isCommit = C.int(1)
+	} else {
+		i.isCommit = C.int(0)
+	}
+}
+
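+// IsCommitGet returns whether the isCommit parameter has been set.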
+func (i *WalReplicationFrameInfo) IsCommitGet() bool {
+	return i.isCommit > 0
+}
+
+//export walReplicationBegin
+func walReplicationBegin(handle C.uintptr_t, db *C.sqlite3) C.int {
+	methods := walReplicationMethodsIndex[handle]
+
+	return C.int(methods.Begin((*Conn)(unsafe.Pointer(db))))
+}
+
+//export walReplicationAbort
+func walReplicationAbort(handle C.uintptr_t, db *C.sqlite3) C.int {
+	methods := walReplicationMethodsIndex[handle]
+	return C.int(methods.Abort((*Conn)(unsafe.Pointer(db))))
+}
+
+//export walReplicationFrames
+func walReplicationFrames(
+	handle C.uintptr_t,
+	db *C.sqlite3,
+	szPage C.int,
+	nFrame C.int,
+	aFrame *C.sqlite3_wal_replication_frame,
+	nTruncate C.uint,
+	isCommit C.int,
+) C.int {
+	methods := walReplicationMethodsIndex[handle]
+
+	list := WalReplicationFrameList{
+		szPage:    szPage,
+		nFrame:    nFrame,
+		aFrame:    aFrame,
+		nTruncate: nTruncate,
+		isCommit:  isCommit,
+	}
+
+	return C.int(methods.Frames((*Conn)(unsafe.Pointer(db)), list))
+}
+
+//export walReplicationUndo
+func walReplicationUndo(handle C.uintptr_t, db *C.sqlite3) C.int {
+	methods := walReplicationMethodsIndex[handle]
+
+	return C.int(methods.Undo((*Conn)(unsafe.Pointer(db))))
+}
+
+//export walReplicationEnd
+func walReplicationEnd(handle C.uintptr_t, db *C.sqlite3) C.int {
+	methods := walReplicationMethodsIndex[handle]
+
+	return C.int(methods.End((*Conn)(unsafe.Pointer(db))))
+}
+
+// Map uintptr to WalReplicationMethods instances to avoid passing Go pointers
+// to C.
+//
+// We do not protect this map with a lock since typically just one long-lived
+// WalReplication instance should be registered (except for unit tests).
+var walReplicationMethodsSerial C.uintptr_t = 100
+var walReplicationMethodsIndex = map[C.uintptr_t]WalReplicationMethods{}
+
+// Hard-coded main schema name.
+//
+// TODO: support replicating also attached databases.
+var walReplicationSchema = C.CString("main")
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/buffer.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/buffer.go
new file mode 100644
index 0000000000..00efe94ffe
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/buffer.go
@@ -0,0 +1,11 @@
+package client
+
+// Buffer for reading responses or writing requests.
+type buffer struct {
+	Bytes  []byte
+	Offset int
+}
+
+func (b *buffer) Advance(amount int) {
+	b.Offset += amount
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/client.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/client.go
new file mode 100644
index 0000000000..08c3465caf
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/client.go
@@ -0,0 +1,322 @@
+package client
+
+import (
+	"context"
+	"encoding/binary"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/logging"
+	"github.com/pkg/errors"
+)
+
+// Client connecting to a dqlite server and speaking the dqlite wire protocol.
+type Client struct {
+	log              logging.Func  // Logging function.
+	address          string        // Address of the connected dqlite server.
+	store            ServerStore   // Update this store upon heartbeats.
+	conn             net.Conn      // Underlying network connection.
+	heartbeatTimeout time.Duration // Heartbeat timeout reported at registration.
+	contextTimeout   time.Duration // Default context timeout.
+	closeCh          chan struct{} // Stops the heartbeat when the connection gets closed.
+	mu               sync.Mutex    // Serialize requests
+}
+
+func newClient(conn net.Conn, address string, store ServerStore, log logging.Func) *Client {
+	//logger.With(zap.String("target", address)
+	client := &Client{
+		conn:           conn,
+		address:        address,
+		store:          store,
+		log:            log,
+		closeCh:        make(chan struct{}),
+		contextTimeout: 5 * time.Second,
+	}
+
+	return client
+}
+
+// SetContextTimeout sets the default context timeout when no deadline is
+// provided.
+func (c *Client) SetContextTimeout(timeout time.Duration) {
+	c.contextTimeout = timeout
+}
+
+// Call invokes a dqlite RPC, sending a request message and receiving a
+// response message.
+func (c *Client) Call(ctx context.Context, request, response *Message) error {
+	// We need to take a lock since the dqlite server currently does not
+	// support concurrent requests.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Honor the ctx deadline, if present, or use a default.
+	deadline, ok := ctx.Deadline()
+	if !ok {
+		deadline = time.Now().Add(c.contextTimeout)
+	}
+
+	c.conn.SetDeadline(deadline)
+
+	if err := c.send(request); err != nil {
+		return errors.Wrap(err, "failed to send request")
+	}
+
+	if err := c.recv(response); err != nil {
+		return errors.Wrap(err, "failed to receive response")
+	}
+
+	return nil
+}
+
+// More is used when a request maps to multiple responses.
+func (c *Client) More(ctx context.Context, response *Message) error {
+	return c.recv(response)
+}
+
+// Interrupt sends an interrupt request and waits for the server's empty
+// response.
+func (c *Client) Interrupt(ctx context.Context, request *Message, response *Message) error {
+	// We need to take a lock since the dqlite server currently does not
+	// support concurrent requests.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Honor the ctx deadline, if present, or use a default.
+	deadline, ok := ctx.Deadline()
+	if !ok {
+		deadline = time.Now().Add(2 * time.Second)
+	}
+	c.conn.SetDeadline(deadline)
+
+	defer request.Reset()
+
+	EncodeInterrupt(request, 0)
+
+	if err := c.send(request); err != nil {
+		return errors.Wrap(err, "failed to send interrupt request")
+	}
+
+	for {
+		if err := c.recv(response); err != nil {
+			response.Reset()
+			return errors.Wrap(err, "failed to receive response")
+		}
+
+		mtype, _ := response.getHeader()
+		response.Reset()
+
+		if mtype == bindings.ResponseEmpty {
+			break
+		}
+	}
+
+	return nil
+}
+
+// Close the client connection.
+func (c *Client) Close() error {
+	c.log(bindings.LogInfo, "closing client")
+
+	close(c.closeCh)
+
+	return c.conn.Close()
+}
+
+func (c *Client) send(req *Message) error {
+	if err := c.sendHeader(req); err != nil {
+		return errors.Wrap(err, "failed to send header")
+	}
+
+	if err := c.sendBody(req); err != nil {
+		return errors.Wrap(err, "failed to send body")
+	}
+
+	return nil
+}
+
+func (c *Client) sendHeader(req *Message) error {
+	n, err := c.conn.Write(req.header[:])
+	if err != nil {
+		return errors.Wrap(err, "failed to send header")
+	}
+
+	if n != messageHeaderSize {
+		return errors.Wrap(io.ErrShortWrite, "failed to send header")
+	}
+
+	return nil
+}
+
+func (c *Client) sendBody(req *Message) error {
+	buf := req.body1.Bytes[:req.body1.Offset]
+	n, err := c.conn.Write(buf)
+	if err != nil {
+		return errors.Wrap(err, "failed to send static body")
+	}
+
+	if n != len(buf) {
+		return errors.Wrap(io.ErrShortWrite, "failed to write body")
+	}
+
+	if req.body2.Bytes == nil {
+		return nil
+	}
+
+	buf = req.body2.Bytes[:req.body2.Offset]
+	n, err = c.conn.Write(buf)
+	if err != nil {
+		return errors.Wrap(err, "failed to send dynamic body")
+	}
+
+	if n != len(buf) {
+		return errors.Wrap(io.ErrShortWrite, "failed to write body")
+	}
+
+	return nil
+}
+
+func (c *Client) recv(res *Message) error {
+	if err := c.recvHeader(res); err != nil {
+		return errors.Wrap(err, "failed to receive header")
+	}
+
+	if err := c.recvBody(res); err != nil {
+		return errors.Wrap(err, "failed to receive body")
+	}
+
+	return nil
+}
+
+func (c *Client) recvHeader(res *Message) error {
+	if err := c.recvPeek(res.header); err != nil {
+		return errors.Wrap(err, "failed to receive header")
+	}
+
+	res.words = binary.LittleEndian.Uint32(res.header[0:])
+	res.mtype = res.header[4]
+	res.flags = res.header[5]
+	res.extra = binary.LittleEndian.Uint16(res.header[6:])
+
+	return nil
+}
+
+func (c *Client) recvBody(res *Message) error {
+	n := int(res.words) * messageWordSize
+	n1 := n
+	n2 := 0
+
+	if n1 > len(res.body1.Bytes) {
+		// We need to allocate the dynamic buffer.
+		n1 = len(res.body1.Bytes)
+		n2 = n - n1
+	}
+
+	buf := res.body1.Bytes[:n1]
+
+	if err := c.recvPeek(buf); err != nil {
+		return errors.Wrap(err, "failed to read body")
+	}
+
+	if n2 > 0 {
+		res.body2.Bytes = make([]byte, n2)
+		res.body2.Offset = 0
+		buf = res.body2.Bytes
+		if err := c.recvPeek(buf); err != nil {
+			return errors.Wrap(err, "failed to read body")
+		}
+	}
+
+	return nil
+}
+
+// Read until buf is full.
+func (c *Client) recvPeek(buf []byte) error {
+	for offset := 0; offset < len(buf); {
+		n, err := c.recvFill(buf[offset:])
+		if err != nil {
+			return err
+		}
+		offset += n
+	}
+
+	return nil
+}
+
+// Try to fill buf, performing at most one successful read.
+func (c *Client) recvFill(buf []byte) (int, error) {
+	// Read new data: try a limited number of times.
+	//
+	// This technique is copied from bufio.Reader.
+	for i := messageMaxConsecutiveEmptyReads; i > 0; i-- {
+		n, err := c.conn.Read(buf)
+		if n < 0 {
+			panic(errNegativeRead)
+		}
+		if err != nil {
+			return -1, err
+		}
+		if n > 0 {
+			return n, nil
+		}
+	}
+	return -1, io.ErrNoProgress
+}
+
+func (c *Client) heartbeat() {
+	request := Message{}
+	request.Init(16)
+	response := Message{}
+	response.Init(512)
+
+	for {
+		delay := c.heartbeatTimeout / 3
+
+		//c.logger.Debug("sending heartbeat", zap.Duration("delay", delay))
+		time.Sleep(delay)
+
+		// Check if we've been closed.
+		select {
+		case <-c.closeCh:
+			return
+		default:
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+
+		EncodeHeartbeat(&request, uint64(time.Now().Unix()))
+
+		err := c.Call(ctx, &request, &response)
+
+		// We bail out upon failures.
+		//
+		// TODO: make the client survive temporary disconnections.
+		if err != nil {
+			cancel()
+			//c.logger.Error("heartbeat failed", zap.Error(err))
+			return
+		}
+
+		//addresses, err := DecodeServers(&response)
+		_, err = DecodeServers(&response)
+		if err != nil {
+			cancel()
+			//c.logger.Error("invalid heartbeat response", zap.Error(err))
+			return
+		}
+
+		// if err := c.store.Set(ctx, addresses); err != nil {
+		// 	cancel()
+		// 	c.logger.Error("failed to update servers", zap.Error(err))
+		// 	return
+		// }
+
+		cancel()
+
+		request.Reset()
+		response.Reset()
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/config.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/config.go
new file mode 100644
index 0000000000..6d349f7bf0
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/config.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+	"time"
+
+	"github.com/Rican7/retry/strategy"
+)
+
+// Config holds various configuration parameters for a dqlite client.
+type Config struct {
+	Dial            DialFunc            // Network dialer.
+	AttemptTimeout  time.Duration       // Timeout for each individual Dial attempt.
+	RetryStrategies []strategy.Strategy // Strategies used for retrying to connect to a leader.
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/connector.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/connector.go
new file mode 100644
index 0000000000..ae3df0e772
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/connector.go
@@ -0,0 +1,231 @@
+package client
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/logging"
+	"github.com/Rican7/retry"
+	"github.com/pkg/errors"
+)
+
+// Connector is in charge of creating a dqlite SQL client connected to the
+// current leader of a cluster.
+type Connector struct {
+	id       uint64       // Client ID to use when registering against the server.
+	store    ServerStore  // Used to get and update current cluster servers.
+	config   Config       // Connection parameters.
+	log      logging.Func // Logging function.
+	protocol []byte       // Protocol version
+}
+
+// NewConnector returns a new connector that can be used by a dqlite driver to
+// create new clients connected to a leader dqlite server.
+func NewConnector(id uint64, store ServerStore, config Config, log logging.Func) *Connector {
+	connector := &Connector{
+		id:       id,
+		store:    store,
+		config:   config,
+		log:      log,
+		protocol: make([]byte, 8),
+	}
+
+	// Latest protocol version.
+	binary.LittleEndian.PutUint64(
+		connector.protocol,
+		bindings.ProtocolVersion,
+	)
+
+	return connector
+}
+
+// Connect finds the leader server and returns a connection to it.
+//
+// If the given context expires before a leader is found, ErrNoAvailableLeader
+// is returned.
+func (c *Connector) Connect(ctx context.Context) (*Client, error) {
+	var client *Client
+
+	// The retry strategy should be configured to retry indefinitely, until
+	// the given context is done.
+	err := retry.Retry(func(attempt uint) error {
+		log := func(l logging.Level, format string, a ...interface{}) {
+			format += fmt.Sprintf(" attempt=%d", attempt)
+			c.log(l, fmt.Sprintf(format, a...))
+		}
+
+		select {
+		case <-ctx.Done():
+			// Stop retrying
+			return nil
+		default:
+		}
+
+		var err error
+		client, err = c.connectAttemptAll(ctx, log)
+		if err != nil {
+			log(logging.Debug, "connection failed err=%v", err)
+			return err
+		}
+
+		return nil
+	}, c.config.RetryStrategies...)
+
+	if err != nil {
+		// The retry strategy should never give up until success or
+		// context expiration.
+		panic("connect retry aborted unexpectedly")
+	}
+
+	if ctx.Err() != nil {
+		return nil, ErrNoAvailableLeader
+	}
+
+	return client, nil
+}
+
+// Make a single attempt to establish a connection to the leader server trying
+// all addresses available in the store.
+func (c *Connector) connectAttemptAll(ctx context.Context, log logging.Func) (*Client, error) {
+	servers, err := c.store.Get(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get cluster servers")
+	}
+
+	// Make an attempt for each address until we find the leader.
+	for _, server := range servers {
+		log := func(l logging.Level, format string, a ...interface{}) {
+			format += fmt.Sprintf(" address=%s", server.Address)
+			log(l, fmt.Sprintf(format, a...))
+		}
+
+		ctx, cancel := context.WithTimeout(ctx, c.config.AttemptTimeout)
+		defer cancel()
+
+		conn, leader, err := c.connectAttemptOne(ctx, server.Address)
+		if err != nil {
+			// This server is unavailable, try with the next target.
+			log(logging.Debug, "server connection failed err=%v", err)
+			continue
+		}
+		if conn != nil {
+			// We found the leader
+			log(logging.Info, "connected")
+			return conn, nil
+		}
+		if leader == "" {
+			// This server does not know who the current leader is,
+			// try with the next target.
+			continue
+		}
+
+		// If we get here, it means this server reported that another
+		// server is the leader, let's close the connection to this
+		// server and try with the suggested one.
+		//logger = logger.With(zap.String("leader", leader))
+		conn, leader, err = c.connectAttemptOne(ctx, leader)
+		if err != nil {
+			// The leader reported by the previous server is
+			// unavailable, try with the next target.
+			//logger.Info("leader server connection failed", zap.String("err", err.Error()))
+			continue
+		}
+		if conn == nil {
+			// The leader reported by the target server does not consider itself
+			// the leader, try with the next target.
+			//logger.Info("reported leader server is not the leader")
+			continue
+		}
+		log(logging.Info, "connected")
+		return conn, nil
+	}
+
+	return nil, ErrNoAvailableLeader
+}
+
+// Connect to the given dqlite server and check if it's the leader.
+//
+// Return values:
+//
+// - Any failure is hit:                     -> nil, "", err
+// - Target not leader and no leader known:  -> nil, "", nil
+// - Target not leader and leader known:     -> nil, leader, nil
+// - Target is the leader:                   -> server, "", nil
+//
+func (c *Connector) connectAttemptOne(ctx context.Context, address string) (*Client, string, error) {
+	// Establish the connection.
+	conn, err := c.config.Dial(ctx, address)
+	if err != nil {
+		return nil, "", errors.Wrap(err, "failed to establish network connection")
+	}
+
+	// Perform the protocol handshake.
+	n, err := conn.Write(c.protocol)
+	if err != nil {
+		conn.Close()
+		return nil, "", errors.Wrap(err, "failed to send handshake")
+	}
+	if n != 8 {
+		conn.Close()
+		return nil, "", errors.Wrap(io.ErrShortWrite, "failed to send handshake")
+	}
+
+	client := newClient(conn, address, c.store, c.log)
+
+	// Send the initial Leader request.
+	request := Message{}
+	request.Init(16)
+	response := Message{}
+	response.Init(512)
+
+	EncodeLeader(&request)
+
+	if err := client.Call(ctx, &request, &response); err != nil {
+		client.Close()
+		return nil, "", errors.Wrap(err, "failed to send Leader request")
+	}
+
+	leader, err := DecodeServer(&response)
+	if err != nil {
+		client.Close()
+		return nil, "", errors.Wrap(err, "failed to parse Server response")
+	}
+
+	switch leader {
+	case "":
+		// Currently this server does not know about any leader.
+		client.Close()
+		return nil, "", nil
+	case address:
+		// This server is the leader, register ourselves and return.
+		request.Reset()
+		response.Reset()
+
+		EncodeClient(&request, c.id)
+
+		if err := client.Call(ctx, &request, &response); err != nil {
+			client.Close()
+			return nil, "", errors.Wrap(err, "failed to send Client request")
+		}
+
+		heartbeatTimeout, err := DecodeWelcome(&response)
+		if err != nil {
+			client.Close()
+			return nil, "", errors.Wrap(err, "failed to parse Welcome response")
+		}
+
+		client.heartbeatTimeout = time.Duration(heartbeatTimeout) * time.Millisecond
+
+		// TODO: enable heartbeat
+		//go client.heartbeat()
+
+		return client, "", nil
+	default:
+		// This server claims to know who the current leader is.
+		client.Close()
+		return nil, leader, nil
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/dial.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/dial.go
new file mode 100644
index 0000000000..7b58aca0a7
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/dial.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+	"context"
+	"net"
+)
+
+// DialFunc is a function that can be used to establish a network connection.
+type DialFunc func(context.Context, string) (net.Conn, error)
+
+// TCPDial is a dial function using plain TCP to establish the network
+// connection.
+func TCPDial(ctx context.Context, address string) (net.Conn, error) {
+	dialer := net.Dialer{}
+	return dialer.DialContext(ctx, "tcp", address)
+}
+
+// UnixDial is a dial function using Unix sockets to establish the network
+// connection.
+func UnixDial(ctx context.Context, address string) (net.Conn, error) {
+	dialer := net.Dialer{}
+	return dialer.DialContext(ctx, "unix", address)
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/errors.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/errors.go
new file mode 100644
index 0000000000..88665a89c0
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/errors.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"fmt"
+)
+
+// Client errors.
+var (
+	ErrNoAvailableLeader = fmt.Errorf("no available dqlite leader server found")
+	errStop              = fmt.Errorf("connector was stopped")
+	errStaleLeader       = fmt.Errorf("server has lost leadership")
+	errNotClustered      = fmt.Errorf("server is not clustered")
+	errNegativeRead      = fmt.Errorf("reader returned negative count from Read")
+	errMessageEOF        = fmt.Errorf("message eof")
+)
+
+// ErrRequest is returned in case of request failure.
+type ErrRequest struct {
+	Code        uint64
+	Description string
+}
+
+func (e ErrRequest) Error() string {
+	return fmt.Sprintf("%s (%d)", e.Description, e.Code)
+}
+
+// ErrRowsPart is returned when a response carries only part of a
+// multi-response result set and more rows are available.
+var ErrRowsPart = fmt.Errorf("not all rows were returned in this response")
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/message.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/message.go
new file mode 100644
index 0000000000..c5dbc315aa
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/message.go
@@ -0,0 +1,585 @@
+package client
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/binary"
+	"io"
+	"math"
+	"strings"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+
+// NamedValues is a type alias of a slice of driver.NamedValue. It's used by
+// schema.sh to generate encoding logic for statement parameters.
+type NamedValues = []driver.NamedValue
+
+// Servers is a type alias of a slice of bindings.ServerInfo. It's used by
+// schema.sh to generate decoding logic for the heartbeat response.
+type Servers []bindings.ServerInfo
+
+// Message holds data about a single request or response.
+type Message struct {
+	words  uint32
+	mtype  uint8
+	flags  uint8
+	extra  uint16
+	header []byte // Statically allocated header buffer
+	body1  buffer // Statically allocated body data, using bytes
+	body2  buffer // Dynamically allocated body data
+}
+
+// Init initializes the message using the given size of the statically
+// allocated buffer (i.e. a buffer which is re-used across requests or
+// responses encoded or decoded using this message object).
+func (m *Message) Init(staticSize int) {
+	if (staticSize % messageWordSize) != 0 {
+		panic("static size is not aligned to word boundary")
+	}
+	m.header = make([]byte, messageHeaderSize)
+	m.body1.Bytes = make([]byte, staticSize)
+	m.Reset()
+}
+
+// Reset the state of the message so it can be used to encode or decode again.
+func (m *Message) Reset() {
+	m.words = 0
+	m.mtype = 0
+	m.flags = 0
+	m.extra = 0
+	for i := 0; i < messageHeaderSize; i++ {
+		m.header[i] = 0
+	}
+	m.body1.Offset = 0
+	m.body2.Bytes = nil
+	m.body2.Offset = 0
+}
+
+// Append a byte slice to the message.
+func (m *Message) putBlob(v []byte) {
+	size := len(v)
+	pad := 0
+	if (size % messageWordSize) != 0 {
+		// Account for padding
+		pad = messageWordSize - (size % messageWordSize)
+		size += pad
+	}
+
+	b := m.bufferForPut(size)
+	defer b.Advance(size)
+
+	// Copy the bytes into the buffer.
+	offset := b.Offset
+	copy(b.Bytes[offset:], v)
+	offset += len(v)
+
+	// Add padding
+	for i := 0; i < pad; i++ {
+		b.Bytes[offset] = 0
+		offset++
+	}
+}
+
+// Append a string to the message.
+func (m *Message) putString(v string) {
+	size := len(v) + 1
+	pad := 0
+	if (size % messageWordSize) != 0 {
+		// Account for padding
+		pad = messageWordSize - (size % messageWordSize)
+		size += pad
+	}
+
+	b := m.bufferForPut(size)
+	defer b.Advance(size)
+
+	// Copy the string bytes into the buffer.
+	offset := b.Offset
+	copy(b.Bytes[offset:], v)
+	offset += len(v)
+
+	// Add a nul byte
+	b.Bytes[offset] = 0
+	offset++
+
+	// Add padding
+	for i := 0; i < pad; i++ {
+		b.Bytes[offset] = 0
+		offset++
+	}
+}
+
+// Append a byte to the message.
+func (m *Message) putUint8(v uint8) {
+	b := m.bufferForPut(1)
+	defer b.Advance(1)
+
+	b.Bytes[b.Offset] = v
+}
+
+// Append a 2-byte word to the message.
+func (m *Message) putUint16(v uint16) {
+	b := m.bufferForPut(2)
+	defer b.Advance(2)
+
+	binary.LittleEndian.PutUint16(b.Bytes[b.Offset:], v)
+}
+
+// Append a 4-byte word to the message.
+func (m *Message) putUint32(v uint32) {
+	b := m.bufferForPut(4)
+	defer b.Advance(4)
+
+	binary.LittleEndian.PutUint32(b.Bytes[b.Offset:], v)
+}
+
+// Append an 8-byte word to the message.
+func (m *Message) putUint64(v uint64) {
+	b := m.bufferForPut(8)
+	defer b.Advance(8)
+
+	binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], v)
+}
+
+// Append a signed 8-byte word to the message.
+func (m *Message) putInt64(v int64) {
+	b := m.bufferForPut(8)
+	defer b.Advance(8)
+
+	binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], uint64(v))
+}
+
+// Append a floating point number to the message.
+func (m *Message) putFloat64(v float64) {
+	b := m.bufferForPut(8)
+	defer b.Advance(8)
+
+	binary.LittleEndian.PutUint64(b.Bytes[b.Offset:], math.Float64bits(v))
+}
+
+// Encode the given driver values as binding parameters.
+func (m *Message) putNamedValues(values NamedValues) {
+	n := uint8(len(values)) // N of params
+	if n == 0 {
+		return
+	}
+
+	m.putUint8(n)
+
+	for i := range values {
+		if values[i].Ordinal != i+1 {
+			panic("unexpected ordinal")
+		}
+
+		switch values[i].Value.(type) {
+		case int64:
+			m.putUint8(bindings.Integer)
+		case float64:
+			m.putUint8(bindings.Float)
+		case bool:
+			m.putUint8(bindings.Boolean)
+		case []byte:
+			m.putUint8(bindings.Blob)
+		case string:
+			m.putUint8(bindings.Text)
+		case nil:
+			m.putUint8(bindings.Null)
+		case time.Time:
+			m.putUint8(bindings.ISO8601)
+		default:
+			panic("unsupported value type")
+		}
+	}
+
+	b := m.bufferForPut(1)
+
+	if trailing := b.Offset % messageWordSize; trailing != 0 {
+		// Skip padding bytes
+		b.Advance(messageWordSize - trailing)
+	}
+
+	for i := range values {
+		switch v := values[i].Value.(type) {
+		case int64:
+			m.putInt64(v)
+		case float64:
+			m.putFloat64(v)
+		case bool:
+			if v {
+				m.putUint64(1)
+			} else {
+				m.putUint64(0)
+			}
+		case []byte:
+			m.putBlob(v)
+		case string:
+			m.putString(v)
+		case nil:
+			m.putInt64(0)
+		case time.Time:
+			timestamp := v.Format(iso8601Formats[0])
+			m.putString(timestamp)
+		default:
+			panic("unsupported value type")
+		}
+	}
+}
+
+// Finalize the message by setting the message type and the number
+// of words in the body (calculated from the body size).
+func (m *Message) putHeader(mtype uint8) {
+	if m.body1.Offset <= 0 {
+		panic("static offset is not positive")
+	}
+
+	if (m.body1.Offset % messageWordSize) != 0 {
+		panic("static body is not aligned")
+	}
+
+	m.mtype = mtype
+	m.flags = 0
+	m.extra = 0
+
+	m.words = uint32(m.body1.Offset) / messageWordSize
+
+	if m.body2.Bytes == nil {
+		m.finalize()
+		return
+	}
+
+	if m.body2.Offset <= 0 {
+		panic("dynamic offset is not positive")
+	}
+
+	if (m.body2.Offset % messageWordSize) != 0 {
+		panic("dynamic body is not aligned")
+	}
+
+	m.words += uint32(m.body2.Offset) / messageWordSize
+
+	m.finalize()
+}
+
+func (m *Message) finalize() {
+	if m.words == 0 {
+		panic("empty message body")
+	}
+
+	binary.LittleEndian.PutUint32(m.header[0:], m.words)
+	m.header[4] = m.mtype
+	m.header[5] = m.flags
+	binary.LittleEndian.PutUint16(m.header[6:], m.extra)
+}
+
+func (m *Message) bufferForPut(size int) *buffer {
+	if m.body2.Bytes != nil {
+		if (m.body2.Offset + size) > len(m.body2.Bytes) {
+			// Grow body2.
+			//
+			// TODO: find a good grow strategy.
+			bytes := make([]byte, m.body2.Offset+size)
+			copy(bytes, m.body2.Bytes)
+			m.body2.Bytes = bytes
+		}
+
+		return &m.body2
+	}
+
+	if (m.body1.Offset + size) > len(m.body1.Bytes) {
+		m.body2.Bytes = make([]byte, size)
+		m.body2.Offset = 0
+
+		return &m.body2
+	}
+
+	return &m.body1
+}
+
+// Return the message type and its flags.
+func (m *Message) getHeader() (uint8, uint8) {
+	return m.mtype, m.flags
+}
+
+// Read a string from the message body.
+func (m *Message) getString() string {
+	b := m.bufferForGet()
+
+	index := bytes.IndexByte(b.Bytes[b.Offset:], 0)
+	if index == -1 {
+		// Check if the string overflows into the dynamic buffer.
+		if b == &m.body1 && m.body2.Bytes != nil {
+			// Assert that this is the first read of the dynamic buffer.
+			if m.body2.Offset != 0 {
+				panic("static buffer read after dynamic buffer one")
+			}
+			index = bytes.IndexByte(m.body2.Bytes[0:], 0)
+			if index != -1 {
+				// We found the trailing part of the string.
+				data := b.Bytes[b.Offset:]
+				data = append(data, m.body2.Bytes[0:index]...)
+
+				index++
+
+				if trailing := index % messageWordSize; trailing != 0 {
+					// Account for padding, moving index to the next word boundary.
+					index += messageWordSize - trailing
+				}
+
+				m.body1.Offset = len(m.body1.Bytes)
+				m.body2.Advance(index)
+
+				return string(data)
+			}
+		}
+		panic("no string found")
+	}
+	s := string(b.Bytes[b.Offset : b.Offset+index])
+
+	index++
+
+	if trailing := index % messageWordSize; trailing != 0 {
+		// Account for padding, moving index to the next word boundary.
+		index += messageWordSize - trailing
+	}
+
+	b.Advance(index)
+
+	return s
+}
+
+// Read a byte from the message body.
+func (m *Message) getUint8() uint8 {
+	b := m.bufferForGet()
+	defer b.Advance(1)
+
+	return b.Bytes[b.Offset]
+}
+
+// Read a 2-byte word from the message body.
+func (m *Message) getUint16() uint16 {
+	b := m.bufferForGet()
+	defer b.Advance(2)
+
+	return binary.LittleEndian.Uint16(b.Bytes[b.Offset:])
+}
+
+// Read a 4-byte word from the message body.
+func (m *Message) getUint32() uint32 {
+	b := m.bufferForGet()
+	defer b.Advance(4)
+
+	return binary.LittleEndian.Uint32(b.Bytes[b.Offset:])
+}
+
+// Read an 8-byte word from the message body.
+func (m *Message) getUint64() uint64 {
+	b := m.bufferForGet()
+	defer b.Advance(8)
+
+	return binary.LittleEndian.Uint64(b.Bytes[b.Offset:])
+}
+
+// Read a signed 8-byte word from the message body.
+func (m *Message) getInt64() int64 {
+	b := m.bufferForGet()
+	defer b.Advance(8)
+
+	return int64(binary.LittleEndian.Uint64(b.Bytes[b.Offset:]))
+}
+
+// Read a floating point number from the message body.
+func (m *Message) getFloat64() float64 {
+	b := m.bufferForGet()
+	defer b.Advance(8)
+
+	return math.Float64frombits(binary.LittleEndian.Uint64(b.Bytes[b.Offset:]))
+}
+
+// Decode a list of server objects from the message body.
+func (m *Message) getServers() (servers Servers) {
+	defer func() {
+		err := recover()
+		if err != errMessageEOF {
+			panic(err)
+		}
+	}()
+
+	for {
+		server := bindings.ServerInfo{
+			ID:      m.getUint64(),
+			Address: m.getString(),
+		}
+		servers = append(servers, server)
+		m.bufferForGet()
+	}
+}
+
+// Decode a statement result object from the message body.
+func (m *Message) getResult() Result {
+	return Result{
+		LastInsertID: m.getUint64(),
+		RowsAffected: m.getUint64(),
+	}
+}
+
+// Decode a query result set object from the message body.
+func (m *Message) getRows() Rows {
+	// Read the column count and column names.
+	columns := make([]string, m.getUint64())
+
+	for i := range columns {
+		columns[i] = m.getString()
+	}
+
+	rows := Rows{
+		Columns: columns,
+		message: m,
+	}
+	return rows
+}
+
+func (m *Message) bufferForGet() *buffer {
+	size := int(m.words * messageWordSize)
+	if m.body1.Offset == size || m.body1.Offset == len(m.body1.Bytes) {
+		// The static body has been exhausted, use the dynamic one.
+		if m.body1.Offset+m.body2.Offset == size {
+			panic(errMessageEOF)
+		}
+		return &m.body2
+	}
+
+	return &m.body1
+}
+
+// Result holds the result of a statement.
+type Result struct {
+	LastInsertID uint64
+	RowsAffected uint64
+}
+
+// Rows holds a result set encoded in a message body.
+type Rows struct {
+	Columns []string
+	message *Message
+}
+
+// Next returns the next row in the result set.
+func (r *Rows) Next(dest []driver.Value) error {
+	types := make([]uint8, len(r.Columns))
+
+	// Each column needs a 4-bit slot to store the column type. The row
+	// header must be padded to reach a word boundary.
+	headerBits := len(types) * 4
+	padBits := 0
+	if trailingBits := (headerBits % messageWordBits); trailingBits != 0 {
+		padBits = (messageWordBits - trailingBits)
+	}
+
+	headerSize := (headerBits + padBits) / messageWordBits * messageWordSize
+
+	for i := 0; i < headerSize; i++ {
+		slot := r.message.getUint8()
+
+		if slot == 0xee {
+			// More rows are available.
+			return ErrRowsPart
+		}
+
+		if slot == 0xff {
+			// Rows EOF marker
+			return io.EOF
+		}
+
+		index := i * 2
+
+		if index >= len(types) {
+			continue // This is a padding byte.
+		}
+
+		types[index] = slot & 0x0f
+
+		index++
+
+		if index >= len(types) {
+			continue // This is a padding nibble.
+		}
+
+		types[index] = slot >> 4
+	}
+
+	for i := range types {
+		switch types[i] {
+		case bindings.Integer:
+			dest[i] = r.message.getInt64()
+		case bindings.Float:
+			dest[i] = r.message.getFloat64()
+		case bindings.Blob:
+			panic("todo")
+		case bindings.Text:
+			dest[i] = r.message.getString()
+		case bindings.Null:
+			r.message.getUint64()
+			dest[i] = nil
+		case bindings.UnixTime:
+			timestamp := time.Unix(r.message.getInt64(), 0)
+			dest[i] = timestamp
+		case bindings.ISO8601:
+			value := r.message.getString()
+			if value == "" {
+				dest[i] = time.Time{}
+				break
+			}
+			var t time.Time
+			var timeVal time.Time
+			var err error
+			value = strings.TrimSuffix(value, "Z")
+			for _, format := range iso8601Formats {
+				if timeVal, err = time.ParseInLocation(format, value, time.UTC); err == nil {
+					t = timeVal
+					break
+				}
+			}
+			if err != nil {
+				return err
+			}
+			t = t.In(time.Local)
+			dest[i] = t
+		case bindings.Boolean:
+			dest[i] = r.message.getInt64() != 0
+		default:
+			panic("unknown data type")
+		}
+	}
+
+	return nil
+}
+
+// Close the result set and reset the underlying message.
+func (r *Rows) Close() {
+	r.message.Reset()
+}
+
+const (
+	messageWordSize                 = 8
+	messageWordBits                 = messageWordSize * 8
+	messageHeaderSize               = messageWordSize
+	messageMaxConsecutiveEmptyReads = 100
+)
+
+var iso8601Formats = []string{
+	// By default, store timestamps with whatever timezone they come with.
+	// When parsed, they will be returned with the same timezone.
+	"2006-01-02 15:04:05.999999999-07:00",
+	"2006-01-02T15:04:05.999999999-07:00",
+	"2006-01-02 15:04:05.999999999",
+	"2006-01-02T15:04:05.999999999",
+	"2006-01-02 15:04:05",
+	"2006-01-02T15:04:05",
+	"2006-01-02 15:04",
+	"2006-01-02T15:04",
+	"2006-01-02",
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/request.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/request.go
new file mode 100644
index 0000000000..1cfe9a55c9
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/request.go
@@ -0,0 +1,98 @@
+package client
+
+// DO NOT EDIT
+//
+// This file was generated by ./schema.sh
+
+import (
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+
+// EncodeLeader encodes a Leader request.
+func EncodeLeader(request *Message) {
+	request.putUint64(0)
+
+	request.putHeader(bindings.RequestLeader)
+}
+
+// EncodeClient encodes a Client request.
+func EncodeClient(request *Message, id uint64) {
+	request.putUint64(id)
+
+	request.putHeader(bindings.RequestClient)
+}
+
+// EncodeHeartbeat encodes a Heartbeat request.
+func EncodeHeartbeat(request *Message, timestamp uint64) {
+	request.putUint64(timestamp)
+
+	request.putHeader(bindings.RequestHeartbeat)
+}
+
+// EncodeOpen encodes a Open request.
+func EncodeOpen(request *Message, name string, flags uint64, vfs string) {
+	request.putString(name)
+	request.putUint64(flags)
+	request.putString(vfs)
+
+	request.putHeader(bindings.RequestOpen)
+}
+
+// EncodePrepare encodes a Prepare request.
+func EncodePrepare(request *Message, db uint64, sql string) {
+	request.putUint64(db)
+	request.putString(sql)
+
+	request.putHeader(bindings.RequestPrepare)
+}
+
+// EncodeExec encodes a Exec request.
+func EncodeExec(request *Message, db uint32, stmt uint32, values NamedValues) {
+	request.putUint32(db)
+	request.putUint32(stmt)
+	request.putNamedValues(values)
+
+	request.putHeader(bindings.RequestExec)
+}
+
+// EncodeQuery encodes a Query request.
+func EncodeQuery(request *Message, db uint32, stmt uint32, values NamedValues) {
+	request.putUint32(db)
+	request.putUint32(stmt)
+	request.putNamedValues(values)
+
+	request.putHeader(bindings.RequestQuery)
+}
+
+// EncodeFinalize encodes a Finalize request.
+func EncodeFinalize(request *Message, db uint32, stmt uint32) {
+	request.putUint32(db)
+	request.putUint32(stmt)
+
+	request.putHeader(bindings.RequestFinalize)
+}
+
+// EncodeExecSQL encodes a ExecSQL request.
+func EncodeExecSQL(request *Message, db uint64, sql string, values NamedValues) {
+	request.putUint64(db)
+	request.putString(sql)
+	request.putNamedValues(values)
+
+	request.putHeader(bindings.RequestExecSQL)
+}
+
+// EncodeQuerySQL encodes a QuerySQL request.
+func EncodeQuerySQL(request *Message, db uint64, sql string, values NamedValues) {
+	request.putUint64(db)
+	request.putString(sql)
+	request.putNamedValues(values)
+
+	request.putHeader(bindings.RequestQuerySQL)
+}
+
+// EncodeInterrupt encodes a Interrupt request.
+func EncodeInterrupt(request *Message, db uint64) {
+	request.putUint64(db)
+
+	request.putHeader(bindings.RequestInterrupt)
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/response.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/response.go
new file mode 100644
index 0000000000..6d706d98b2
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/response.go
@@ -0,0 +1,213 @@
+package client
+
+// DO NOT EDIT
+//
+// This file was generated by ./schema.sh
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+
+// DecodeFailure decodes a Failure response.
+func DecodeFailure(response *Message) (code uint64, message string, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseFailure {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	code = response.getUint64()
+	message = response.getString()
+
+	return
+}
+
+// DecodeWelcome decodes a Welcome response.
+func DecodeWelcome(response *Message) (heartbeatTimeout uint64, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseWelcome {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	heartbeatTimeout = response.getUint64()
+
+	return
+}
+
+// DecodeServer decodes a Server response.
+func DecodeServer(response *Message) (address string, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseServer {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	address = response.getString()
+
+	return
+}
+
+// DecodeServers decodes a Servers response.
+func DecodeServers(response *Message) (servers Servers, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseServers {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	servers = response.getServers()
+
+	return
+}
+
+// DecodeDb decodes a Db response.
+func DecodeDb(response *Message) (id uint32, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseDb {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	id = response.getUint32()
+	response.getUint32()
+
+	return
+}
+
+// DecodeStmt decodes a Stmt response.
+func DecodeStmt(response *Message) (db uint32, id uint32, params uint64, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseStmt {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	db = response.getUint32()
+	id = response.getUint32()
+	params = response.getUint64()
+
+	return
+}
+
+// DecodeEmpty decodes a Empty response.
+func DecodeEmpty(response *Message) (err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseEmpty {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	response.getUint64()
+
+	return
+}
+
+// DecodeResult decodes a Result response.
+func DecodeResult(response *Message) (result Result, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseResult {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	result = response.getResult()
+
+	return
+}
+
+// DecodeRows decodes a Rows response.
+func DecodeRows(response *Message) (rows Rows, err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.ResponseRows {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+	rows = response.getRows()
+
+	return
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.go
new file mode 100644
index 0000000000..bc7b6d4f82
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.go
@@ -0,0 +1,26 @@
+package client
+
+//go:generate ./schema.sh --request init
+
+//go:generate ./schema.sh --request Leader    unused:uint64
+//go:generate ./schema.sh --request Client    id:uint64
+//go:generate ./schema.sh --request Heartbeat timestamp:uint64
+//go:generate ./schema.sh --request Open      name:string flags:uint64 vfs:string
+//go:generate ./schema.sh --request Prepare   db:uint64 sql:string
+//go:generate ./schema.sh --request Exec      db:uint32 stmt:uint32 values:NamedValues
+//go:generate ./schema.sh --request Query     db:uint32 stmt:uint32 values:NamedValues
+//go:generate ./schema.sh --request Finalize  db:uint32 stmt:uint32
+//go:generate ./schema.sh --request ExecSQL   db:uint64 sql:string values:NamedValues
+//go:generate ./schema.sh --request QuerySQL  db:uint64 sql:string values:NamedValues
+//go:generate ./schema.sh --request Interrupt  db:uint64
+
+//go:generate ./schema.sh --response init
+//go:generate ./schema.sh --response Failure  code:uint64 message:string
+//go:generate ./schema.sh --response Welcome  heartbeatTimeout:uint64
+//go:generate ./schema.sh --response Server   address:string
+//go:generate ./schema.sh --response Servers  servers:Servers
+//go:generate ./schema.sh --response Db       id:uint32 unused:uint32
+//go:generate ./schema.sh --response Stmt     db:uint32 id:uint32 params:uint64
+//go:generate ./schema.sh --response Empty    unused:uint64
+//go:generate ./schema.sh --response Result   result:Result
+//go:generate ./schema.sh --response Rows     rows:Rows
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.sh b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.sh
new file mode 100755
index 0000000000..ca012cea46
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/schema.sh
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+request_init() {
+    cat > request.go <<EOF
+package client
+
+// DO NOT EDIT
+//
+// This file was generated by ./schema.sh
+
+import (
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+EOF
+}
+
+response_init() {
+    cat > response.go <<EOF
+package client
+
+// DO NOT EDIT
+//
+// This file was generated by ./schema.sh
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+EOF
+}
+
+entity=$1
+shift
+
+cmd=$1
+shift
+
+if [ "$entity" = "--request" ]; then
+    if [ "$cmd" = "init" ]; then
+	request_init
+	exit
+    fi
+
+    args=""
+
+    for i in "${@}"
+    do
+	name=$(echo "$i" | cut -f 1 -d :)
+	type=$(echo "$i" | cut -f 2 -d :)
+
+	if [ "$name" = "unused" ]; then
+	    continue
+	fi
+
+	args=$(echo "${args}, ${name} ${type}")
+    done
+
+    cat >> request.go <<EOF
+
+// Encode${cmd} encodes a $cmd request.
+func Encode${cmd}(request *Message${args}) {
+EOF
+
+    for i in "${@}"
+    do
+	name=$(echo "$i" | cut -f 1 -d :)
+	type=$(echo "$i" | cut -f 2 -d :)
+
+	if [ "$name" = "unused" ]; then
+	    name=$(echo "0")
+	fi
+
+	cat >> request.go <<EOF
+	request.put${type^}(${name})
+EOF
+    done
+
+    cat >> request.go <<EOF
+
+	request.putHeader(bindings.Request${cmd})
+}
+EOF
+
+fi
+
+if [ "$entity" = "--response" ]; then
+    if [ "$cmd" = "init" ]; then
+	response_init
+	exit
+    fi
+
+    returns=""
+
+    for i in "${@}"
+    do
+	name=$(echo "$i" | cut -f 1 -d :)
+	type=$(echo "$i" | cut -f 2 -d :)
+
+	if [ "$name" = "unused" ]; then
+	    continue
+	fi
+
+	returns=$(echo "${returns}${name} ${type}, ")
+    done
+
+    cat >> response.go <<EOF
+
+// Decode${cmd} decodes a $cmd response.
+func Decode${cmd}(response *Message) (${returns}err error) {
+	mtype, _ := response.getHeader()
+
+	if mtype == bindings.ResponseFailure {
+		e := ErrRequest{}
+		e.Code = response.getUint64()
+		e.Description = response.getString()
+		err = e
+		return
+	}
+
+	if mtype != bindings.Response${cmd} {
+		err = fmt.Errorf("unexpected response type %d", mtype)
+		return
+	}
+
+EOF
+
+    for i in "${@}"
+    do
+	name=$(echo "$i" | cut -f 1 -d :)
+	type=$(echo "$i" | cut -f 2 -d :)
+
+	assign=$(echo "${name} = ")
+
+	if [ "$name" = "unused" ]; then
+	    assign=$(echo "")
+	fi
+
+	cat >> response.go <<EOF
+	${assign}response.get${type^}()
+EOF
+    done
+
+    cat >> response.go <<EOF
+
+	return
+}
+EOF
+
+fi
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/store.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/store.go
new file mode 100644
index 0000000000..1fb71fffdc
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/client/store.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"context"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+
+// ServerInfo holds information about a single server.
+type ServerInfo = bindings.ServerInfo
+
+// ServerStore is used by a dqlite client to get an initial list of candidate
+// dqlite servers that it can dial in order to find a leader server to connect
+// to.
+//
+// Once connected, the client periodically updates the server addresses in the
+// store by querying the leader about changes in the cluster (such as servers
+// being added or removed).
+type ServerStore interface {
+	// Get returns the list of known servers.
+	Get(context.Context) ([]ServerInfo, error)
+
+	// Set updates the list of known cluster servers.
+	Set(context.Context, []ServerInfo) error
+}
+
+// InmemServerStore keeps the list of servers in memory.
+type InmemServerStore struct {
+	servers []ServerInfo
+}
+
+// NewInmemServerStore creates a ServerStore that keeps its data in memory.
+func NewInmemServerStore() *InmemServerStore {
+	return &InmemServerStore{
+		servers: make([]ServerInfo, 0),
+	}
+}
+
+// Get the current servers.
+func (i *InmemServerStore) Get(ctx context.Context) ([]ServerInfo, error) {
+	return i.servers, nil
+}
+
+// Set the servers.
+func (i *InmemServerStore) Set(ctx context.Context, servers []ServerInfo) error {
+	i.servers = servers
+	return nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/open.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/open.go
new file mode 100644
index 0000000000..a031d5476c
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/open.go
@@ -0,0 +1,76 @@
+package connection
+
+/*
+import (
+	"github.com/CanonicalLtd/go-sqlite3"
+	"github.com/pkg/errors"
+)
+
+// OpenLeader is a wrapper around SQLiteDriver.Open that opens connection in
+// leader replication mode, and sets any additional dqlite-related options.
+//
+// The 'methods' argument is used to set the replication methods.
+func OpenLeader(dsn string, methods sqlite3.ReplicationMethods) (*sqlite3.SQLiteConn, error) {
+	conn, err := open(dsn)
+	if err != nil {
+		return nil, err
+	}
+
+	// Switch to leader replication mode for this connection.
+	if err := conn.ReplicationLeader(methods); err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+
+}
+
+// OpenFollower is a wrapper around SQLiteDriver.Open that opens connection in
+// follower replication mode, and sets any additional dqlite-related options.
+func OpenFollower(dsn string) (*sqlite3.SQLiteConn, error) {
+	conn, err := open(dsn)
+	if err != nil {
+		return nil, err
+	}
+
+	// Switch to follower replication mode for this connection.
+	if err := conn.ReplicationFollower(); err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// Open a SQLite connection, setting anything that is common between leader and
+// follower connections.
+func open(dsn string) (*sqlite3.SQLiteConn, error) {
+	// Open a plain connection.
+	driver := &sqlite3.SQLiteDriver{}
+	conn, err := driver.Open(dsn)
+	if err != nil {
+		return nil, errors.Wrapf(err, "open error for %s", dsn)
+	}
+
+	// Convert driver.Conn interface to concrete sqlite3.SQLiteConn.
+	sqliteConn := conn.(*sqlite3.SQLiteConn)
+
+	// Ensure journal mode is set to WAL, as this is a requirement for
+	// replication.
+	if _, err := sqliteConn.Exec("PRAGMA journal_mode=wal", nil); err != nil {
+		return nil, err
+	}
+
+	// Ensure the WAL autocheckpoint is disabled, since checkpoints are
+	// triggered explicitly by dqlite.
+	if _, err := sqliteConn.Exec("PRAGMA wal_autocheckpoint=0", nil); err != nil {
+		return nil, err
+	}
+
+	// Ensure we don't truncate the WAL.
+	if _, err := sqliteConn.Exec("PRAGMA journal_size_limit=-1", nil); err != nil {
+		return nil, err
+	}
+
+	return sqliteConn, nil
+}
+*/
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/snapshot.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/snapshot.go
new file mode 100644
index 0000000000..414a1748bd
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/snapshot.go
@@ -0,0 +1,38 @@
+package connection
+
+import (
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/pkg/errors"
+)
+
+// Snapshot returns a snapshot of the SQLite database with the given path.
+//
+// The snapshot comprises two byte slices: one with the content of the
+// database file and one with the content of the WAL file.
+func Snapshot(vfs *bindings.Vfs, path string) ([]byte, []byte, error) {
+	database, err := vfs.ReadFile(path)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to get database file content")
+	}
+
+	wal, err := vfs.ReadFile(path + "-wal")
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to get WAL file content")
+	}
+
+	return database, wal, nil
+}
+
+// Restore the given database and WAL backups, writing them at the given
+// database path.
+func Restore(vfs *bindings.Vfs, path string, database, wal []byte) error {
+	if err := vfs.WriteFile(path, database); err != nil {
+		return errors.Wrap(err, "failed to restore database file")
+	}
+
+	if err := vfs.WriteFile(path+"-wal", wal); err != nil {
+		return errors.Wrap(err, "failed to restore WAL file")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/uri.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/uri.go
new file mode 100644
index 0000000000..ee7ca302c3
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/connection/uri.go
@@ -0,0 +1,61 @@
+package connection
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+
+// ParseURI parses the given sqlite3 URI checking if it's compatible with
+// dqlite.
+//
+// Only pure file names without any directory segment are accepted
+// (e.g. "test.db"). Of the query parameters only "mode" is inspected;
+// "mode=memory" and unknown modes are rejected.
+//
+// It returns the filename and the SQLite open flags derived from the query
+// parameters.
+func ParseURI(uri string) (string, uint64, error) {
+	filename := uri
+	flags := uint64(bindings.OpenReadWrite | bindings.OpenCreate)
+
+	pos := strings.IndexRune(uri, '?')
+	if pos >= 1 {
+		params, err := url.ParseQuery(uri[pos+1:])
+		if err != nil {
+			return "", 0, err
+		}
+
+		mode := params.Get("mode")
+		switch mode {
+		case "":
+		case "memory":
+			return "", 0, fmt.Errorf("memory database not supported")
+		case "ro":
+			flags = bindings.OpenReadOnly
+		case "rw":
+			flags = bindings.OpenReadWrite
+		case "rwc":
+			flags = bindings.OpenReadWrite | bindings.OpenCreate
+		default:
+			return "", 0, fmt.Errorf("unknown mode %s", mode)
+		}
+
+		filename = filename[:pos]
+	}
+
+	if strings.HasPrefix(filename, "file:") {
+		filename = filename[len("file:"):]
+	}
+
+	if filename == ":memory:" {
+		return "", 0, fmt.Errorf("memory database not supported")
+	}
+
+	if strings.IndexRune(filename, '/') >= 0 {
+		return "", 0, fmt.Errorf("directory segments are invalid")
+	}
+
+	return filename, flags, nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/func.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/func.go
new file mode 100644
index 0000000000..57e0525fa8
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/func.go
@@ -0,0 +1,26 @@
+package logging
+
+import (
+	"fmt"
+	"testing"
+)
+
+// Func is a function that can be used for logging.
+type Func func(Level, string, ...interface{})
+
+// Test returns a logging function that forwards messages to the test logger.
+func Test(t *testing.T) Func {
+	return func(l Level, format string, a ...interface{}) {
+		format = fmt.Sprintf("%s: %s", l.String(), format)
+		t.Logf(format, a...)
+	}
+}
+
+// Stdout returns a logging function that prints log messages on standard
+// output.
+func Stdout() Func {
+	return func(l Level, format string, a ...interface{}) {
+		format = fmt.Sprintf("%s: %s\n", l.String(), format)
+		fmt.Printf(format, a...)
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/level.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/level.go
new file mode 100644
index 0000000000..0f412c2970
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/logging/level.go
@@ -0,0 +1,27 @@
+package logging
+
+// Level defines the logging level.
+type Level int
+
+// Available logging levels.
+const (
+	Debug Level = iota
+	Info
+	Warn
+	Error
+)
+
+func (l Level) String() string {
+	switch l {
+	case Debug:
+		return "DEBUG"
+	case Info:
+		return "INFO"
+	case Warn:
+		return "WARN"
+	case Error:
+		return "ERROR"
+	default:
+		return "UNKNOWN"
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.go
new file mode 100644
index 0000000000..96de2178c6
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.go
@@ -0,0 +1,107 @@
+package protocol
+
+import (
+	"reflect"
+	"strings"
+	"unsafe"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/golang/protobuf/proto"
+	"github.com/pkg/errors"
+)
+
+// MarshalCommand marshals a dqlite FSM command.
+func MarshalCommand(command *Command) ([]byte, error) {
+	return proto.Marshal(command)
+}
+
+// UnmarshalCommand unmarshals a dqlite FSM command.
+func UnmarshalCommand(data []byte) (*Command, error) {
+	command := &Command{}
+	if err := proto.Unmarshal(data, command); err != nil {
+		return nil, errors.Wrap(err, "protobuf failure")
+	}
+	return command, nil
+}
+
+// NewOpen returns a new Command with Open parameters.
+func NewOpen(name string) *Command {
+	params := &Command_Open{Open: &Open{Name: name}}
+	return newCommand(params)
+}
+
+// NewBegin returns a new Command with Begin parameters.
+func NewBegin(txid uint64, name string) *Command {
+	params := &Command_Begin{Begin: &Begin{Txid: txid, Name: name}}
+	return newCommand(params)
+}
+
+// NewFrames returns a new WalFrames protobuf message.
+func NewFrames(txid uint64, filename string, list bindings.WalReplicationFrameList) *Command {
+	length := list.Len()
+	pageSize := list.PageSize()
+
+	numbers := make([]uint32, length)
+	pages := make([]byte, length*pageSize)
+
+	for i := range numbers {
+		data, pgno, _ := list.Frame(i)
+		numbers[i] = uint32(pgno)
+		header := reflect.SliceHeader{Data: uintptr(data), Len: pageSize, Cap: pageSize}
+		var slice []byte
+		slice = reflect.NewAt(reflect.TypeOf(slice), unsafe.Pointer(&header)).Elem().Interface().([]byte)
+		copy(pages[i*pageSize:(i+1)*pageSize], slice)
+	}
+
+	isCommit := int32(0)
+	if list.IsCommit() {
+		isCommit = int32(1)
+	}
+
+	params := &Command_Frames{Frames: &Frames{
+		Txid:        txid,
+		PageSize:    int32(pageSize),
+		PageNumbers: numbers,
+		PageData:    pages,
+		Truncate:    uint32(list.Truncate()),
+		IsCommit:    isCommit,
+		Filename:    filename,
+	}}
+
+	return newCommand(params)
+}
+
+// NewUndo returns a new Undo protobuf message.
+func NewUndo(txid uint64) *Command {
+	params := &Command_Undo{Undo: &Undo{
+		Txid: txid,
+	}}
+	return newCommand(params)
+}
+
+// NewEnd returns a new End protobuf message.
+func NewEnd(txid uint64) *Command {
+	params := &Command_End{End: &End{
+		Txid: txid,
+	}}
+	return newCommand(params)
+}
+
+// NewCheckpoint returns a new Checkpoint protobuf message.
+func NewCheckpoint(name string) *Command {
+	params := &Command_Checkpoint{Checkpoint: &Checkpoint{
+		Name: name,
+	}}
+	return newCommand(params)
+}
+
+func newCommand(payload isCommand_Payload) *Command {
+	return &Command{Payload: payload}
+}
+
+// Name returns a human-readable name for the command, based on its Payload
+// type.
+func (c *Command) Name() string {
+	typeName := reflect.TypeOf(c.Payload).Elem().String()
+	return strings.ToLower(strings.Replace(typeName, "protocol.Command_", "", 1))
+}
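
As a quick aside, the helpers above compose into a simple round-trip: build a typed Command, serialize it for storage in a Raft log entry, and decode it again on apply. A minimal sketch (illustrative only; as an internal package this would have to live inside the go-dqlite tree, e.g. in a test in package protocol):

    // Build an Open command and marshal it as it would be stored
    // in a Raft log entry.
    cmd := NewOpen("test.db")
    data, err := MarshalCommand(cmd)
    if err != nil {
            panic(err) // example only
    }

    // Decode it back and inspect the payload.
    decoded, err := UnmarshalCommand(data)
    if err != nil {
            panic(err) // example only
    }
    fmt.Println(decoded.Name())         // "open"
    fmt.Println(decoded.GetOpen().Name) // "test.db"
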
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.pb.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.pb.go
new file mode 100644
index 0000000000..479ce51dec
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.pb.go
@@ -0,0 +1,2253 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: internal/protocol/commands.proto
+
+/*
+	Package protocol is a generated protocol buffer package.
+
+	It is generated from these files:
+		internal/protocol/commands.proto
+
+	It has these top-level messages:
+		Command
+		Open
+		Begin
+		Frames
+		FramesPage
+		Undo
+		End
+		Checkpoint
+*/
+package protocol
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// Command encapsulates the payload of a dqlite Raft FSM command.
+//
+// On the wire this will be a varint identifying the command type,
+// followed by the command payload.
+type Command struct {
+	// Types that are valid to be assigned to Payload:
+	//	*Command_Open
+	//	*Command_Begin
+	//	*Command_Frames
+	//	*Command_Undo
+	//	*Command_End
+	//	*Command_Checkpoint
+	Payload isCommand_Payload `protobuf_oneof:"Payload"`
+}
+
+func (m *Command) Reset()                    { *m = Command{} }
+func (m *Command) String() string            { return proto.CompactTextString(m) }
+func (*Command) ProtoMessage()               {}
+func (*Command) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{0} }
+
+type isCommand_Payload interface {
+	isCommand_Payload()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Command_Open struct {
+	Open *Open `protobuf:"bytes,1,opt,name=open,oneof"`
+}
+type Command_Begin struct {
+	Begin *Begin `protobuf:"bytes,2,opt,name=begin,oneof"`
+}
+type Command_Frames struct {
+	Frames *Frames `protobuf:"bytes,3,opt,name=frames,oneof"`
+}
+type Command_Undo struct {
+	Undo *Undo `protobuf:"bytes,4,opt,name=undo,oneof"`
+}
+type Command_End struct {
+	End *End `protobuf:"bytes,5,opt,name=end,oneof"`
+}
+type Command_Checkpoint struct {
+	Checkpoint *Checkpoint `protobuf:"bytes,6,opt,name=checkpoint,oneof"`
+}
+
+func (*Command_Open) isCommand_Payload()       {}
+func (*Command_Begin) isCommand_Payload()      {}
+func (*Command_Frames) isCommand_Payload()     {}
+func (*Command_Undo) isCommand_Payload()       {}
+func (*Command_End) isCommand_Payload()        {}
+func (*Command_Checkpoint) isCommand_Payload() {}
+
+func (m *Command) GetPayload() isCommand_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (m *Command) GetOpen() *Open {
+	if x, ok := m.GetPayload().(*Command_Open); ok {
+		return x.Open
+	}
+	return nil
+}
+
+func (m *Command) GetBegin() *Begin {
+	if x, ok := m.GetPayload().(*Command_Begin); ok {
+		return x.Begin
+	}
+	return nil
+}
+
+func (m *Command) GetFrames() *Frames {
+	if x, ok := m.GetPayload().(*Command_Frames); ok {
+		return x.Frames
+	}
+	return nil
+}
+
+func (m *Command) GetUndo() *Undo {
+	if x, ok := m.GetPayload().(*Command_Undo); ok {
+		return x.Undo
+	}
+	return nil
+}
+
+func (m *Command) GetEnd() *End {
+	if x, ok := m.GetPayload().(*Command_End); ok {
+		return x.End
+	}
+	return nil
+}
+
+func (m *Command) GetCheckpoint() *Checkpoint {
+	if x, ok := m.GetPayload().(*Command_Checkpoint); ok {
+		return x.Checkpoint
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Command) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _Command_OneofMarshaler, _Command_OneofUnmarshaler, _Command_OneofSizer, []interface{}{
+		(*Command_Open)(nil),
+		(*Command_Begin)(nil),
+		(*Command_Frames)(nil),
+		(*Command_Undo)(nil),
+		(*Command_End)(nil),
+		(*Command_Checkpoint)(nil),
+	}
+}
+
+func _Command_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Command)
+	// Payload
+	switch x := m.Payload.(type) {
+	case *Command_Open:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Open); err != nil {
+			return err
+		}
+	case *Command_Begin:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Begin); err != nil {
+			return err
+		}
+	case *Command_Frames:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Frames); err != nil {
+			return err
+		}
+	case *Command_Undo:
+		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Undo); err != nil {
+			return err
+		}
+	case *Command_End:
+		_ = b.EncodeVarint(5<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.End); err != nil {
+			return err
+		}
+	case *Command_Checkpoint:
+		_ = b.EncodeVarint(6<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Checkpoint); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("Command.Payload has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _Command_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Command)
+	switch tag {
+	case 1: // Payload.open
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Open)
+		err := b.DecodeMessage(msg)
+		m.Payload = &Command_Open{msg}
+		return true, err
+	case 2: // Payload.begin
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Begin)
+		err := b.DecodeMessage(msg)
+		m.Payload = &Command_Begin{msg}
+		return true, err
+	case 3: // Payload.frames
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Frames)
+		err := b.DecodeMessage(msg)
+		m.Payload = &Command_Frames{msg}
+		return true, err
+	case 4: // Payload.undo
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Undo)
+		err := b.DecodeMessage(msg)
+		m.Payload = &Command_Undo{msg}
+		return true, err
+	case 5: // Payload.end
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(End)
+		err := b.DecodeMessage(msg)
+		m.Payload = &Command_End{msg}
+		return true, err
+	case 6: // Payload.checkpoint
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Checkpoint)
+		err := b.DecodeMessage(msg)
+		m.Payload = &Command_Checkpoint{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _Command_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*Command)
+	// Payload
+	switch x := m.Payload.(type) {
+	case *Command_Open:
+		s := proto.Size(x.Open)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Command_Begin:
+		s := proto.Size(x.Begin)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Command_Frames:
+		s := proto.Size(x.Frames)
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Command_Undo:
+		s := proto.Size(x.Undo)
+		n += proto.SizeVarint(4<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Command_End:
+		s := proto.Size(x.End)
+		n += proto.SizeVarint(5<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Command_Checkpoint:
+		s := proto.Size(x.Checkpoint)
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// Parameters to open a new database and set up the needed data
+// structures for replication.
+type Open struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *Open) Reset()                    { *m = Open{} }
+func (m *Open) String() string            { return proto.CompactTextString(m) }
+func (*Open) ProtoMessage()               {}
+func (*Open) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{1} }
+
+func (m *Open) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+// Parameters to begin a new write transaction.
+//
+// This command is no longer used, but is kept
+// for backward compatibility.
+type Begin struct {
+	Txid uint64 `protobuf:"varint,1,opt,name=txid,proto3" json:"txid,omitempty"`
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *Begin) Reset()                    { *m = Begin{} }
+func (m *Begin) String() string            { return proto.CompactTextString(m) }
+func (*Begin) ProtoMessage()               {}
+func (*Begin) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{2} }
+
+func (m *Begin) GetTxid() uint64 {
+	if m != nil {
+		return m.Txid
+	}
+	return 0
+}
+
+func (m *Begin) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+// Parameters to append new frames to the WAL within a write transaction.
+type Frames struct {
+	Txid        uint64        `protobuf:"varint,1,opt,name=txid,proto3" json:"txid,omitempty"`
+	PageSize    int32         `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+	Pages       []*FramesPage `protobuf:"bytes,3,rep,name=pages" json:"pages,omitempty"`
+	Truncate    uint32        `protobuf:"varint,4,opt,name=truncate,proto3" json:"truncate,omitempty"`
+	IsCommit    int32         `protobuf:"varint,5,opt,name=is_commit,json=isCommit,proto3" json:"is_commit,omitempty"`
+	SyncFlags   uint32        `protobuf:"varint,6,opt,name=sync_flags,json=syncFlags,proto3" json:"sync_flags,omitempty"`
+	Filename    string        `protobuf:"bytes,7,opt,name=filename,proto3" json:"filename,omitempty"`
+	PageNumbers []uint32      `protobuf:"varint,8,rep,packed,name=page_numbers,json=pageNumbers" json:"page_numbers,omitempty"`
+	PageData    []byte        `protobuf:"bytes,9,opt,name=page_data,json=pageData,proto3" json:"page_data,omitempty"`
+}
+
+func (m *Frames) Reset()                    { *m = Frames{} }
+func (m *Frames) String() string            { return proto.CompactTextString(m) }
+func (*Frames) ProtoMessage()               {}
+func (*Frames) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{3} }
+
+func (m *Frames) GetTxid() uint64 {
+	if m != nil {
+		return m.Txid
+	}
+	return 0
+}
+
+func (m *Frames) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *Frames) GetPages() []*FramesPage {
+	if m != nil {
+		return m.Pages
+	}
+	return nil
+}
+
+func (m *Frames) GetTruncate() uint32 {
+	if m != nil {
+		return m.Truncate
+	}
+	return 0
+}
+
+func (m *Frames) GetIsCommit() int32 {
+	if m != nil {
+		return m.IsCommit
+	}
+	return 0
+}
+
+func (m *Frames) GetSyncFlags() uint32 {
+	if m != nil {
+		return m.SyncFlags
+	}
+	return 0
+}
+
+func (m *Frames) GetFilename() string {
+	if m != nil {
+		return m.Filename
+	}
+	return ""
+}
+
+func (m *Frames) GetPageNumbers() []uint32 {
+	if m != nil {
+		return m.PageNumbers
+	}
+	return nil
+}
+
+func (m *Frames) GetPageData() []byte {
+	if m != nil {
+		return m.PageData
+	}
+	return nil
+}
+
+// A single frame of data in a Frames command.
+//
+// FIXME: this is a legacy message only used in v1 of the protocol.
+type FramesPage struct {
+	Data   []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	Flags  uint32 `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	Number uint32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"`
+}
+
+func (m *FramesPage) Reset()                    { *m = FramesPage{} }
+func (m *FramesPage) String() string            { return proto.CompactTextString(m) }
+func (*FramesPage) ProtoMessage()               {}
+func (*FramesPage) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{4} }
+
+func (m *FramesPage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *FramesPage) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *FramesPage) GetNumber() uint32 {
+	if m != nil {
+		return m.Number
+	}
+	return 0
+}
+
+// Parameters to undo any previous WAL change in a write transaction.
+type Undo struct {
+	Txid uint64 `protobuf:"varint,1,opt,name=txid,proto3" json:"txid,omitempty"`
+}
+
+func (m *Undo) Reset()                    { *m = Undo{} }
+func (m *Undo) String() string            { return proto.CompactTextString(m) }
+func (*Undo) ProtoMessage()               {}
+func (*Undo) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{5} }
+
+func (m *Undo) GetTxid() uint64 {
+	if m != nil {
+		return m.Txid
+	}
+	return 0
+}
+
+// Parameters to end a write transaction, and update the WAL commit
+// pointer.
+//
+// This command is no longer used, but is kept
+// for backward compatibility.
+type End struct {
+	Txid uint64 `protobuf:"varint,1,opt,name=txid,proto3" json:"txid,omitempty"`
+}
+
+func (m *End) Reset()                    { *m = End{} }
+func (m *End) String() string            { return proto.CompactTextString(m) }
+func (*End) ProtoMessage()               {}
+func (*End) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{6} }
+
+func (m *End) GetTxid() uint64 {
+	if m != nil {
+		return m.Txid
+	}
+	return 0
+}
+
+// Parameters to perform a WAL checkpoint.
+type Checkpoint struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *Checkpoint) Reset()                    { *m = Checkpoint{} }
+func (m *Checkpoint) String() string            { return proto.CompactTextString(m) }
+func (*Checkpoint) ProtoMessage()               {}
+func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptorCommands, []int{7} }
+
+func (m *Checkpoint) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*Command)(nil), "protocol.Command")
+	proto.RegisterType((*Open)(nil), "protocol.Open")
+	proto.RegisterType((*Begin)(nil), "protocol.Begin")
+	proto.RegisterType((*Frames)(nil), "protocol.Frames")
+	proto.RegisterType((*FramesPage)(nil), "protocol.FramesPage")
+	proto.RegisterType((*Undo)(nil), "protocol.Undo")
+	proto.RegisterType((*End)(nil), "protocol.End")
+	proto.RegisterType((*Checkpoint)(nil), "protocol.Checkpoint")
+}
+func (m *Command) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Command) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Payload != nil {
+		nn1, err := m.Payload.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += nn1
+	}
+	return i, nil
+}
+
+func (m *Command_Open) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Open != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Open.Size()))
+		n2, err := m.Open.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+func (m *Command_Begin) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Begin != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Begin.Size()))
+		n3, err := m.Begin.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n3
+	}
+	return i, nil
+}
+func (m *Command_Frames) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Frames != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Frames.Size()))
+		n4, err := m.Frames.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n4
+	}
+	return i, nil
+}
+func (m *Command_Undo) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Undo != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Undo.Size()))
+		n5, err := m.Undo.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	return i, nil
+}
+func (m *Command_End) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.End != nil {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.End.Size()))
+		n6, err := m.End.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	return i, nil
+}
+func (m *Command_Checkpoint) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Checkpoint != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Checkpoint.Size()))
+		n7, err := m.Checkpoint.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n7
+	}
+	return i, nil
+}
+func (m *Open) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Open) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func (m *Begin) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Begin) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Txid))
+	}
+	if len(m.Name) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func (m *Frames) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Frames) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Txid))
+	}
+	if m.PageSize != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.PageSize))
+	}
+	if len(m.Pages) > 0 {
+		for _, msg := range m.Pages {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintCommands(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.Truncate != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Truncate))
+	}
+	if m.IsCommit != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.IsCommit))
+	}
+	if m.SyncFlags != 0 {
+		dAtA[i] = 0x30
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.SyncFlags))
+	}
+	if len(m.Filename) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(len(m.Filename)))
+		i += copy(dAtA[i:], m.Filename)
+	}
+	if len(m.PageNumbers) > 0 {
+		dAtA9 := make([]byte, len(m.PageNumbers)*10)
+		var j8 int
+		for _, num := range m.PageNumbers {
+			for num >= 1<<7 {
+				dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j8++
+			}
+			dAtA9[j8] = uint8(num)
+			j8++
+		}
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(j8))
+		i += copy(dAtA[i:], dAtA9[:j8])
+	}
+	if len(m.PageData) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(len(m.PageData)))
+		i += copy(dAtA[i:], m.PageData)
+	}
+	return i, nil
+}
+
+func (m *FramesPage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FramesPage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Data) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	if m.Flags != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Flags))
+	}
+	if m.Number != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Number))
+	}
+	return i, nil
+}
+
+func (m *Undo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Undo) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Txid))
+	}
+	return i, nil
+}
+
+func (m *End) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *End) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(m.Txid))
+	}
+	return i, nil
+}
+
+func (m *Checkpoint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Checkpoint) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintCommands(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func encodeVarintCommands(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Command) Size() (n int) {
+	var l int
+	_ = l
+	if m.Payload != nil {
+		n += m.Payload.Size()
+	}
+	return n
+}
+
+func (m *Command_Open) Size() (n int) {
+	var l int
+	_ = l
+	if m.Open != nil {
+		l = m.Open.Size()
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+func (m *Command_Begin) Size() (n int) {
+	var l int
+	_ = l
+	if m.Begin != nil {
+		l = m.Begin.Size()
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+func (m *Command_Frames) Size() (n int) {
+	var l int
+	_ = l
+	if m.Frames != nil {
+		l = m.Frames.Size()
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+func (m *Command_Undo) Size() (n int) {
+	var l int
+	_ = l
+	if m.Undo != nil {
+		l = m.Undo.Size()
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+func (m *Command_End) Size() (n int) {
+	var l int
+	_ = l
+	if m.End != nil {
+		l = m.End.Size()
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+func (m *Command_Checkpoint) Size() (n int) {
+	var l int
+	_ = l
+	if m.Checkpoint != nil {
+		l = m.Checkpoint.Size()
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+func (m *Open) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+
+func (m *Begin) Size() (n int) {
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		n += 1 + sovCommands(uint64(m.Txid))
+	}
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+
+func (m *Frames) Size() (n int) {
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		n += 1 + sovCommands(uint64(m.Txid))
+	}
+	if m.PageSize != 0 {
+		n += 1 + sovCommands(uint64(m.PageSize))
+	}
+	if len(m.Pages) > 0 {
+		for _, e := range m.Pages {
+			l = e.Size()
+			n += 1 + l + sovCommands(uint64(l))
+		}
+	}
+	if m.Truncate != 0 {
+		n += 1 + sovCommands(uint64(m.Truncate))
+	}
+	if m.IsCommit != 0 {
+		n += 1 + sovCommands(uint64(m.IsCommit))
+	}
+	if m.SyncFlags != 0 {
+		n += 1 + sovCommands(uint64(m.SyncFlags))
+	}
+	l = len(m.Filename)
+	if l > 0 {
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	if len(m.PageNumbers) > 0 {
+		l = 0
+		for _, e := range m.PageNumbers {
+			l += sovCommands(uint64(e))
+		}
+		n += 1 + sovCommands(uint64(l)) + l
+	}
+	l = len(m.PageData)
+	if l > 0 {
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+
+func (m *FramesPage) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	if m.Flags != 0 {
+		n += 1 + sovCommands(uint64(m.Flags))
+	}
+	if m.Number != 0 {
+		n += 1 + sovCommands(uint64(m.Number))
+	}
+	return n
+}
+
+func (m *Undo) Size() (n int) {
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		n += 1 + sovCommands(uint64(m.Txid))
+	}
+	return n
+}
+
+func (m *End) Size() (n int) {
+	var l int
+	_ = l
+	if m.Txid != 0 {
+		n += 1 + sovCommands(uint64(m.Txid))
+	}
+	return n
+}
+
+func (m *Checkpoint) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovCommands(uint64(l))
+	}
+	return n
+}
+
+func sovCommands(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozCommands(x uint64) (n int) {
+	return sovCommands(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Command) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Command: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Command: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Open", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Open{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Payload = &Command_Open{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Begin{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Payload = &Command_Begin{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Frames", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Frames{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Payload = &Command_Frames{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Undo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Undo{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Payload = &Command_Undo{v}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &End{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Payload = &Command_End{v}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Checkpoint{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Payload = &Command_Checkpoint{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Open) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Open: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Open: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Begin) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Begin: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Begin: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txid", wireType)
+			}
+			m.Txid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Txid |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Frames) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Frames: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Frames: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txid", wireType)
+			}
+			m.Txid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Txid |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PageSize", wireType)
+			}
+			m.PageSize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PageSize |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pages", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Pages = append(m.Pages, &FramesPage{})
+			if err := m.Pages[len(m.Pages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Truncate", wireType)
+			}
+			m.Truncate = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Truncate |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IsCommit", wireType)
+			}
+			m.IsCommit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.IsCommit |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SyncFlags", wireType)
+			}
+			m.SyncFlags = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SyncFlags |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filename = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType == 0 {
+				var v uint32
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCommands
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= (uint32(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.PageNumbers = append(m.PageNumbers, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCommands
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCommands
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				for iNdEx < postIndex {
+					var v uint32
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowCommands
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= (uint32(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.PageNumbers = append(m.PageNumbers, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field PageNumbers", wireType)
+			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PageData", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PageData = append(m.PageData[:0], dAtA[iNdEx:postIndex]...)
+			if m.PageData == nil {
+				m.PageData = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FramesPage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FramesPage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FramesPage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+			}
+			m.Flags = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Flags |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType)
+			}
+			m.Number = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Number |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Undo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Undo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Undo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txid", wireType)
+			}
+			m.Txid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Txid |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *End) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: End: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: End: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txid", wireType)
+			}
+			m.Txid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Txid |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Checkpoint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Checkpoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Checkpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCommands
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCommands(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCommands
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipCommands(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowCommands
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCommands
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthCommands
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowCommands
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipCommands(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthCommands = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowCommands   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("internal/protocol/commands.proto", fileDescriptorCommands) }
+
+var fileDescriptorCommands = []byte{
+	// 478 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
+	0x10, 0x8d, 0x13, 0xdb, 0xb1, 0xa7, 0x31, 0xa0, 0x55, 0x85, 0x4c, 0x11, 0x91, 0x6b, 0x21, 0x11,
+	0xf5, 0x90, 0x48, 0x20, 0xf1, 0x01, 0x09, 0xad, 0x7c, 0x2a, 0xd5, 0x22, 0xce, 0xd1, 0xc6, 0xde,
+	0x84, 0x15, 0xf6, 0xda, 0xb2, 0x1d, 0x89, 0xf6, 0x2b, 0xb8, 0xf2, 0x47, 0x1c, 0xf9, 0x04, 0x94,
+	0x2f, 0x41, 0x33, 0xdb, 0xda, 0xa5, 0xca, 0x6d, 0xe7, 0xbd, 0x37, 0x3b, 0xfb, 0xde, 0x0e, 0x44,
+	0x4a, 0xb7, 0xb2, 0xd6, 0x22, 0x5f, 0x54, 0x75, 0xd9, 0x96, 0x69, 0x99, 0x2f, 0xd2, 0xb2, 0x28,
+	0x84, 0xce, 0x9a, 0x39, 0x21, 0xcc, 0x7b, 0x20, 0xe2, 0x9f, 0x43, 0x18, 0xaf, 0x0c, 0xc9, 0xde,
+	0x82, 0x5d, 0x56, 0x52, 0x87, 0x56, 0x64, 0xcd, 0x4e, 0xde, 0x3f, 0x9b, 0x3f, 0x88, 0xe6, 0x9f,
+	0x2b, 0xa9, 0x93, 0x01, 0x27, 0x96, 0xbd, 0x03, 0x67, 0x23, 0x77, 0x4a, 0x87, 0x43, 0x92, 0x3d,
+	0xef, 0x65, 0x4b, 0x84, 0x93, 0x01, 0x37, 0x3c, 0xbb, 0x00, 0x77, 0x5b, 0x8b, 0x42, 0x36, 0xe1,
+	0x88, 0x94, 0x2f, 0x7a, 0xe5, 0x15, 0xe1, 0xc9, 0x80, 0xdf, 0x2b, 0x70, 0xf4, 0x5e, 0x67, 0x65,
+	0x68, 0x3f, 0x1d, 0xfd, 0x55, 0x67, 0x25, 0x8e, 0x46, 0x96, 0x9d, 0xc3, 0x48, 0xea, 0x2c, 0x74,
+	0x48, 0x14, 0xf4, 0xa2, 0x4b, 0x9d, 0x25, 0x03, 0x8e, 0x1c, 0xfb, 0x08, 0x90, 0x7e, 0x93, 0xe9,
+	0xf7, 0xaa, 0x54, 0xba, 0x0d, 0x5d, 0x52, 0x9e, 0xf6, 0xca, 0x55, 0xc7, 0x25, 0x03, 0xfe, 0x48,
+	0xb9, 0xf4, 0x61, 0x7c, 0x23, 0x6e, 0xf3, 0x52, 0x64, 0xf1, 0x19, 0xd8, 0x68, 0x98, 0x31, 0xb0,
+	0xb5, 0x28, 0x24, 0xc5, 0xe1, 0x73, 0x3a, 0xc7, 0x0b, 0x70, 0xc8, 0x25, 0x92, 0xed, 0x0f, 0x95,
+	0x11, 0x69, 0x73, 0x3a, 0x77, 0x0d, 0xc3, 0x47, 0x0d, 0xbf, 0x86, 0xe0, 0x1a, 0xb7, 0x47, 0x5b,
+	0x5e, 0x83, 0x5f, 0x89, 0x9d, 0x5c, 0x37, 0xea, 0xce, 0xf4, 0x39, 0xdc, 0x43, 0xe0, 0x8b, 0xba,
+	0x93, 0xec, 0x02, 0x1c, 0x3c, 0x63, 0x7e, 0xa3, 0xff, 0x6d, 0x98, 0x1b, 0x6f, 0xc4, 0x4e, 0x72,
+	0x23, 0x61, 0x67, 0xe0, 0xb5, 0xf5, 0x5e, 0xa7, 0xa2, 0x95, 0x14, 0x62, 0xc0, 0xbb, 0x1a, 0x87,
+	0xa8, 0x66, 0x8d, 0x2b, 0xa0, 0x5a, 0x0a, 0xcf, 0xe1, 0x9e, 0x6a, 0x56, 0x54, 0xb3, 0x37, 0x00,
+	0xcd, 0xad, 0x4e, 0xd7, 0xdb, 0x5c, 0xec, 0x1a, 0x0a, 0x2c, 0xe0, 0x3e, 0x22, 0x57, 0x08, 0xe0,
+	0xbd, 0x5b, 0x95, 0x4b, 0xf2, 0x35, 0x26, 0x5f, 0x5d, 0xcd, 0xce, 0x61, 0x42, 0x8f, 0xd7, 0xfb,
+	0x62, 0x23, 0xeb, 0x26, 0xf4, 0xa2, 0xd1, 0x2c, 0xe0, 0x27, 0x88, 0x5d, 0x1b, 0xa8, 0xf3, 0x97,
+	0x89, 0x56, 0x84, 0x7e, 0x64, 0xcd, 0x26, 0xc6, 0xdf, 0x27, 0xd1, 0x8a, 0xf8, 0x1a, 0xa0, 0x37,
+	0x82, 0xf1, 0x90, 0xca, 0x22, 0x15, 0x9d, 0xd9, 0x29, 0x38, 0xe6, 0x5d, 0x43, 0x7a, 0x97, 0x29,
+	0xd8, 0x4b, 0x70, 0xcd, 0x48, 0x5a, 0xac, 0x80, 0xdf, 0x57, 0xf8, 0x71, 0xb8, 0x2e, 0xc7, 0x82,
+	0x8e, 0x5f, 0xc1, 0xe8, 0x52, 0x67, 0x47, 0xa9, 0x08, 0xa0, 0x5f, 0x8b, 0x63, 0xbf, 0xbe, 0x9c,
+	0xfc, 0x3e, 0x4c, 0xad, 0x3f, 0x87, 0xa9, 0xf5, 0xf7, 0x30, 0xb5, 0x36, 0x2e, 0x7d, 0xc3, 0x87,
+	0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x3c, 0x28, 0xf7, 0x67, 0x03, 0x00, 0x00,
+}
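
For readers unfamiliar with the generated wire helpers above: encodeVarintCommands and sovCommands implement standard protobuf varints (seven payload bits per byte, high bit set on every byte except the last), and each field is prefixed with the key fieldNumber<<3 | wireType, which is why Command_Open.MarshalTo writes the literal 0xa (field 1, wire type 2 for length-delimited). A worked example, as it could appear in a test inside this package:

    buf := make([]byte, 10)
    end := encodeVarintCommands(buf, 0, 300)
    // 300 is 0b1_0010_1100: the low seven bits (0x2c) go out first with
    // the continuation bit set, then the remaining bits (0x02), so
    // buf[:end] == []byte{0xac, 0x02} and end == sovCommands(300) == 2.
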
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.proto b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.proto
new file mode 100644
index 0000000000..d3cde8c564
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/commands.proto
@@ -0,0 +1,87 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+syntax = "proto3";
+
+package protocol;
+
+// Command encapsulates the payload of a dqlite Raft FSM command.
+//
+// On the wire this will be a varint identifying the command type,
+// followed by the command payload.
+message Command {
+  oneof Payload {
+    Open open = 1;
+    Begin begin = 2;
+    Frames frames = 3;
+    Undo undo = 4;
+    End end = 5;
+    Checkpoint checkpoint = 6;
+  }
+}
+
+// Parameters to open a new database and set up the needed data
+// structures for replication.
+message Open {
+  string name = 1; // Name of the database file.
+}
+
+// Parameters to begin a new write transaction.
+//
+// This command is no longer used, but is kept
+// for backward compatibility.
+message Begin {
+  uint64 txid = 1; // Transaction identifier.
+  string name = 2; // Name of the database file.
+}
+
+// Parameters to append new frames to the WAL within a write transaction.
+message Frames {
+  uint64 txid = 1;                  // Transaction identifier.
+  int32 page_size = 2;              // Size of the data field of each frame.
+  repeated FramesPage pages = 3;    // List of the frames to write (legacy from v1).
+  uint32 truncate = 4;              // Flag telling if the WAL should be truncated.
+  int32 is_commit = 5;              // Flag telling if this is a final frames write.
+  uint32 sync_flags = 6;            // Flags for disk syncing (legacy from v1).
+  string filename = 7;              // Filename of the database file.
+  repeated uint32 page_numbers = 8; // Page numbers of the frames to write (v2).
+  bytes page_data = 9;              // Contiguous page data (v2).
+}
+
+// A single frame of data in a Frames command.
+//
+// FIXME: this is a legacy message only used in v1 of the protocol.
+message FramesPage {
+  bytes data = 1;    // Frame data.
+  uint32 flags = 2;  // WAL write flags.
+  uint32 number = 3; // Page number (i.e. position in the database file).
+}
+
+// Parameters to undo any previous WAL change in a write transaction.
+message Undo {
+  uint64 txid = 1; // Transaction identifier.
+}
+
+// Parameters to end a write transaction, and update the WAL commit
+// pointer.
+//
+// This command is not used anymore, but it's
+// kept for backward compatibility.
+message End {
+  uint64 txid = 1; // Transaction identifier.
+}
+
+// Parameters to perform a WAL checkpoint.
+message Checkpoint {
+  string name = 1; // Name of the database file.
+}
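
As a rough reference for how these messages are used, the following sketch round-trips a Command through the Go code generated from this schema (assuming the github.com/golang/protobuf/proto runtime that commands.pb.go is built against), mirroring the payload type switch performed by the FSM:

package main

import (
	"fmt"

	"github.com/CanonicalLtd/go-dqlite/internal/protocol"
	"github.com/golang/protobuf/proto"
)

func main() {
	// Wrap an Open payload in the Command oneof envelope.
	cmd := &protocol.Command{
		Payload: &protocol.Command_Open{
			Open: &protocol.Open{Name: "test.db"},
		},
	}

	// On the wire this is the oneof field tag (a varint) followed by the
	// payload, as described in the comment at the top of the schema.
	data, err := proto.Marshal(cmd)
	if err != nil {
		panic(err)
	}

	// Decode and dispatch on the concrete payload type.
	decoded := &protocol.Command{}
	if err := proto.Unmarshal(data, decoded); err != nil {
		panic(err)
	}
	switch payload := decoded.Payload.(type) {
	case *protocol.Command_Open:
		fmt.Println("open:", payload.Open.Name)
	}
}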
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/doc.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/doc.go
new file mode 100644
index 0000000000..3d4fc5404b
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/protocol/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package protocol implements serialization and deserialization logic for
+// dqlite-specific Raft commands that will be executed by the dqlite FSM.
+package protocol
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/conn.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/conn.go
new file mode 100644
index 0000000000..acc3fd1cbe
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/conn.go
@@ -0,0 +1,154 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+)
+
+// ConnLeaderAdd adds a new leader connection to the registry.
+func (r *Registry) ConnLeaderAdd(filename string, conn *bindings.Conn) {
+	r.connAdd(conn)
+	r.leaders[conn] = filename
+
+	// Add a tracer specific to this connection. It will be used by
+	// replication.Methods when firing replication hooks triggered by WAL
+	// events on this connection.
+	r.tracers.Add(fmt.Sprintf("methods %d", r.ConnSerial(conn)))
+}
+
+// ConnLeaderDel removes the given leader connection from the registry.
+func (r *Registry) ConnLeaderDel(conn *bindings.Conn) {
+	// Delete the connection-specific tracer.
+	r.tracers.Del(fmt.Sprintf("methods %d", r.ConnSerial(conn)))
+
+	r.connDel(conn)
+	delete(r.leaders, conn)
+
+}
+
+// ConnLeaderFilename returns the filename of the database associated with the
+// given leader connection.
+//
+// If conn is not a registered leader connection, this method will panic.
+func (r *Registry) ConnLeaderFilename(conn *bindings.Conn) string {
+	name, ok := r.leaders[conn]
+	if !ok {
+		panic("no database for the given connection")
+	}
+	return name
+}
+
+// ConnLeaders returns all open leader connections for the database with
+// the given filename.
+func (r *Registry) ConnLeaders(filename string) []*bindings.Conn {
+	conns := []*bindings.Conn{}
+	for conn := range r.leaders {
+		if r.leaders[conn] == filename {
+			conns = append(conns, conn)
+		}
+	}
+	return conns
+}
+
+// ConnFollowerAdd adds a new follower connection to the registry.
+//
+// If the given connection is already registered, this method panics.
+func (r *Registry) ConnFollowerAdd(filename string, conn *bindings.Conn) {
+	r.connAdd(conn)
+	r.followers[filename] = conn
+}
+
+// ConnFollowerDel removes the follower registered against the database with the
+// given filename.
+func (r *Registry) ConnFollowerDel(filename string) {
+	conn, ok := r.followers[filename]
+	if !ok {
+		panic(fmt.Sprintf("follower connection for '%s' is not registered", filename))
+	}
+
+	delete(r.followers, filename)
+	r.connDel(conn)
+}
+
+// ConnFollowerFilenames returns the filenames for all databases which currently
+// have registered follower connections.
+func (r *Registry) ConnFollowerFilenames() []string {
+	names := []string{}
+	for name := range r.followers {
+		names = append(names, name)
+	}
+	return names
+}
+
+// ConnFollower returns the follower connection used to replicate the
+// database identified by the given filename.
+//
+// If there's no follower connection registered for the database with the given
+// filename, this method panics.
+func (r *Registry) ConnFollower(filename string) *bindings.Conn {
+	conn, ok := r.followers[filename]
+	if !ok {
+		panic(fmt.Sprintf("no follower connection for '%s'", filename))
+	}
+	return conn
+}
+
+// ConnFollowerExists checks whether the registry has a follower connection registered
+// against the database with the given filename.
+func (r *Registry) ConnFollowerExists(filename string) bool {
+	_, ok := r.followers[filename]
+	return ok
+}
+
+// ConnSerial returns a serial number uniquely identifying the given registered
+// connection.
+func (r *Registry) ConnSerial(conn *bindings.Conn) uint64 {
+	serial, ok := r.serial[conn]
+
+	if !ok {
+		panic("connection is not registered")
+	}
+
+	return serial
+}
+
+// Add a new connection (either leader or follower) to the registry and assign
+// it a serial number.
+func (r *Registry) connAdd(conn *bindings.Conn) {
+	if serial, ok := r.serial[conn]; ok {
+		panic(fmt.Sprintf("connection is already registered with serial %d", serial))
+	}
+
+	next := atomic.AddUint64(&serial, 1)
+	r.serial[conn] = next
+}
+
+// Delete a connection (either leader or follower) from the registry.
+func (r *Registry) connDel(conn *bindings.Conn) {
+	if _, ok := r.serial[conn]; !ok {
+		panic("connection is not registered")
+	}
+
+	delete(r.serial, conn)
+}
+
+// Monotonic counter for identifying connections for tracing and debugging
+// purposes.
+var serial uint64
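
The serial scheme above is just this package-level counter bumped with sync/atomic; a minimal standalone sketch of the same idiom (the nextSerial helper is illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Process-wide monotonic counter, shared by all registries.
var serial uint64

// nextSerial returns a serial number that is unique within the process,
// even when called from concurrent goroutines.
func nextSerial() uint64 {
	return atomic.AddUint64(&serial, 1)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(nextSerial()) // Four distinct values, order unspecified.
		}()
	}
	wg.Wait()
}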
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/fsm.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/fsm.go
new file mode 100644
index 0000000000..df9a559d66
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/fsm.go
@@ -0,0 +1,29 @@
+package registry
+
+// Index returns the last Raft log index that was successfully applied by the FSM.
+func (r *Registry) Index() uint64 {
+	return r.index
+}
+
+// IndexUpdate updates the index of the last log applied by the FSM we're
+// associated with.
+func (r *Registry) IndexUpdate(index uint64) {
+	r.index = index
+}
+
+// Frames returns the number of frames that have been written to the WAL so
+// far.
+func (r *Registry) Frames() uint64 {
+	return r.frames
+}
+
+// FramesIncrease increases by the given amount the number of frames written to
+// the WAL so far.
+func (r *Registry) FramesIncrease(n uint64) {
+	r.frames += n
+}
+
+// FramesReset resets the WAL frames counter to zero.
+func (r *Registry) FramesReset() {
+	r.frames = 0
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/hook.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/hook.go
new file mode 100644
index 0000000000..4c464cd437
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/hook.go
@@ -0,0 +1,191 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry
+
+import (
+	"reflect"
+	"sync"
+)
+
+// HookSyncSet creates a new hookSync instance associated with this Registry.
+func (r *Registry) HookSyncSet() {
+	r.hookSyncEnsureUnset()
+	r.hookSync = newHookSync()
+}
+
+// HookSyncAdd adds new log command data to the underlying hookSync, which is
+// expected to match the Log.Data bytes received by the next FSM.Apply() call.
+func (r *Registry) HookSyncAdd(data []byte) {
+	r.hookSyncEnsureSet()
+	r.hookSync.Add(data)
+}
+
+// HookSyncPresent checks whether a hook sync was set by a Methods hook.
+func (r *Registry) HookSyncPresent() bool {
+	return r.hookSync != nil
+}
+
+// HookSyncMatches checks whether the Log.Data bytes that an FSM.Apply() call
+// is about to process match the ones that were last added to the hookSync via
+// HookSyncAdd().
+func (r *Registry) HookSyncMatches(data []byte) bool {
+	r.hookSyncEnsureSet()
+	return r.hookSync.Matches(data)
+}
+
+// HookSyncWait blocks until the underlying hookSync is done.
+//
+// It assumes that the lock is held, releasing it before blocking and re-acquiring
+// it thereafter.
+func (r *Registry) HookSyncWait() {
+	r.hookSyncEnsureSet()
+	hookSync := r.hookSync
+	r.Unlock()
+	hookSync.Wait()
+	r.Lock()
+}
+
+// HookSyncReset clears the hookSync instance created with HookSyncSet.
+func (r *Registry) HookSyncReset() {
+	r.hookSyncEnsureSet()
+	r.hookSync.Done() // Unblock any FSM.Apply() call waiting on this hookSync.
+	r.hookSync = nil
+}
+
+// Ensure that a hookSync instance is set.
+func (r *Registry) hookSyncEnsureSet() {
+	if r.hookSync == nil {
+		panic("no hookSync instance set on this registry")
+	}
+}
+
+// Ensure that a hookSync instance is not set.
+func (r *Registry) hookSyncEnsureUnset() {
+	if r.hookSync != nil {
+		panic("a hookSync instance is set on this registry")
+	}
+}
+
+// HookSync is used to synchronize a Methods instance and an FSM instance
+// with each other.
+//
+// The goal is that if a replication hook of Methods instance is in progress,
+// the associated FSM instance should only execute log commands applied by that
+// hook, and block the execution of any log command not applied by the hook
+// until the hook returns.
+//
+// The semantics of HookSync is somewhat similar to sync.WaitGroup, and indeed
+// it uses WaitGroup internally. The only additional behavior is really the
+// additional API that checks if a certain command log that the FSM is about to
+// apply originated from an Apply() call on the same server during a
+// concurrent Methods hook call, or if it's a replicated command log that was sent
+// by another server over the network, perhaps right while a Methods hook is
+// finishing up after leadership was lost.
+//
+// The synchronization protocol goes through the following steps:
+//
+//  - The Methods instance starts executing a replication hook.
+//
+//  - The Methods instance acquires the Registry lock and creates a new
+//    HookSync instance.
+//
+//  - Whenever the Methods instance is about to apply a log command, it calls
+//    HookSync.Add(), which saves the reference to the data bytes slice to be
+//    applied in HookSync.data, and increases by one the WaitGroup count on
+//    HookSync.wg. The Methods instance then releases the Registry lock.
+//
+//  - The FSM starts executing a log command.
+//
+//  - The FSM acquires the Registry lock and checks if a HookSync instance
+//    is set.
+//
+//  - If no HookSync instance is set, the FSM continues normally. This is the
+//    typical case when the FSM is applying logs as follower.
+//
+//  - If the HookSync instance is set and HookSync.Matches() returns true, then
+//    the HookSync.data field matches the Log.Data field of the log command
+//    being applied, and the FSM continues normally.
+//
+//  - If the HookSync instance is set and HookSync.Matches() returns false,
+//    then the HookSync.data field does not match the Log.Data field of the log
+//    command being applied. This means that the FSM is about to apply a log
+//    command that did not originate on this node during the hook execution
+//    (e.g. the FSM is about to apply a log sent from a new leader after this
+//    leader was deposed). The FSM releases the lock on the Registry and calls
+//    HookSync.Wait(), which blocks on the HookSync.wg wait group (whose
+//    count was increased by the Methods instance running the replication hook).
+//
+//  - When control eventually returns to the Methods instance after the
+//    Raft.Apply() call returns, the Methods instance re-acquires the Registry
+//    lock and resumes the execution of the replication hook. When the hook is
+//    about to finish, the Methods instance calls HookSync.Done(), which
+//    decrements the wait group once for each previous Add() call. Finally,
+//    the Methods instance releases the Registry lock.
+//
+//  - If the FSM was blocked on HookSync.Wait(), it's now free to proceed.
+//
+// See also Methods.Begin, Methods.Frames, Methods.Undo and FSM.Apply for
+// details.
+type hookSync struct {
+	// A Methods instance hook must call Add(1) against this wait group each
+	// time it applies a log command.
+	wg sync.WaitGroup
+
+	// Track the number of Add(1) calls against the WaitGroup.
+	n int
+
+	// Reference to the Log.Data payload of the last log command applied by
+	// a Methods hook running on this server.
+	data []byte
+}
+
+func newHookSync() *hookSync {
+	return &hookSync{}
+}
+
+// Add is invoked by a Methods instance before calling Raft.Apply(). It
+// sets the data being applied and increases the wait group count by
+// one.
+//
+// This can be called multiple times by a Methods instance during the execution
+// of a replication hook.
+func (s *hookSync) Add(data []byte) {
+	s.wg.Add(1)
+	s.n++
+	s.data = data
+}
+
+// Matches returns true if the data referenced by this HookSync matches the
+// given raft.Log.Data.
+//
+// This assumes that the hashicorp/raft package does not make a copy of the
+// data slice when invoking FSM.Apply() to apply a log command that originated
+// from a call to Raft.Apply() on this server.
+func (s *hookSync) Matches(data []byte) bool {
+	return reflect.ValueOf(s.data).Pointer() == reflect.ValueOf(data).Pointer()
+}
+
+// Wait blocks until the wait group count drops to zero, i.e. the replication hook
+// that created us has completed.
+func (s *hookSync) Wait() {
+	s.wg.Wait()
+}
+
+// Done decrements the wait group once for each Add() call in our life cycle.
+func (s *hookSync) Done() {
+	for i := 0; i < s.n; i++ {
+		s.wg.Done()
+	}
+}
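
The protocol documented above reduces to a WaitGroup plus a slice-identity check. This self-contained sketch (illustrative only, not part of the patch) shows the two cases an FSM has to tell apart: its own in-flight payload, recognized by pointer identity, and a foreign payload with identical content:

package main

import (
	"fmt"
	"reflect"
	"sync"
	"time"
)

type hookSync struct {
	wg   sync.WaitGroup
	n    int
	data []byte
}

func (s *hookSync) Add(data []byte) {
	s.wg.Add(1)
	s.n++
	s.data = data
}

// Matches compares slice identity, not content: only the exact slice
// passed to Add is recognized as "ours".
func (s *hookSync) Matches(data []byte) bool {
	return reflect.ValueOf(s.data).Pointer() == reflect.ValueOf(data).Pointer()
}

func (s *hookSync) Wait() { s.wg.Wait() }

func (s *hookSync) Done() {
	for i := 0; i < s.n; i++ {
		s.wg.Done()
	}
}

func main() {
	s := &hookSync{}
	local := []byte("command")
	s.Add(local)

	foreign := []byte("command") // Same bytes, different backing array.
	fmt.Println(s.Matches(local), s.Matches(foreign)) // true false

	// A foreign command would block in Wait() until the hook calls Done().
	go func() {
		time.Sleep(10 * time.Millisecond)
		s.Done()
	}()
	s.Wait()
	fmt.Println("hook completed, foreign command may proceed")
}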
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/registry.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/registry.go
new file mode 100644
index 0000000000..a88b75d728
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/registry.go
@@ -0,0 +1,149 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+	"testing"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/trace"
+	"github.com/CanonicalLtd/go-dqlite/internal/transaction"
+)
+
+// Registry is a dqlite node-level data structure that tracks:
+//
+// - The directory where dqlite data for this node lives.
+//
+// - All SQLite connections opened on the node, either in leader replication
+//   mode or follower replication mode.
+//
+// - All inflight WAL write transactions, either for leader or follower
+//   connections.
+//
+// - All tracers used to emit trace messages.
+//
+// - Last log index applied by the FSM.
+//
+// A single Registry instance is shared by a single replication.FSM instance, a
+// single replication.Methods instance and a single dqlite.Driver instance.
+//
+// Methods that access or mutate the registry are not thread-safe and must be
+// performed after acquiring the lock. See Lock() and Unlock().
+type Registry struct {
+	mu        sync.Mutex                  // Serialize access to internal state.
+	vfs       *bindings.Vfs               // In-memory file-system
+	leaders   map[*bindings.Conn]string   // Map leader connections to database filenames.
+	followers map[string]*bindings.Conn   // Map database filenames to follower connections.
+	txns      map[uint64]*transaction.Txn // Transactions by ID
+	tracers   *trace.Set                  // Tracers used by this dqlite instance.
+	index     uint64                      // Last log index applied by the dqlite FSM.
+	frames    uint64                      // Number of frames written to the WAL so far.
+	hookSync  *hookSync                   // Used for synchronizing Methods and FSM.
+
+	// Map a connection to its serial number. Serial numbers are guaranteed
+	// to be unique inside the same process.
+	serial map[*bindings.Conn]uint64
+
+	// Circular buffer holding the IDs of the last N transactions that
+	// were successfully committed. It is used to recover a transaction
+	// that errored because of lost leadership but that might actually get
+	// completed because a quorum was reached for the lost commit frames
+	// command log.
+	committed       []uint64
+	committedCursor int
+
+	// Map a leader connection to the ID of the last transaction executed
+	// on it. Used by the driver's Tx implementation to know its ID in case
+	// a client asks for it for recovering a lost commit.
+	lastTxnIDs map[*bindings.Conn]uint64
+
+	// Flag indicating whether transaction state transitions
+	// should actually call back into the relevant SQLite APIs. Some
+	// tests need to set this flag to true because there's no public
+	// API to acquire the WAL read lock in leader connections.
+	txnDryRun bool
+}
+
+// New creates a new registry.
+//
+// The given 'vfs' is the in-memory VFS that the node associated with this
+// registry will use to open its SQLite database files.
+func New(vfs *bindings.Vfs) *Registry {
+	tracers := trace.NewSet(250)
+
+	// Register the tracer that will be used by the FSM associated
+	// with this registry.
+	tracers.Add("fsm")
+
+	return &Registry{
+		vfs:        vfs,
+		leaders:    map[*bindings.Conn]string{},
+		followers:  map[string]*bindings.Conn{},
+		txns:       map[uint64]*transaction.Txn{},
+		tracers:    tracers,
+		serial:     map[*bindings.Conn]uint64{},
+		committed:  make([]uint64, committedBufferSize),
+		lastTxnIDs: make(map[*bindings.Conn]uint64),
+	}
+}
+
+// Lock the registry.
+func (r *Registry) Lock() {
+	r.mu.Lock()
+}
+
+// Unlock the registry.
+func (r *Registry) Unlock() {
+	r.mu.Unlock()
+}
+
+// Vfs returns the in-memory VFS used for SQLite databases.
+func (r *Registry) Vfs() *bindings.Vfs {
+	return r.vfs
+}
+
+// Testing sets up this registry for unit-testing.
+//
+// The tracers will forward all entries to the testing logger, using the given
+// node prefix.
+func (r *Registry) Testing(t *testing.T, node int) {
+	r.tracers.Testing(t, node)
+}
+
+// Dump the content of the registry, useful for debugging.
+func (r *Registry) Dump() string {
+	buffer := bytes.NewBuffer(nil)
+	fmt.Fprintf(buffer, "leaders:\n")
+	for conn, name := range r.leaders {
+		fmt.Fprintf(buffer, "-> %d: %s\n", r.ConnSerial(conn), name)
+	}
+	fmt.Fprintf(buffer, "followers:\n")
+	for name, conn := range r.followers {
+		fmt.Fprintf(buffer, "-> %d: %s\n", r.ConnSerial(conn), name)
+	}
+	fmt.Fprintf(buffer, "transactions:\n")
+	for _, txn := range r.txns {
+		fmt.Fprintf(buffer, "-> %s\n", txn)
+	}
+	return buffer.String()
+}
+
+// Keep track of at most this many committed transactions. This number should be
+// large enough for any real-world situation, where it's unlikely that a client
+// tries to recover a transaction that is so old.
+const committedBufferSize = 10000
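
The committed-transactions buffer described in the Registry struct is a plain ring: a fixed slice plus a wrapping cursor, with membership checked by a linear scan. A standalone sketch under those assumptions (sizes and names are illustrative):

package main

import "fmt"

type committedBuffer struct {
	ids    []uint64
	cursor int
}

func newCommittedBuffer(size int) *committedBuffer {
	return &committedBuffer{ids: make([]uint64, size)}
}

// Add records a committed transaction ID, overwriting the oldest entry
// once the buffer is full.
func (b *committedBuffer) Add(id uint64) {
	b.ids[b.cursor] = id
	b.cursor++
	if b.cursor == len(b.ids) {
		b.cursor = 0 // Rollover.
	}
}

// Find reports whether the given ID is still in the buffer.
func (b *committedBuffer) Find(id uint64) bool {
	for _, other := range b.ids {
		if other == id {
			return true
		}
	}
	return false
}

func main() {
	b := newCommittedBuffer(3)
	for id := uint64(1); id <= 4; id++ {
		b.Add(id)
	}
	fmt.Println(b.Find(1), b.Find(4)) // false true: ID 1 was overwritten.
}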
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/trace.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/trace.go
new file mode 100644
index 0000000000..db7527ffe1
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/trace.go
@@ -0,0 +1,36 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/trace"
+)
+
+// TracerFSM returns the tracer that should be used by the replication.FSM
+// instance associated with this registry.
+func (r *Registry) TracerFSM() *trace.Tracer {
+	return r.tracers.Get("fsm")
+}
+
+// TracerConn returns the tracer that should be used by the replication.Methods
+// instance associated with this registry when running the given hook for the
+// given connection, which is assumed to be a registered leader connection.
+func (r *Registry) TracerConn(conn *bindings.Conn, hook string) *trace.Tracer {
+	tracer := r.tracers.Get(fmt.Sprintf("methods %d", r.ConnSerial(conn)))
+	return tracer.With(trace.String("hook", hook))
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/txn.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/txn.go
new file mode 100644
index 0000000000..68929193b9
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/registry/txn.go
@@ -0,0 +1,218 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/transaction"
+)
+
+// TxnLeaderAdd adds a new transaction to the registry.
+//
+// The given connection is assumed to be in leader replication mode.
+func (r *Registry) TxnLeaderAdd(conn *bindings.Conn, id uint64) *transaction.Txn {
+	// Sanity check that no other leader connection for the same filename
+	// has a transaction in progress.
+	filename := r.ConnLeaderFilename(conn)
+	for other := range r.leaders {
+		if other != conn && r.leaders[other] == filename {
+			if txn := r.TxnByConn(other); txn != nil {
+				serial := r.ConnSerial(other)
+				panic(fmt.Sprintf("transaction %s registered on connection %d", txn, serial))
+			}
+		}
+	}
+
+	// Keep track of the ID of the last transaction executed on this
+	// connection.
+	r.lastTxnIDs[conn] = id
+
+	return r.txnAdd(conn, id, true)
+}
+
+// TxnLeaderByFilename returns the leader transaction associated with the given
+// database filename, if any.
+//
+// If there is more than one leader transaction for the same filename, this
+// method panics.
+func (r *Registry) TxnLeaderByFilename(filename string) *transaction.Txn {
+	var found *transaction.Txn
+	for _, txn := range r.txns {
+		if r.leaders[txn.Conn()] == filename {
+			if found != nil {
+				panic("found more than one leader transaction for this database")
+			}
+			found = txn
+		}
+	}
+	return found
+}
+
+// TxnFollowerAdd adds a new follower transaction to the registry.
+//
+// The given connection is assumed to be in follower replication mode. The new
+// transaction will be associated with the given transaction ID, which should
+// match the one of the leader transaction that initiated the write.
+func (r *Registry) TxnFollowerAdd(conn *bindings.Conn, id uint64) *transaction.Txn {
+	return r.txnAdd(conn, id, false)
+}
+
+// TxnFollowerSurrogate creates a surrogate follower transaction.
+//
+// Surrogate follower transactions are used to replace leader transactions when
+// a node loses leadership and are supposed to be undone by the next leader.
+func (r *Registry) TxnFollowerSurrogate(txn *transaction.Txn) *transaction.Txn {
+	if !txn.IsLeader() {
+		panic("expected leader transaction")
+	}
+	r.TxnDel(txn.ID())
+	filename := r.ConnLeaderFilename(txn.Conn())
+	conn := r.ConnFollower(filename)
+	txn = r.TxnFollowerAdd(conn, txn.ID())
+	txn.DryRun()
+
+	return txn
+}
+
+// TxnFollowerResurrected registers a follower transaction created by
+// resurrecting a zombie leader transaction.
+func (r *Registry) TxnFollowerResurrected(txn *transaction.Txn) {
+	if txn.IsLeader() {
+		panic("expected follower transaction")
+	}
+
+	// Delete the zombie leader transaction, which has the same ID.
+	r.TxnDel(txn.ID())
+
+	// Register the new follower transaction.
+	r.txnAdd(txn.Conn(), txn.ID(), false)
+}
+
+// TxnDel deletes the transaction with the given ID.
+func (r *Registry) TxnDel(id uint64) {
+	if _, ok := r.txns[id]; !ok {
+		panic(fmt.Sprintf("attempt to remove unregistered transaction %d", id))
+	}
+
+	delete(r.txns, id)
+}
+
+// TxnByID returns the transaction with the given ID, if it exists.
+func (r *Registry) TxnByID(id uint64) *transaction.Txn {
+	txn := r.txns[id]
+	return txn
+}
+
+// TxnByConn returns the transaction associated with the given connection, if
+// any.
+func (r *Registry) TxnByConn(conn *bindings.Conn) *transaction.Txn {
+	for _, txn := range r.txns {
+		if txn.Conn() == conn {
+			return txn
+		}
+	}
+	return nil
+}
+
+// TxnByFilename returns the transaction associated with the given database
+// filename, if any.
+//
+// If there is more than one transaction for the same filename, this method
+// panics.
+func (r *Registry) TxnByFilename(filename string) *transaction.Txn {
+	conns := make([]*bindings.Conn, 0)
+
+	if conn, ok := r.followers[filename]; ok {
+		conns = append(conns, conn)
+	}
+
+	for conn := range r.leaders {
+		if r.leaders[conn] == filename {
+			conns = append(conns, conn)
+		}
+	}
+
+	var found *transaction.Txn
+	for _, conn := range conns {
+		if txn := r.TxnByConn(conn); txn != nil {
+			if found == nil {
+				found = txn
+			} else {
+				panic("found more than one transaction for this database")
+			}
+		}
+	}
+
+	return found
+}
+
+// TxnDryRun makes transactions only transition between states, without
+// actually invoking the relevant SQLite APIs. This is used by tests and by
+// surrogate followers.
+func (r *Registry) TxnDryRun() {
+	r.txnDryRun = true
+}
+
+// TxnLastID returns the ID of the last transaction executed on the given
+// leader connection.
+func (r *Registry) TxnLastID(conn *bindings.Conn) uint64 {
+	return r.lastTxnIDs[conn]
+}
+
+// TxnCommittedAdd saves the ID of the given transaction in the committed buffer,
+// in case a client needs to check if it can be recovered.
+func (r *Registry) TxnCommittedAdd(txn *transaction.Txn) {
+	r.committed[r.committedCursor] = txn.ID()
+	r.committedCursor++
+	if r.committedCursor == len(r.committed) {
+		// Rollover
+		r.committedCursor = 0
+	}
+}
+
+// TxnCommittedFind scans the committed buffer and returns true if the given ID
+// is present.
+func (r *Registry) TxnCommittedFind(id uint64) bool {
+	for i := range r.committed {
+		if r.committed[i] == id {
+			return true
+		}
+	}
+	return false
+}
+
+func (r *Registry) txnAdd(conn *bindings.Conn, id uint64, isLeader bool) *transaction.Txn {
+	// Sanity check that a transaction for the same connection hasn't been
+	// registered already. Iterating is fast since there will always be few
+	// write transactions active at any given time.
+	if txn := r.TxnByConn(conn); txn != nil {
+		panic(fmt.Sprintf(
+			"a transaction for this connection is already registered with ID %d", txn.ID()))
+	}
+
+	txn := transaction.New(conn, id)
+
+	if isLeader {
+		txn.Leader()
+	} else if r.txnDryRun {
+		txn.DryRun()
+	}
+
+	r.txns[id] = txn
+
+	return txn
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/doc.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/doc.go
new file mode 100644
index 0000000000..29838ef0a8
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package replication implements the core part of dqlite, setting up
+// raft-based replication of the SQLite WAL.
+package replication
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/fsm.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/fsm.go
new file mode 100644
index 0000000000..09a49752b6
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/fsm.go
@@ -0,0 +1,809 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package replication
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"unsafe"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/connection"
+	"github.com/CanonicalLtd/go-dqlite/internal/protocol"
+	"github.com/CanonicalLtd/go-dqlite/internal/registry"
+	"github.com/CanonicalLtd/go-dqlite/internal/trace"
+	"github.com/CanonicalLtd/go-dqlite/internal/transaction"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// FSM implements the raft finite-state machine used to replicate
+// SQLite data.
+type FSM struct {
+	registry *registry.Registry
+
+	// Whether to make Apply panic when an error occurs, or to simply
+	// return an error. This should always be true except for unit
+	// tests.
+	panicOnFailure bool
+
+	noopBeginTxn uint64 // For upgrades
+}
+
+// NewFSM creates a new Raft state machine for executing dqlite-specific
+// commands.
+func NewFSM(registry *registry.Registry) *FSM {
+	return &FSM{
+		registry:       registry,
+		panicOnFailure: true,
+	}
+}
+
+// Apply log is invoked once a log entry is committed.  It returns a value
+// which will be made available in the ApplyFuture returned by Raft.Apply
+// method if that method was called on the same Raft node as the FSM.
+func (f *FSM) Apply(log *raft.Log) interface{} {
+	// Lock the registry for the entire duration of the command
+	// handlers. This is fine since no other write change can happen
+	// anyway while we're running. This might slow down opening new leader
+	// connections a bit, but since applications should be designed to
+	// open their leader connections once and for all, it shouldn't be a
+	// problem in practice. Read transactions are not affected by the locking.
+	f.registry.Lock()
+	defer f.registry.Unlock()
+
+	tracer := f.registry.TracerFSM()
+
+	// If we're being invoked in the context of a Methods replication hook
+	// applying a log command, block execution of any log commands coming
+	// on the wire from other leaders until the hook has completed.
+	if f.registry.HookSyncPresent() && !f.registry.HookSyncMatches(log.Data) {
+		tracer.Message("wait for methods hook to complete")
+		// This will temporarily release and re-acquire the registry lock.
+		f.registry.HookSyncWait()
+	}
+
+	err := f.apply(tracer, log)
+	if err != nil {
+		if f.panicOnFailure {
+			tracer.Panic("%v", err)
+		}
+		tracer.Error("apply failed", err)
+		return err
+	}
+
+	return nil
+}
+
+func (f *FSM) apply(tracer *trace.Tracer, log *raft.Log) error {
+	tracer = tracer.With(
+		trace.Integer("term", int64(log.Term)),
+		trace.Integer("index", int64(log.Index)),
+	)
+
+	cmd, err := protocol.UnmarshalCommand(log.Data)
+	if err != nil {
+		return errors.Wrap(err, "corrupted command data")
+	}
+	tracer = tracer.With(trace.String("cmd", cmd.Name()))
+
+	switch payload := cmd.Payload.(type) {
+	case *protocol.Command_Open:
+		err = f.applyOpen(tracer, payload.Open)
+		err = errors.Wrapf(err, "open %s", payload.Open.Name)
+	case *protocol.Command_Begin:
+		err = f.applyBegin(tracer, payload.Begin)
+		err = errors.Wrapf(err, "begin txn %d on %s", payload.Begin.Txid, payload.Begin.Name)
+	case *protocol.Command_Frames:
+		err = f.applyFrames(tracer, payload.Frames)
+		err = errors.Wrapf(err, "wal frames txn %d (%v)", payload.Frames.Txid, payload.Frames.IsCommit)
+	case *protocol.Command_Undo:
+		err = f.applyUndo(tracer, payload.Undo)
+		err = errors.Wrapf(err, "undo txn %d", payload.Undo.Txid)
+	case *protocol.Command_End:
+		err = f.applyEnd(tracer, payload.End)
+		err = errors.Wrapf(err, "end txn %d", payload.End.Txid)
+	case *protocol.Command_Checkpoint:
+		err = f.applyCheckpoint(tracer, payload.Checkpoint)
+		err = errors.Wrapf(err, "checkpoint")
+	default:
+		err = fmt.Errorf("unknown command")
+	}
+
+	if err != nil {
+		tracer.Error("failed", err)
+		return err
+	}
+
+	f.registry.IndexUpdate(log.Index)
+
+	return nil
+}
+
+func (f *FSM) applyOpen(tracer *trace.Tracer, params *protocol.Open) error {
+	tracer = tracer.With(
+		trace.String("name", params.Name),
+	)
+	tracer.Message("start")
+
+	if err := f.openFollower(params.Name); err != nil {
+		return err
+	}
+
+	tracer.Message("done")
+
+	return nil
+}
+
+func (f *FSM) applyBegin(tracer *trace.Tracer, params *protocol.Begin) error {
+	tracer = tracer.With(
+		trace.Integer("txn", int64(params.Txid)),
+	)
+
+	// This FSM command is not needed anymore. We make it a no-op, for
+	// backward compatibility with deployments that do have it stored in
+	// their raft logs.
+	tracer.Message("no-op")
+	f.noopBeginTxn = params.Txid
+
+	return nil
+}
+
+func (f *FSM) applyFrames(tracer *trace.Tracer, params *protocol.Frames) error {
+	tracer = tracer.With(
+		trace.Integer("txn", int64(params.Txid)),
+		trace.Integer("pages", int64(len(params.PageNumbers))),
+		trace.Integer("commit", int64(params.IsCommit)))
+	tracer.Message("start")
+
+	if params.Filename == "" {
+		// Backward compatibility with existing LXD deployments.
+		params.Filename = "db.bin"
+	}
+
+	txn := f.registry.TxnByID(params.Txid)
+	begin := true
+
+	if txn != nil {
+		// We know about this transaction.
+		tracer.Message("txn found %s", txn)
+
+		if txn.IsLeader() {
+			// We're executing a Frames command triggered by the
+			// Methods.Frames hook on this server.
+			if txn.IsZombie() {
+				// The only way that this can be a zombie is if
+				// this Frames command is being executed by
+				// this FSM after this leader failed with
+				// ErrLeadershipLost, and 1) this server was
+				// re-elected right away and has successfully
+				// retried to apply this command or 2) another
+				// server was elected and a quorum was still
+				// reached for this command log despite the
+				// previous leader not getting notified about
+				// it.
+				if params.IsCommit == 0 {
+					// This is not a commit frames
+					// command. Regardless of whether 1) or
+					// 2) happened, it's safe to create a
+					// surrogate follower transaction and
+					// transition it to Writing.
+					//
+					// If 1) happens, then the next
+					// Methods.Begin hook on this server
+					// will find a leftover Writing
+					// follower and will roll it back with
+					// an Undo command. If 2) happens, same.
+					tracer.Message("create surrogate follower %s", txn)
+					txn = f.registry.TxnFollowerSurrogate(txn)
+				} else {
+					// This is a commit frames
+					// command. Regardless of whether 1) or
+					// 2) happened, we need to resurrect
+					// the zombie into a follower and
+					// possibly re-apply any non-commit
+					// frames that were applied so far in
+					// the transaction.
+					tracer.Message("recover commit")
+					conn := f.registry.ConnFollower(params.Filename)
+					var err error
+					txn, err = txn.Resurrect(conn)
+					if err != nil {
+						return err
+					}
+					f.registry.TxnFollowerResurrected(txn)
+					begin = txn.State() == transaction.Pending
+				}
+			} else {
+				// We're executing this FSM command during the
+				// execution of the Methods.Frames hook.
+			}
+
+		} else {
+			// We're executing the Frames command as a follower. The
+			// transaction must be in the Writing state.
+			if txn.State() != transaction.Writing {
+				tracer.Panic("unexpected transaction %s", txn)
+			}
+			begin = false
+		}
+	} else {
+		// We don't know about this transaction.
+		//
+		// This must be a new follower transaction. Let's make sure
+		// that no other transaction against this database is happening
+		// on this server.
+		if txn := f.registry.TxnByFilename(params.Filename); txn != nil {
+			if txn.IsZombie() {
+				// This transaction was left around by a
+				// leader that lost leadership during a Frames
+				// hook that was the first to be sent and did
+				// not reach a quorum, so no other server knows
+				// about it, and now we're starting a new
+				// transaction initiated by a new leader. We can
+				// just purge it from the registry, since its
+				// state was already rolled back by SQLite
+				// after the xFrames hook failure.
+				tracer.Message("found zombie transaction %s", txn)
+
+				// Perform some sanity checks.
+				if txn.ID() > params.Txid {
+					tracer.Panic("zombie transaction too recent %s", txn)
+				}
+				if txn.State() != transaction.Pending {
+					tracer.Panic("unexpected transaction state %s", txn)
+				}
+
+				tracer.Message("removing stale zombie transaction %s", txn)
+				f.registry.TxnDel(txn.ID())
+			} else {
+				tracer.Panic("unexpected transaction %s", txn)
+			}
+		}
+
+		conn := f.registry.ConnFollower(params.Filename)
+		txn = f.registry.TxnFollowerAdd(conn, params.Txid)
+	}
+
+	if len(params.Pages) != 0 {
+		// This should be a v1 log entry.
+		if len(params.PageNumbers) != 0 || len(params.PageData) != 0 {
+			tracer.Panic("unexpected data mix between v1 and v2")
+		}
+
+		// Convert to v2.
+		params.PageNumbers = make([]uint32, 0)
+		params.PageData = make([]byte, int(params.PageSize)*len(params.Pages))
+
+		for i := range params.Pages {
+			params.PageNumbers = append(params.PageNumbers, params.Pages[i].Number)
+			copy(
+				params.PageData[(i*int(params.PageSize)):((i+1)*int(params.PageSize))],
+				params.Pages[i].Data,
+			)
+		}
+	}
+
+	info := bindings.WalReplicationFrameInfo{}
+	info.IsBegin(begin)
+	info.PageSize(int(params.PageSize))
+	info.Len(len(params.PageNumbers))
+	info.Truncate(uint(params.Truncate))
+
+	isCommit := false
+	if params.IsCommit > 0 {
+		isCommit = true
+	}
+	info.IsCommit(isCommit)
+
+	numbers := make([]bindings.PageNumber, len(params.PageNumbers))
+	for i, pgno := range params.PageNumbers {
+		numbers[i] = bindings.PageNumber(pgno)
+	}
+
+	info.Pages(numbers, unsafe.Pointer(&params.PageData[0]))
+
+	if err := txn.Frames(begin, info); err != nil {
+		return err
+	}
+
+	// If the commit flag is on, this is the final write of a transaction.
+	if isCommit {
+		// Save the ID of this transaction in the buffer of recently committed
+		// transactions.
+		f.registry.TxnCommittedAdd(txn)
+
+		// If it's a follower, we also unregister it.
+		if !txn.IsLeader() {
+			tracer.Message("unregister txn")
+			f.registry.TxnDel(params.Txid)
+		}
+	}
+
+	tracer.Message("done")
+
+	f.noopBeginTxn = 0 // Backward compat.
+
+	return nil
+}
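
The v1-to-v2 conversion above packs the per-frame pages into a flat page-number slice and one contiguous data buffer, with page i occupying bytes [i*pageSize, (i+1)*pageSize). A toy illustration with hypothetical types (not the vendored ones):

package main

import "fmt"

type framesPage struct {
	data   []byte
	number uint32
}

// convertV1 turns a v1-style list of frames into the v2 layout: a slice of
// page numbers plus one contiguous data buffer.
func convertV1(pages []framesPage, pageSize int) ([]uint32, []byte) {
	numbers := make([]uint32, 0, len(pages))
	data := make([]byte, pageSize*len(pages))
	for i, page := range pages {
		numbers = append(numbers, page.number)
		copy(data[i*pageSize:(i+1)*pageSize], page.data)
	}
	return numbers, data
}

func main() {
	pages := []framesPage{
		{data: []byte{0xaa, 0xbb}, number: 3},
		{data: []byte{0xcc, 0xdd}, number: 7},
	}
	numbers, data := convertV1(pages, 2)
	fmt.Println(numbers, data) // [3 7] [170 187 204 221]
}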
+
+func (f *FSM) applyUndo(tracer *trace.Tracer, params *protocol.Undo) error {
+	tracer = tracer.With(
+		trace.Integer("txn", int64(params.Txid)),
+	)
+	tracer.Message("start")
+
+	txn := f.registry.TxnByID(params.Txid)
+
+	if txn != nil {
+		// We know about this transaction.
+		tracer.Message("txn found: %s", txn)
+	} else {
+		if f.noopBeginTxn != params.Txid {
+			tracer.Panic("txn not found")
+		}
+		f.noopBeginTxn = 0
+		return nil
+	}
+
+	if err := txn.Undo(); err != nil {
+		return err
+	}
+
+	// Let's decide whether to remove the transaction from the registry or
+	// not. The following scenarios are possible:
+	//
+	// 1. This is a non-zombie leader transaction. We can assume that this
+	//    command is being applied in the context of a Methods.Undo() hook
+	//    execution, which will wait for the command to succeed and then
+	//    remove the transaction by itself in the End hook, so no need to
+	//    remove it here.
+	//
+	// 2. This is a follower transaction. We're done here, since undone is
+	//    a final state, so let's remove the transaction.
+	//
+	// 3. This is a zombie leader transaction. This can happen if the
+	//    leader lost leadership when applying a non-commit frames
+	//    command, but the command was still committed (either by us if we
+	//    were re-elected, or by another server if the command still
+	//    reached a quorum). In that case we're handling an Undo command to
+	//    roll back a dangling transaction, and we have to remove the zombie
+	//    ourselves, because nobody else would do it otherwise.
+	if !txn.IsLeader() || txn.IsZombie() {
+		tracer.Message("unregister txn")
+		f.registry.TxnDel(params.Txid)
+	}
+
+	tracer.Message("done")
+
+	return nil
+}
+
+func (f *FSM) applyEnd(tracer *trace.Tracer, params *protocol.End) error {
+	tracer = tracer.With(
+		trace.Integer("txn", int64(params.Txid)),
+	)
+
+	// This FSM command is not needed anymore. We make it a no-op, for
+	// backward compatibility with deployments that do have it stored in
+	// their raft logs.
+	tracer.Message("no-op")
+
+	return nil
+}
+
+func (f *FSM) applyCheckpoint(tracer *trace.Tracer, params *protocol.Checkpoint) error {
+	tracer = tracer.With(
+		trace.String("file", params.Name),
+	)
+	tracer.Message("start")
+
+	conn := f.registry.ConnFollower(params.Name)
+
+	if txn := f.registry.TxnByConn(conn); txn != nil {
+		// Something went really wrong: a checkpoint should never be issued
+		// while a follower transaction is in flight.
+		tracer.Panic("can't run checkpoint concurrently with transaction %s", txn)
+	}
+
+	// Run the checkpoint.
+	logFrames, checkpointedFrames, err := conn.WalCheckpoint("main", bindings.WalCheckpointTruncate)
+	if err != nil {
+		return err
+	}
+	if logFrames != 0 {
+		tracer.Panic("%d frames are still in the WAL", logFrames)
+	}
+	if checkpointedFrames != 0 {
+		tracer.Panic("only %d frames were checkpointed", checkpointedFrames)
+	}
+
+	tracer.Message("done")
+
+	return nil
+}
+
+// Snapshot is used to support log compaction.
+//
+// From the raft's package documentation:
+//
+//   "This call should return an FSMSnapshot which can be used to save a
+//   point-in-time snapshot of the FSM. Apply and Snapshot are not called in
+//   multiple threads, but Apply will be called concurrently with Persist. This
+//   means the FSM should be implemented in a fashion that allows for
+//   concurrent updates while a snapshot is happening."
+//
+// In dqlite's case we do the following:
+//
+// - For each database that we track (i.e. that we have a follower connection
+//   for), create a backup using sqlite3_backup() and then read the content of
+//   the backup file and the current WAL file. Since nothing else is writing to
+//   the database (FSM.Apply won't be called until FSM.Snapshot completes), we
+//   could probably read the database bytes directly to increase efficiency,
+//   but for now we do a concurrent-write-safe backup for good measure.
+//
+// - For each database we track, look for ongoing transactions and include
+//   their ID in the FSM snapshot, so their state can be re-created upon
+//   snapshot Restore.
+//
+// This is a bit heavy-weight but should be safe. Optimizations can be added as
+// needed.
+func (f *FSM) Snapshot() (raft.FSMSnapshot, error) {
+	f.registry.Lock()
+	defer f.registry.Unlock()
+
+	tracer := f.registry.TracerFSM()
+
+	databases := []*fsmDatabaseSnapshot{}
+
+	// Loop through all known databases and create a backup for each of
+	// them. The filenames associated with follower connections uniquely
+	// identify all known databases, since there will be one and only
+	// follower connection for each known database (we never close follower
+	// connections since database deletion is not supported).
+	for _, filename := range f.registry.ConnFollowerFilenames() {
+		database, err := f.snapshotDatabase(tracer, filename)
+		if err != nil {
+			err = errors.Wrapf(err, "%s", filename)
+			tracer.Error("database snapshot failed", err)
+			return nil, err
+		}
+		databases = append(databases, database)
+	}
+
+	return &FSMSnapshot{
+		index:     f.registry.Index(),
+		databases: databases,
+	}, nil
+}
+
+// Backup a single database.
+func (f *FSM) snapshotDatabase(tracer *trace.Tracer, filename string) (*fsmDatabaseSnapshot, error) {
+	tracer = tracer.With(trace.String("snapshot", filename))
+	tracer.Message("start")
+
+	// Figure out if there is an ongoing transaction associated with any of
+	// the database connections, if so we'll return an error.
+	conns := f.registry.ConnLeaders(filename)
+	conns = append(conns, f.registry.ConnFollower(filename))
+	txid := ""
+	for _, conn := range conns {
+		if txn := f.registry.TxnByConn(conn); txn != nil {
+			// XXX TODO: If we leave started transactions in the
+			// snapshot, the TestIntegration_Snapshot crashes with:
+			//
+			// panic: unexpected follower transaction 7 started as follower
+			//
+			// figure out why.
+			//if txn.State() == transaction.Writing {
+			tracer.Message("transaction %s is in progress", txn)
+			return nil, fmt.Errorf("transaction %s is in progress", txn)
+			//}
+			// We'll save the transaction ID in the snapshot.
+			//tracer.Message("idle transaction %s", txn)
+			//txid = strconv.FormatUint(txn.ID(), 10)
+		}
+	}
+
+	database, wal, err := connection.Snapshot(f.registry.Vfs(), filename)
+	if err != nil {
+		return nil, err
+	}
+
+	tracer.Message("done")
+
+	return &fsmDatabaseSnapshot{
+		filename: filename,
+		database: database,
+		wal:      wal,
+		txid:     txid,
+	}, nil
+}
+
+// Restore is used to restore an FSM from a snapshot. It is not called
+// concurrently with any other command. The FSM must discard all previous
+// state.
+func (f *FSM) Restore(reader io.ReadCloser) error {
+	f.registry.Lock()
+	defer f.registry.Unlock()
+
+	tracer := f.registry.TracerFSM()
+
+	// The first 8 bytes contain the FSM Raft log index.
+	var index uint64
+	if err := binary.Read(reader, binary.LittleEndian, &index); err != nil {
+		return errors.Wrap(err, "failed to read FSM index")
+	}
+
+	tracer = tracer.With(trace.Integer("restore", int64(index)))
+	tracer.Message("start")
+
+	f.registry.IndexUpdate(index)
+
+	for {
+		done, err := f.restoreDatabase(tracer, reader)
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+	}
+
+	tracer.Message("done")
+
+	return nil
+}
+
+// Restore a single database. Returns true if this was the last database
+// in the snapshot and the restore loop should stop, false otherwise.
+func (f *FSM) restoreDatabase(tracer *trace.Tracer, reader io.ReadCloser) (bool, error) {
+	done := false
+
+	// The first 8 bytes contain the size of database.
+	var dataSize uint64
+	if err := binary.Read(reader, binary.LittleEndian, &dataSize); err != nil {
+		return false, errors.Wrap(err, "failed to read database size")
+	}
+	tracer.Message("database size: %d", dataSize)
+
+	// Then there's the database data.
+	data := make([]byte, dataSize)
+	if _, err := io.ReadFull(reader, data); err != nil {
+		return false, errors.Wrap(err, "failed to read database data")
+	}
+
+	// Next, the size of the WAL.
+	var walSize uint64
+	if err := binary.Read(reader, binary.LittleEndian, &walSize); err != nil {
+		return false, errors.Wrap(err, "failed to read wal size")
+	}
+	tracer.Message("wal size: %d", walSize)
+
+	// Read the WAL data.
+	wal := make([]byte, walSize)
+	if _, err := io.ReadFull(reader, wal); err != nil {
+		return false, errors.Wrap(err, "failed to read wal data")
+	}
+
+	// Read the database path.
+	bufReader := bufio.NewReader(reader)
+	filename, err := bufReader.ReadString(0)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to read database name")
+	}
+	filename = filename[:len(filename)-1] // Strip the trailing 0
+	tracer.Message("filename: %s", filename)
+
+	// XXX TODO: reason about this situation, is it harmful?
+	// Check that there are no leader connections for this database.
+	//
+	// FIXME: we should relax this, as it prevents restoring snapshots "on
+	// the fly".
+	// conns := f.registry.ConnLeaders(filename)
+	// if len(conns) > 0 {
+	// 	tracer.Panic("found %d leader connections", len(conns))
+	// }
+
+	// XXX TODO: reason about this situation, is it possible?
+	//txn := f.transactions.GetByConn(f.connections.Follower(name))
+	//if txn != nil {
+	//	f.logger.Printf("[WARN] dqlite: fsm: closing follower in-flight transaction %s", txn)
+	//	f.transactions.Remove(txn.ID())
+	//}
+
+	// Close any follower connection, since we're going to overwrite the
+	// database file.
+	if f.registry.ConnFollowerExists(filename) {
+		tracer.Message("close follower: %s", filename)
+		follower := f.registry.ConnFollower(filename)
+		f.registry.ConnFollowerDel(filename)
+		if err := follower.Close(); err != nil {
+			return false, err
+		}
+	}
+
+	// At this point there should be no connection open against this
+	// database, so it's safe to overwrite it.
+	txid, err := bufReader.ReadString(0)
+	if err != nil {
+		if err != io.EOF {
+			return false, errors.Wrap(err, "failed to read txid")
+		}
+		done = true // This is the last database.
+	}
+	tracer.Message("transaction ID: %s", txid)
+
+	vfs := f.registry.Vfs()
+
+	if err := connection.Restore(vfs, filename, data, wal); err != nil {
+		return false, err
+	}
+
+	tracer.Message("open follower: %s", filename)
+	if err := f.openFollower(filename); err != nil {
+		return false, err
+	}
+
+	if txid != "" {
+		// txid, err := strconv.ParseUint(txid, 10, 64)
+		// if err != nil {
+		// 	return false, err
+		// }
+		// tracer.Message("add transaction: %d", txid)
+		// conn := f.registry.ConnFollower(filename)
+		// txn := f.registry.TxnFollowerAdd(conn, txid)
+		// if err := txn.Begin(); err != nil {
+		// 	return false, err
+		// }
+	}
+
+	return done, nil
+}
+
+func (f *FSM) openFollower(filename string) error {
+	vfs := f.registry.Vfs().Name()
+	conn, err := bindings.Open(filename, vfs)
+	if err != nil {
+		return errors.Wrap(err, "failed to open connection")
+	}
+
+	err = conn.Exec("PRAGMA synchronous=OFF")
+	if err != nil {
+		return errors.Wrap(err, "failed to disable syncs")
+	}
+
+	err = conn.Exec("PRAGMA journal_mode=wal")
+	if err != nil {
+		return errors.Wrap(err, "failed to set WAL mode")
+	}
+
+	_, err = conn.ConfigNoCkptOnClose(true)
+	if err != nil {
+		return errors.Wrap(err, "failed to disable checkpoints on close")
+	}
+
+	err = conn.WalReplicationFollower()
+	if err != nil {
+		return errors.Wrap(err, "failed to set follower replication mode")
+	}
+
+	f.registry.ConnFollowerAdd(filename, conn)
+
+	return nil
+}
+
+// FSMSnapshot is returned by an FSM in response to a Snapshot
+// It must be safe to invoke FSMSnapshot methods with concurrent
+// calls to Apply.
+type FSMSnapshot struct {
+	index     uint64
+	databases []*fsmDatabaseSnapshot
+}
+
+// Persist should dump all necessary state to the WriteCloser 'sink',
+// and call sink.Close() when finished or call sink.Cancel() on error.
+func (s *FSMSnapshot) Persist(sink raft.SnapshotSink) error {
+	// First, write the FSM index.
+	buffer := new(bytes.Buffer)
+	if err := binary.Write(buffer, binary.LittleEndian, s.index); err != nil {
+		return errors.Wrap(err, "failed to encode FSM index")
+	}
+	if _, err := sink.Write(buffer.Bytes()); err != nil {
+		return errors.Wrap(err, "failed to write FSM index to sink")
+	}
+
+	// Then write the individual databases.
+	for _, database := range s.databases {
+		if err := s.persistDatabase(sink, database); err != nil {
+			sink.Cancel()
+			return err
+		}
+
+	}
+
+	if err := sink.Close(); err != nil {
+		sink.Cancel()
+		return err
+	}
+
+	return nil
+}
+
+// Persist a single database snapshot.
+func (s *FSMSnapshot) persistDatabase(sink raft.SnapshotSink, database *fsmDatabaseSnapshot) error {
+	// Start by writing the size of the backup.
+	buffer := new(bytes.Buffer)
+	dataSize := uint64(len(database.database))
+	if err := binary.Write(buffer, binary.LittleEndian, dataSize); err != nil {
+		return errors.Wrap(err, "failed to encode data size")
+	}
+	if _, err := sink.Write(buffer.Bytes()); err != nil {
+		return errors.Wrap(err, "failed to write data size to sink")
+	}
+
+	// Next write the data to the sink.
+	if _, err := sink.Write(database.database); err != nil {
+		return errors.Wrap(err, "failed to write backup data to sink")
+
+	}
+
+	buffer.Reset()
+	walSize := uint64(len(database.wal))
+	if err := binary.Write(buffer, binary.LittleEndian, walSize); err != nil {
+		return errors.Wrap(err, "failed to encode wal size")
+	}
+	if _, err := sink.Write(buffer.Bytes()); err != nil {
+		return errors.Wrap(err, "failed to write wal size to sink")
+	}
+	if _, err := sink.Write(database.wal); err != nil {
+		return errors.Wrap(err, "failed to write wal data to sink")
+
+	}
+
+	// Next write the database name.
+	buffer.Reset()
+	buffer.WriteString(database.filename)
+	if _, err := sink.Write(buffer.Bytes()); err != nil {
+		return errors.Wrap(err, "failed to write database name to sink")
+	}
+	if _, err := sink.Write([]byte{0}); err != nil {
+		return errors.Wrap(err, "failed to write database name delimiter to sink")
+	}
+
+	// Finally write the current transaction ID, if any.
+	buffer.Reset()
+	buffer.WriteString(database.txid)
+	if _, err := sink.Write(buffer.Bytes()); err != nil {
+		return errors.Wrap(err, "failed to write txid to sink")
+	}
+
+	return nil
+}
+
+// Release is invoked when we are finished with the snapshot.
+func (s *FSMSnapshot) Release() {
+}
+
+// fsmDatabaseSnapshot holds backup information for a single database.
+type fsmDatabaseSnapshot struct {
+	filename string
+	database []byte
+	wal      []byte
+	txid     string
+}
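
Persist and Restore above agree on a simple stream layout: an 8-byte little-endian FSM index, then for each database a size-prefixed data blob, a size-prefixed WAL blob, a NUL-terminated filename, and a transaction ID (NUL-terminated for every database except the last, whose ID runs to EOF). A sketch of an encoder for a single-database snapshot, under that reading of the code:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeSnapshot serializes one database in the layout read back by
// FSM.Restore. Writes of fixed-size values to a bytes.Buffer cannot fail,
// so errors are ignored here for brevity.
func encodeSnapshot(index uint64, filename string, data, wal []byte, txid string) []byte {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.LittleEndian, index)             // FSM index.
	binary.Write(buf, binary.LittleEndian, uint64(len(data))) // Database size.
	buf.Write(data)
	binary.Write(buf, binary.LittleEndian, uint64(len(wal))) // WAL size.
	buf.Write(wal)
	buf.WriteString(filename)
	buf.WriteByte(0)      // Filename delimiter.
	buf.WriteString(txid) // Last database: the ID is terminated by EOF.
	return buf.Bytes()
}

func main() {
	blob := encodeSnapshot(42, "test.db", []byte("db pages"), []byte("wal frames"), "")
	fmt.Printf("%d bytes, index %d\n", len(blob), binary.LittleEndian.Uint64(blob[:8]))
}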
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/methods.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/methods.go
new file mode 100644
index 0000000000..7278b48136
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/methods.go
@@ -0,0 +1,794 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package replication
+
+import (
+	"sync"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/protocol"
+	"github.com/CanonicalLtd/go-dqlite/internal/registry"
+	"github.com/CanonicalLtd/go-dqlite/internal/trace"
+	"github.com/CanonicalLtd/go-dqlite/internal/transaction"
+	"github.com/hashicorp/raft"
+)
+
+// Methods implements the SQLite replication C API using the sqlite3 bindings.
+type Methods struct {
+	registry     *registry.Registry
+	raft         *raft.Raft   // Raft engine to use
+	mu           sync.RWMutex // TODO: make this lock per-database.
+	applyTimeout time.Duration
+
+	// If greater than zero, skip the initial not-leader checks this
+	// number of times. Only used for testing.
+	noLeaderCheck int
+}
+
+// NewMethods returns a new Methods instance that can be used as a callbacks API
+// for raft-based SQLite replication of a single connection.
+func NewMethods(reg *registry.Registry, raft *raft.Raft) *Methods {
+	return &Methods{
+		registry:     reg,
+		raft:         raft,
+		applyTimeout: 10 * time.Second,
+	}
+}
+
+// ApplyTimeout sets the maximum amount of time to wait before giving
+// up applying a raft command. The default is 10 seconds.
+func (m *Methods) ApplyTimeout(timeout time.Duration) {
+	m.applyTimeout = timeout
+}
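+
+// A minimal usage sketch (illustrative only; reg and rft stand for an
+// existing *registry.Registry and *raft.Raft, which this example assumes):
+//
+//	methods := NewMethods(reg, rft)
+//	methods.ApplyTimeout(5 * time.Second)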
+
+// Begin is the hook invoked by SQLite when a new write transaction is
+// being started within a connection in leader replication mode on
+// this server.
+func (m *Methods) Begin(conn *bindings.Conn) int {
+	// We take the lock for the entire duration of the hook to avoid
+	// races between two concurrent hooks.
+	//
+	// The main tasks of Begin are to check that no other write transaction
+	// is in progress and to clean up any dangling follower transactions
+	// that might have been left open after a leadership change.
+	//
+	// Concurrent calls can happen because the xBegin hook is fired by
+	// SQLite before acquiring a write lock on the WAL (i.e. before calling
+	// WalBeginWriteTransaction), so different connections can enter the
+	// Begin hook at any time.
+	//
+	// Some races worth mentioning are:
+	//
+	//  - Two concurrent calls of Begin: without the lock, they would race
+	//    to open a follower connection if it does not exist yet.
+	//
+	//  - Two concurrent calls of Begin and another hook: without the lock,
+	//    they would race to mutate the registry (e.g. add/delete
+	//    transactions).
+	//
+	// The most common errors that Begin returns are:
+	//
+	//  - SQLITE_BUSY:  If we detect that a write transaction is in progress
+	//                  on another connection. SQLite's call to sqlite3PagerBegin
+	//                  that triggered the xBegin hook will propagate the error
+	//                  to sqlite3BtreeBeginTrans, which will invoke the busy
+	//                  handler (if set), calling xBegin/Begin again, which will
+	//                  keep returning SQLITE_BUSY as long as the other transaction
+	//                  is in progress. If the busy handler gives up, the SQLITE_BUSY
+	//                  error will bubble up, and the statement that triggered the
+	//                  write attempt will fail. The client should then execute a
+	//                  ROLLBACK and then decide what to do.
+	//
+	//  - SQLITE_IOERR: This is returned if we are not the leader when the
+	//                  hook fires or if we fail to apply the Open follower
+	//                  command log (in case no follower for this database
+	//                  is open yet). We include the relevant extended
+	//                  code, either SQLITE_IOERR_NOT_LEADER if this server
+	//                  is not the leader anymore or is being shut down,
+	//                  or SQLITE_IOERR_LEADERSHIP_LOST if leadership was
+	//                  lost while applying the Open command. SQLite's
+	//                  call to sqlite3PagerBegin that triggered the xBegin
+	//                  hook will propagate the error to sqlite3BtreeBeginTrans,
+	//                  which will in turn propagate it to the OP_Transaction case
+	//                  of vdbe.c, which will goto abort_due_to_error and finally
+	//                  call sqlite3VdbeHalt, automatically rolling back the
+	//                  transaction. Since no write transaction was actually started the
+	//                  xEnd hook is not fired.
+	//
+	// We might return SQLITE_INTERRUPT or SQLITE_INTERNAL in case of more exotic
+	// failures. See the apply() method for details.
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Lock the registry, to avoid races with the FSM when checking for the
+	// presence of a hook sync and when modifying transactions.
+	m.registry.Lock()
+	defer m.registry.Unlock()
+
+	// Enable synchronization with the FSM: it will only execute commands
+	// applied during this hook, and block applying any other command until
+	// this hook is done.
+	m.registry.HookSyncSet()
+	defer m.registry.HookSyncReset()
+
+	tracer := m.registry.TracerConn(conn, "begin")
+	tracer.Message("start")
+
+	// Check if we're the leader.
+	if m.noLeaderCheck == 0 && m.raft.State() != raft.Leader {
+		// No dqlite state has been modified, and the WAL write lock
+		// has not been acquired. Just return ErrIoErrNotLeader.
+		tracer.Message("not leader")
+		return bindings.ErrIoErrNotLeader
+	}
+
+	// Update the noLeaderCheck counter (used for tests).
+	if m.noLeaderCheck > 0 {
+		m.noLeaderCheck--
+	}
+
+	// Possibly open a follower for this database if it doesn't exist yet.
+	if err := m.beginMaybeAddFollowerConn(tracer, conn); err != nil {
+		// Since we haven't yet registered a transaction, there's no
+		// cleanup to do here. The worst that can happen is that the
+		// Raft.Apply() call failed with ErrLeadershipLost while a
+		// quorum for the log was actually reached. In that case all FSMs
+		// (including our own) will apply the open command.
+		return errno(err)
+	}
+
+	// Check whether there is already an ongoing transaction.
+	if err := m.beginMaybeHandleInProgressTxn(tracer, conn); err != nil {
+		return errno(err)
+	}
+
+	// Use the last applied index as transaction ID.
+	//
+	// If this server is still the leader, this number is guaranteed to be
+	// strictly higher than any previous transaction ID, since after a
+	// leadership change we always call raft.Barrier() to advance the FSM
+	// up to the latest committed log, and raft.Barrier() itself increments
+	// the applied index by one.
+	//
+	// If it's not the leader anymore, it does not matter which ID we pick
+	// because any upcoming Frames or Undo hook will fail with ErrNotLeader.
+	txid := m.raft.AppliedIndex()
+
+	tracer = tracer.With(trace.Integer("txn", int64(txid)))
+	tracer.Message("register transaction")
+
+	// Create a new transaction.
+	m.registry.TxnLeaderAdd(conn, txid)
+
+	tracer.Message("done")
+
+	return 0
+}
+
+// Check if a follower connection is already open for this database; if not,
+// open one with the Open raft command.
+func (m *Methods) beginMaybeAddFollowerConn(tracer *trace.Tracer, conn *bindings.Conn) error {
+	filename := m.registry.ConnLeaderFilename(conn)
+
+	if m.registry.ConnFollowerExists(filename) {
+		return nil
+	}
+
+	tracer.Message("open follower for %s", filename)
+	return m.apply(tracer, conn, protocol.NewOpen(filename))
+}
+
+// This method ensures that there are no other write transactions happening
+// on this node against the database associated with the given connection.
+//
+// If one is found, this method will try to take appropriate measures.
+//
+// If an error is returned, Begin should stop and return it.
+func (m *Methods) beginMaybeHandleInProgressTxn(tracer *trace.Tracer, conn *bindings.Conn) error {
+	filename := m.registry.ConnLeaderFilename(conn)
+	txn := m.registry.TxnByFilename(filename)
+	if txn == nil {
+		return nil
+	}
+
+	tracer.Message("found in-progress transaction %s", txn)
+
+	// Check if the in-progress transaction is a concurrent leader.
+	if txn.IsLeader() {
+		if txn.Conn() != conn {
+			// This means that there is a transaction in progress
+			// originated on this Methods instance for another
+			// connection.
+			//
+			// We can't proceed as the Begin method would then try
+			// to add a new transaction to the registry and crash.
+			//
+			// No dqlite state has been modified, and the WAL write
+			// lock has not been acquired.
+			//
+			// We just return ErrBusy, which has the same effect as
+			// the call to sqlite3WalBeginWriteTransaction (invoked
+			// in pager.c after a successful xBegin) would have, i.e.
+			// the busy handler will end up polling us again until
+			// the concurrent write transaction ends and we're free
+			// to go.
+			tracer.Message("busy")
+			return bindings.Error{Code: bindings.ErrBusy}
+		}
+
+		// There's a transaction originated on this Methods instance for
+		// the same connection.
+		if !txn.IsZombie() {
+			// This should be an impossible situation, since it
+			// would mean that the same connection managed to begin
+			// a second write transaction, something that SQLite
+			// prevents.
+			tracer.Panic("unexpected transaction on same connection %s", txn)
+		}
+
+		// If we have a zombie for this connection, it must mean that a
+		// Frames command failed because we were not leaders anymore at
+		// that time and this was a commit frames command following one
+		// or more non-commit frames commands that were successfully
+		// applied.
+		if txn.State() != transaction.Writing {
+			tracer.Panic("unexpected transaction %s", txn)
+		}
+
+		// Create a surrogate follower and revert the transaction just
+		// below.
+		m.surrogateWriting(tracer, txn)
+	}
+
+	tracer.Message("undo stale transaction %s", txn)
+	if err := m.apply(tracer, conn, protocol.NewUndo(txn.ID())); err != nil {
+		// Whatever the reason for the failure (not leader or
+		// leadership lost), we can leave things as they are,
+		// since the next leader should try to run the undo
+		// command again.
+		return err
+	}
+
+	return nil
+}
+
+// Abort is the hook invoked by SQLite when a write transaction fails
+// to begin.
+func (m *Methods) Abort(conn *bindings.Conn) int {
+	// We take the lock for the entire duration of the hook to avoid
+	// races between two concurrent hooks.
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Lock the registry.
+	m.registry.Lock()
+	defer m.registry.Unlock()
+
+	tracer := m.registry.TracerConn(conn, "abort")
+	tracer.Message("start")
+
+	// This is only called if SQLite fails to start a WAL write transaction.
+	txn := m.registry.TxnByConn(conn)
+	if txn == nil {
+		tracer.Panic("no in-progress transaction")
+	}
+	tracer.Message("found txn %s", txn)
+
+	// Sanity checks.
+	if !txn.IsLeader() || txn.Conn() != conn {
+		tracer.Panic("unexpected transaction %s", txn)
+	}
+	if txn.State() != transaction.Pending {
+		tracer.Panic("unexpected transaction state %s", txn)
+	}
+
+	tracer.Message("discard aborted transaction")
+	m.registry.TxnDel(txn.ID())
+
+	return 0
+}
+
+// Frames is the hook invoked by sqlite when new frames need to be
+// flushed to the write-ahead log.
+func (m *Methods) Frames(conn *bindings.Conn, frames bindings.WalReplicationFrameList) int {
+	// We take the lock for the entire duration of the hook to avoid
+	// races between two concurrent hooks. See the comments in Begin for more
+	// details.
+	//
+	// The xFrames hook is invoked by the SQLite pager in two cases, either
+	// in sqlite3PagerCommitPhaseOne (for committing) or in pagerStress (for
+	// flushing dirty pages to disk, invoked by the pcache implementation when
+	// no more space is available in the built-in page cache).
+	//
+	// In the first case, any error returned here will be propagated up to
+	// sqlite3VdbeHalt (the final step of SQLite's VDBE), which will rollback
+	// the transaction and indirectly invoke sqlite3PagerRollback which in turn
+	// will indirectly fire xUndo and xEnd.
+	//
+	// In the second case, any error returned here will transition the
+	// pager to the ERROR state (see the final pager_error call in
+	// pagerStress) and will be propagated first to sqlite3PcacheFetchStress
+	// and then indirectly to the btree layer, which will automatically rollback
+	// the transaction. The xUndo and xEnd hooks won't be fired, since the
+	// pager is in an error state.
+	//
+	// See the apply() method for the most common errors that Frames may
+	// return.
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Lock the registry.
+	m.registry.Lock()
+	defer m.registry.Unlock()
+
+	// Enable synchronization with the FSM: it will only execute commands
+	// applied during this hook, and block applying any other command until
+	// this hook is done.
+	m.registry.HookSyncSet()
+	defer m.registry.HookSyncReset()
+
+	tracer := m.registry.TracerConn(conn, "frames")
+	tracer.Message("start (commit=%v)", frames.IsCommit())
+
+	txn := m.registry.TxnByConn(conn)
+	if txn == nil {
+		tracer.Panic("no in-progress transaction")
+	}
+	tracer.Message("found txn %s", txn)
+
+	// Sanity checks.
+	if !txn.IsLeader() {
+		tracer.Panic("unexpected transaction %s", txn)
+	}
+	if txn.State() != transaction.Pending && txn.State() != transaction.Writing {
+		tracer.Panic("unexpected transaction state %s", txn)
+	}
+
+	// Check if we're the leader.
+	if m.noLeaderCheck == 0 && m.raft.State() != raft.Leader {
+		return errno(m.framesNotLeader(tracer, txn))
+	}
+
+	// Update the noLeaderCheck counter.
+	if m.noLeaderCheck > 0 {
+		m.noLeaderCheck--
+	}
+
+	filename := m.registry.ConnLeaderFilename(conn)
+	command := protocol.NewFrames(txn.ID(), filename, frames)
+	if err := m.apply(tracer, conn, command); err != nil {
+		// Check that transaction is still Pending or Writing. The hook-sync
+		// mechanism prevents our FSM from applying anything else, but let's
+		// assert it for sanity.
+		if txn.State() != transaction.Pending && txn.State() != transaction.Writing {
+			tracer.Panic("unexpected transaction state: %s", txn)
+		}
+
+		if isErrNotLeader(err) {
+			// This is relatively unlikely since we already checked
+			// for leadership at the beginning of the hook, but
+			// still possible in principle with a particularly bad
+			// timing.
+			//
+			// The same logic applies.
+			//
+			// We can be sure that the Frames command didn't get
+			// committed, so we can just mark the transaction as
+			// stale, create a surrogate follower and return. The
+			// Undo hook that will be fired right after will then
+			// no-op.
+			return errno(m.framesNotLeader(tracer, txn))
+		} else if isErrLeadershipLost(err) {
+			if frames.IsCommit() {
+				// Mark the transaction as zombie. Possible scenarios:
+				//
+				// 1. This server gets re-elected right away as leader.
+				//
+				//    In this case we'll try to apply this lost
+				//    command log again. If we succeed, our FSM
+				//    will transition this zombie transaction
+				//    into a surrogate follower and our next
+				//    Begin hook invocation will issue an Undo
+				//    command, which (if successful) will be a
+				//    no-op on our FSM and an actual rollback
+				//    on the followers (regardless of whether
+				//    this was the first non-commit frames
+				//    command or a further one). If we fail to
+				//    re-apply the command there will be a new
+				//    election, and we'll end up again in
+				//    either this case (1) or the next one
+				//    (2). Same if the Undo command fails.
+				//
+				// 2. Another server gets elected as leader.
+				//
+				//    In this case there are two possible
+				//    scenarios.
+				//
+				//    2.1. No quorum was reached for the lost
+				//         commit command. This means that no
+				//         FSM (including ours) will ever try
+				//         to apply it. If this lost commit
+				//         frames command was the first one of
+				//         a transaction, the new leader will
+				//         see no dangling follower and will
+				//         just start a new transaction with a
+				//         new ID, sending a Frames command to
+				//         our FSM. Our FSM will detect the
+				//         zombie transaction and simply purge
+				//         it from the registry.
+				//
+				//    2.2 A quorum was reached for the lost
+				//        commit command. This means that the
+				//        new leader will replicate it to every
+				//        server that didn't apply it yet,
+				//        which includes us, and then issue an
+				//        Undo command to abort the
+				//        transaction. In this case our FSM
+				//        will behave like in case 1.
+				tracer.Message("marking as zombie")
+				txn.Zombie()
+			} else {
+				// Mark the transaction as zombie. Possible scenarios:
+				//
+				// 1. This server gets re-elected right away as leader.
+				//
+				//    In this case we'll try to apply this lost
+				//    command log again. If we succeed, two
+				//    scenarios are possible:
+				//
+				//    1.1 This was the only Frames command in
+				//        the transaction, our FSM will convert
+				//        the zombie transaction into a
+				//        follower transaction, and apply it
+				//        normally, effectively recovering the
+				//        commit failure.
+				//
+				//    1.2 This was the last Frames command in a
+				//        series of one or more non-commit
+				//        Frames commands, which were all
+				//        applied.
+				//
+				// 2. Another server gets elected as leader.
+				//
+				//    In this case there are two possible
+				//    scenarios.
+				//
+				//    2.1. No quorum was reached for the lost
+				//         frames command. This means that no
+				//         FSM (including ours) will ever try
+				//         to apply it. If this lost non-commit
+				//         frames command was the first one of
+				//         a transaction, the new leader will
+				//         see no dangling follower and will
+				//         just start a new transaction with a
+				//         new ID, sending a Frames command to
+				//         our FSM. Our FSM will detect the
+				//         zombie transaction and simply purge
+				//         it from the registry.
+				//
+				//    2.2 A quorum was reached for the lost
+				//        frames command. This means that the
+				//        new leader will replicate it to every
+				//        server that didn't apply it yet,
+				//        which includes us. In this case our
+				//        FSM will detect the zombie and
+				//        resurrect it using the follower
+				//        connection for this database, and
+				//        possibly writing all preceding
+				//        non-commit frames to fully recover
+				//        the transaction (which was originally
+				//        rolled back on this server).
+				tracer.Message("marking as zombie")
+				txn.Zombie()
+			}
+		}
+
+		// TODO: under which circumstances can we get errors other than
+		// NotLeader/RaftShutdown and LeadershipLost? How to handle
+		// them? See also the comments in the apply() method.
+
+		return errno(err)
+	}
+
+	tracer.Message("done")
+
+	return 0
+}
+
+// Handle Frames failures due to this server not being the leader.
+func (m *Methods) framesNotLeader(tracer *trace.Tracer, txn *transaction.Txn) error {
+	if txn.State() == transaction.Pending {
+		// No Frames command was applied, so followers don't
+		// know about this transaction. We don't need to do
+		// anything special, the xUndo hook will just remove
+		// it.
+		tracer.Message("no frames command applied")
+	} else {
+		// At least one Frames command was applied, so the transaction
+		// exists on the followers. We mark the transaction as zombie,
+		// the Begin() hook of the next leader (either us or somebody else)
+		// will detect a dangling transaction and issue an Undo command
+		// to roll it back. In its applyUndo method our FSM will detect
+		// that the rollback is for a zombie and just no-op it.
+		tracer.Message("marking as zombie")
+		txn.Zombie()
+	}
+
+	// When we return an error, SQLite will fire the End hook.
+	tracer.Message("not leader")
+
+	return bindings.Error{Code: bindings.ErrIoErrNotLeader}
+}
+
+// Undo is the hook invoked by sqlite when a write transaction needs
+// to be rolled back.
+func (m *Methods) Undo(conn *bindings.Conn) int {
+	// We take the lock for the entire duration of the hook to avoid
+	// races between two concurrent hooks.
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Lock the registry.
+	m.registry.Lock()
+	defer m.registry.Unlock()
+
+	// Enable synchronization with the FSM: it will only execute commands
+	// applied during this hook, and block applying any other command until
+	// this hook is done.
+	m.registry.HookSyncSet()
+	defer m.registry.HookSyncReset()
+
+	tracer := m.registry.TracerConn(conn, "undo")
+	tracer.Message("start")
+
+	txn := m.registry.TxnByConn(conn)
+	if txn == nil {
+		tracer.Panic("no in-progress transaction")
+	}
+	tracer.Message("found txn %s", txn)
+
+	// Sanity check.
+	if !txn.IsLeader() {
+		tracer.Panic("unexpected transaction %s", txn)
+	}
+
+	if txn.IsZombie() {
+		// This zombie originated from the Frames hook. There are two scenarios:
+		//
+		// 1. Leadership was lost while applying the Frames command.
+		//
+		//    We can't simply remove the transaction since the Frames
+		//    command might eventually get committed. We just ignore
+		//    it, and let our FSM handle it in that case (i.e. if we
+		//    are re-elected or a quorum was reached and another leader
+		//    tries to apply it).
+		//
+		// 2. This server was not the leader anymore when the Frames
+		//    hook fired for a commit frames batch which was the last
+		//    of a sequence of non-commit ones.
+		//
+		//    In this case we're being invoked by SQLite which is
+		//    trying to rollback the transaction. We can't simply
+		//    remove the transaction since the next leader will detect
+		//    a dangling transaction and try to issue an Undo
+		//    command. We just ignore the zombie and let our FSM handle
+		//    it when the Undo command will be applied.
+		tracer.Message("done: ignore zombie")
+		return 0
+	}
+
+	if txn.State() == transaction.Pending {
+		// This means that the Undo hook fired because this node was
+		// not the leader when trying to apply the first Frames
+		// command, so no follower knows about it. We can just return,
+		// the transaction will be removed by the End hook.
+		tracer.Message("done: no frames command was sent")
+		return 0
+	}
+
+	// Check if we're the leader.
+	if m.noLeaderCheck == 0 && m.raft.State() != raft.Leader {
+		// If we have lost leadership, we're in a state where the
+		// transaction began on this node and possibly on a quorum of
+		// followers. We return an error, and SQLite will ignore it;
+		// however we need to create a surrogate follower, so that the
+		// next leader will try to undo it across all nodes.
+		tracer.Message("not leader")
+		m.surrogateWriting(tracer, txn)
+		return bindings.ErrIoErrNotLeader
+	}
+
+	// Update the noLeaderCheck counter.
+	if m.noLeaderCheck > 0 {
+		m.noLeaderCheck--
+	}
+
+	// We don't really care whether the Undo command applied just below
+	// will be committed or not. If the command fails, we'll create a
+	// surrogate follower: if the command still gets committed, then the
+	// rollback succeeds and the next leader will start fresh; if the
+	// command does not get committed, the next leader will find a stale
+	// follower and re-try to roll it back.
+	if txn.State() != transaction.Pending {
+		command := protocol.NewUndo(txn.ID())
+		if err := m.apply(tracer, conn, command); err != nil {
+			m.surrogateWriting(tracer, txn)
+			return errno(err)
+		}
+	}
+
+	tracer.Message("done")
+
+	return 0
+}
+
+// End is the hook invoked by sqlite when ending a write transaction.
+func (m *Methods) End(conn *bindings.Conn) int {
+	// We take the lock for the entire duration of the hook to avoid
+	// races between two concurrent hooks.
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Lock the registry.
+	m.registry.Lock()
+	defer m.registry.Unlock()
+
+	tracer := m.registry.TracerConn(conn, "end")
+	tracer.Message("start")
+
+	txn := m.registry.TxnByConn(conn)
+	if txn == nil {
+		// Check if we have a surrogate transaction instead.
+		filename := m.registry.ConnLeaderFilename(conn)
+		conn := m.registry.ConnFollower(filename)
+		txn = m.registry.TxnByConn(conn)
+		if txn == nil {
+			// Ignore missing transactions that might have been removed by a
+			// particularly bad timing where a new leader has already sent
+			// some Undo command following a leadership change and our FSM
+			// applied it against a surrogate, removing it from the
+			// registry.
+			tracer.Message("done: ignore missing transaction")
+			return 0
+		}
+	} else {
+		// Sanity check
+		if txn.Conn() != conn {
+			tracer.Panic("unexpected transaction for connection %v", conn)
+		}
+	}
+	tracer.Message("found txn %s", txn)
+
+	if !txn.IsLeader() {
+		// This must be a surrogate follower created by the Frames or
+		// Undo hooks. Let's ignore it, as it will be handled by the
+		// FSM of the next leader.
+		tracer.Message("done: ignore surrogate follower")
+		return 0
+	}
+
+	if txn.IsZombie() {
+		// Ignore zombie transactions as we don't know what will happen
+		// to them (either committed or not).
+		tracer.Message("done: ignore zombie")
+		return 0
+	}
+
+	tracer.Message("unregister transaction")
+	m.registry.TxnDel(txn.ID())
+
+	tracer.Message("done")
+
+	return 0
+}
+
+// Create a surrogate follower transaction, transitioning it to the Writing state.
+func (m *Methods) surrogateWriting(tracer *trace.Tracer, txn *transaction.Txn) {
+	tracer.Message("surrogate to Writing")
+	// TODO: fix
+	//txn = m.registry.TxnFollowerSurrogate(txn)
+	//txn.Frames(true, &sqlite3.ReplicationFramesParams{IsCommit: 0})
+}
+
+// Apply the given command through raft.
+func (m *Methods) apply(tracer *trace.Tracer, conn *bindings.Conn, cmd *protocol.Command) error {
+	tracer = tracer.With(trace.String("cmd", cmd.Name()))
+	tracer.Message("apply start")
+
+	data, err := protocol.MarshalCommand(cmd)
+	if err != nil {
+		return err
+	}
+
+	// We need to release the lock while the command is being applied,
+	// since the FSM of this raft instance needs to be able to acquire
+	// it. However, since we don't want the FSM to execute more than one
+	// log, we also configure the registry's HookSync so the FSM will block
+	// on executing any log command other than the one we are sending
+	// now. See also internal/registry/hook.go.
+	m.registry.HookSyncAdd(data)
+	m.registry.Unlock()
+	err = m.raft.Apply(data, m.applyTimeout).Error()
+	m.registry.Lock()
+
+	if err != nil {
+		tracer.Error("apply error", err)
+
+		// If the server has lost leadership or is shutting down, we
+		// return a dedicated error, so clients will typically retry
+		// against the new leader.
+		switch err {
+		case raft.ErrRaftShutdown:
+			// For our purposes, this is semantically equivalent
+			// to not being the leader anymore.
+			fallthrough
+		case raft.ErrNotLeader:
+			return bindings.Error{Code: bindings.ErrIoErrNotLeader}
+		case raft.ErrLeadershipLost:
+			return bindings.Error{Code: bindings.ErrIoErrLeadershipLost}
+		case raft.ErrEnqueueTimeout:
+			// This should be pretty much impossible, since Methods
+			// hooks are the only way to apply command logs, and
+			// hooks always wait for a command log to finish before
+			// applying a new one (see the Apply().Error() call
+			// above). We return SQLITE_INTERRUPT, which for our
+			// purposes has the same semantics as SQLITE_IOERR,
+			// i.e. it will automatically rollback the transaction.
+			return bindings.Error{Code: bindings.ErrInterrupt}
+		default:
+			// This is an unexpected raft error of some kind.
+			//
+			// TODO: We should investigate what this could be,
+			//       for example how to properly handle ErrAbortedByRestore
+			//       or log-store related errors. We should also
+			//       examine what SQLite exactly does if we return
+			//       SQLITE_INTERNAL.
+			return bindings.Error{Code: bindings.ErrInternal}
+		}
+	}
+
+	tracer.Message("apply done")
+	return nil
+}
+
+// Convert a Go error into a SQLite error number.
+func errno(err error) int {
+	switch e := err.(type) {
+	case bindings.Error:
+		return e.Code
+	default:
+		return bindings.ErrInternal
+	}
+}
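+
+// For example, errno(bindings.Error{Code: bindings.ErrBusy}) yields
+// bindings.ErrBusy, while any non-bindings error is mapped to
+// bindings.ErrInternal.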
+
+func isErrNotLeader(err error) bool {
+	if err, ok := err.(bindings.Error); ok {
+		if err.Code == bindings.ErrIoErrNotLeader {
+			return true
+		}
+	}
+	return false
+}
+
+func isErrLeadershipLost(err error) bool {
+	if err, ok := err.(bindings.Error); ok {
+		if err.Code == bindings.ErrIoErrLeadershipLost {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/trace.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/trace.go
new file mode 100644
index 0000000000..b085496efc
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/replication/trace.go
@@ -0,0 +1,23 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package replication
+
+/*
+// TracerName returns the name of the methods tracer to be used for the given
+// connection.
+func TracerName(connections *connection.Registry, conn *sqlite3.SQLiteConn) string {
+	return fmt.Sprintf("methods %d", connections.Serial(conn))
+}
+*/
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/iterate.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/iterate.go
new file mode 100644
index 0000000000..efeeff85b7
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/iterate.go
@@ -0,0 +1,29 @@
+package store
+
+import (
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// Handler handles a single command log yielded by Iterate.
+type Handler func(uint64, *raft.Log) error
+
+// Iterate through all command logs in the given store within the given range.
+func Iterate(logs raft.LogStore, r *Range, handler Handler) error {
+	for index := r.First; index <= r.Last; index++ {
+		log := &raft.Log{}
+		if err := logs.GetLog(index, log); err != nil {
+			return errors.Wrapf(err, "failed to get log %d", index)
+		}
+
+		if log.Type != raft.LogCommand {
+			continue
+		}
+
+		if err := handler(index, log); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
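+
+// Usage sketch (illustrative; logs stands for any raft.LogStore
+// implementation, and Range is defined in range.go of this package):
+//
+//	r, err := DefaultRange(logs)
+//	if err != nil {
+//		return err
+//	}
+//	err = Iterate(logs, r, func(index uint64, log *raft.Log) error {
+//		fmt.Printf("log %d has %d bytes\n", index, len(log.Data))
+//		return nil
+//	})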
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/range.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/range.go
new file mode 100644
index 0000000000..7b3380145e
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/range.go
@@ -0,0 +1,59 @@
+package store
+
+import (
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// DefaultRange returns a Range spanning all available indexes.
+func DefaultRange(logs raft.LogStore) (*Range, error) {
+	first, err := logs.FirstIndex()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get first index")
+	}
+	last, err := logs.LastIndex()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get last index")
+	}
+
+	return &Range{First: first, Last: last}, nil
+}
+
+// HeadRange returns a range that includes only the first n entries.
+func HeadRange(logs raft.LogStore, n int) (*Range, error) {
+	first, err := logs.FirstIndex()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get first index")
+	}
+	last, err := logs.LastIndex()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get last index")
+	}
+
+	// Iterate treats both bounds as inclusive, so the range must end at
+	// most n-1 entries past the first index.
+	if n > 0 && first+uint64(n)-1 < last {
+		last = first + uint64(n) - 1
+	}
+
+	return &Range{First: first, Last: last}, nil
+}
+
+// TailRange returns a range that includes only the last n entries.
+func TailRange(logs raft.LogStore, n int) (*Range, error) {
+	last, err := logs.LastIndex()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get last index")
+	}
+
+	// Iterate treats both bounds as inclusive, so start n-1 entries
+	// before the last index.
+	first := uint64(0)
+	if last >= uint64(n) {
+		first = last - uint64(n) + 1
+	}
+
+	return &Range{First: first, Last: last}, nil
+}
+
+// Range contains the first and last index of a dump.
+type Range struct {
+	First uint64
+	Last  uint64
+}
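+
+// For example (a sketch assuming a store whose logs span indexes 1 through
+// 100): HeadRange(logs, 10) yields [1, 10] and TailRange(logs, 10) yields
+// [91, 100], both bounds inclusive as consumed by Iterate.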
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/replay.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/replay.go
new file mode 100644
index 0000000000..a5ea4d7f8b
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/store/replay.go
@@ -0,0 +1,47 @@
+package store
+
+import (
+	"github.com/hashicorp/raft"
+)
+
+// Replay the commands in the given logs and snapshot stores using the given
+// dir as database directory.
+func Replay(logs raft.LogStore, snaps raft.SnapshotStore, r *Range, dir string) error {
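+	// Currently a no-op: the original implementation is kept commented out
+	// below for reference.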
+	return nil
+
+	/*
+		// Create a registry and a FSM.
+		registry := registry.New(dir)
+		fsm := replication.NewFSM(registry)
+
+		// Figure out if we have a snapshot to restore.
+		metas, err := snaps.List()
+		if err != nil {
+			return errors.Wrap(err, "failed to get snapshots list")
+		}
+
+		if len(metas) > 0 {
+			meta := metas[0] // The most recent.
+			_, reader, err := snaps.Open(meta.ID)
+			if err != nil {
+				return errors.Wrapf(err, "failed to open snapshot %s", meta.ID)
+			}
+			if err := fsm.Restore(reader); err != nil {
+				return errors.Wrapf(err, "failed to restore snapshot %s", meta.ID)
+			}
+
+			// Update the range
+			r.First = meta.Index + 1
+		}
+
+		// Replay the logs.
+		err = Iterate(logs, r, func(index uint64, log *raft.Log) error {
+			fsm.Apply(log)
+			return nil
+		})
+		if err != nil {
+			return errors.Wrap(err, "failed to iterate through the logs")
+		}
+		return nil
+	*/
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/buffer.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/buffer.go
new file mode 100644
index 0000000000..6feec95d04
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/buffer.go
@@ -0,0 +1,88 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"time"
+)
+
+// A circular buffer of trace entries.
+type buffer struct {
+	// Fixed-size slice of entries in the buffer. When the slice fills, new
+	// entries will replace old ones at the beginning of the slice.
+	entries []entry
+
+	// Track the position at which the next entry will be written.
+	cursor *cursor
+}
+
+// Create a new circular buffer of trace entries, retaining at most the given
+// number of entries.
+func newBuffer(n int) *buffer {
+	return &buffer{
+		entries: make([]entry, n),
+		cursor:  newCursor(0, n),
+	}
+}
+
+// Append a new entry to the buffer, possibly replacing an older one.
+func (b *buffer) Append(timestamp time.Time, message string, args []interface{}, err error, fields *fields) {
+	i := b.cursor.Position()
+
+	b.entries[i].timestamp = timestamp
+	b.entries[i].message = message
+	for j := range b.entries[i].args {
+		// Set arg j to either the provided arg or nil
+		if j < len(args) {
+			b.entries[i].args[j] = args[j]
+		} else {
+			b.entries[i].args[j] = nil
+		}
+	}
+	b.entries[i].error = err
+	b.entries[i].fields = fields
+
+	b.cursor.Advance()
+}
+
+// Return the last inserted entry.
+func (b *buffer) Last() entry {
+	cursor := newCursor(b.cursor.Position(), len(b.entries))
+	cursor.Retract()
+	return b.entries[cursor.Position()]
+}
+
+// Return the list of current entries in the buffer.
+func (b *buffer) Entries() []entry {
+	entries := make([]entry, 0)
+
+	// We don't keep track of the actual number of entries in the buffer;
+	// instead we iterate them backwards until we find a "null" entry.
+	//
+	// A "null" entry is detected by looking at its timestamp and seeing
+	// that it still holds the zero time.
+	n := len(b.entries)
+	cursor := newCursor(b.cursor.Position(), n)
+	for i := 0; i < n; i++ {
+		cursor.Retract()
+		previous := b.entries[cursor.Position()]
+		if previous.timestamp.Unix() == epoch {
+			break
+		}
+		entries = append([]entry{previous}, entries...)
+	}
+
+	return entries
+}
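+
+// Usage sketch (values are illustrative):
+//
+//	b := newBuffer(8)
+//	b.Append(time.Now(), "applied index %d", []interface{}{uint64(42)}, nil, &fields{})
+//	last := b.Last()   // the entry just appended
+//	all := b.Entries() // all non-null entries, oldest first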
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/constants.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/constants.go
new file mode 100644
index 0000000000..f4e8c83846
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/constants.go
@@ -0,0 +1,21 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+// Maximum number of key/value fields each trace entry might have.
+const maxFields = 6
+
+// Maximum number of format arguments each trace entry might have.
+const maxArgs = 4
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/cursor.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/cursor.go
new file mode 100644
index 0000000000..f2810af43c
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/cursor.go
@@ -0,0 +1,40 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+// A cursor holds the index of an entry of a circular buffer.
+type cursor struct {
+	position int // Current position of the cursor
+	length   int // Length of the circular buffer.
+}
+
+func newCursor(position, length int) *cursor {
+	return &cursor{
+		position: position,
+		length:   length,
+	}
+}
+
+func (c *cursor) Position() int {
+	return c.position
+}
+
+func (c *cursor) Advance() {
+	c.position = (c.position + 1) % c.length
+}
+
+func (c *cursor) Retract() {
+	c.position = (c.position + c.length - 1) % c.length
+}
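+
+// For instance, with a buffer of length 3, a cursor at position 2 wraps
+// around on Advance and comes back on Retract:
+//
+//	c := newCursor(2, 3)
+//	c.Advance() // position is now 0
+//	c.Retract() // position is back to 2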
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/doc.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/doc.go
new file mode 100644
index 0000000000..875ab66ab2
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package trace implements a tracing system that can handle emitting large
+// amounts of entries with minimal performance overhead.
+package trace
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/entry.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/entry.go
new file mode 100644
index 0000000000..3d6ea9ad60
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/entry.go
@@ -0,0 +1,65 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"fmt"
+	"time"
+)
+
+// A single trace entry.
+type entry struct {
+	timestamp time.Time // Time at which the entry was created.
+	message   string    // Message of the entry.
+	args      args      // Additional format arguments for the message.
+	error     error     // Error associated with the entry.
+
+	// Key/value fields associated with the entry. This is a pointer
+	// because all entries of a specific tracer share the same fields.
+	fields *fields
+}
+
+// Timestamp returns a string representation of the entry's timestamp.
+func (e entry) Timestamp() string {
+	return e.timestamp.Format("2006-01-02 15:04:05.00000")
+}
+
+// Message returns a string with the entry's message along with its fields,
+// arguments and error.
+func (e entry) Message() string {
+	message := e.message
+
+	if e.args[0] != nil {
+		args := make([]interface{}, 0)
+		for i := 0; e.args[i] != nil; i++ {
+			args = append(args, e.args[i])
+		}
+		message = fmt.Sprintf(message, args...)
+	}
+
+	fields := ""
+	for i := 0; i < len(e.fields) && e.fields[i].key != ""; i++ {
+		fields += fmt.Sprintf("%s ", e.fields[i])
+	}
+
+	if e.error != nil {
+		message += fmt.Sprintf(": %v", e.error)
+	}
+
+	return fmt.Sprintf("%s%s", fields, message)
+}
+
+type args [maxArgs]interface{}
+type fields [maxFields]Field
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/field.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/field.go
new file mode 100644
index 0000000000..f7f48cbc73
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/field.go
@@ -0,0 +1,56 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import "fmt"
+
+// String returns a Field with a string value.
+func String(key string, value string) Field {
+	return Field{
+		key:      key,
+		isString: true,
+		string:   value,
+	}
+}
+
+// Integer returns a Field with an integer value.
+func Integer(key string, value int64) Field {
+	return Field{
+		key:     key,
+		integer: value,
+	}
+}
+
+// Field holds a single key/value pair in a trace Entry.
+type Field struct {
+	key      string // Name of the key
+	isString bool   // Whether the value is a string or an integer
+	string   string // String value
+	integer  int64  // Integer value
+}
+
+func (f Field) String() string {
+	format := "%s="
+	args := []interface{}{f.key}
+	if f.isString {
+		format += "%s"
+		args = append(args, f.string)
+	} else {
+		format += "%d"
+		args = append(args, f.integer)
+	}
+
+	return fmt.Sprintf(format, args...)
+}
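+
+// For example:
+//
+//	fmt.Println(String("cmd", "begin")) // prints "cmd=begin"
+//	fmt.Println(Integer("txn", 123))    // prints "txn=123"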
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/set.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/set.go
new file mode 100644
index 0000000000..6fb40142c3
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/set.go
@@ -0,0 +1,131 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"testing"
+	"time"
+)
+
+// Set manages the lifecycle of a set of Tracers.
+//
+// When Tracer.Panic() is invoked on any of the tracers in this set, the entries of
+// all tracers in the set will be dumped as part of the panic message, ordered
+// by timestamp.
+type Set struct {
+	tracers map[string]*Tracer // Index of available tracers by name.
+	retain  int                // Number of entries each tracer will retain.
+
+	mu sync.RWMutex
+
+	// For testing only.
+	now     now        // Function returning the current time.
+	testing testing.TB // Emitted entries will also be sent to the test logger.
+	node    int        // Index of the node emitting the entries.
+}
+
+// NewSet creates a new tracer Set.
+//
+// Each Set has a number of 'tracers', each holding a different buffer
+// of trace entries, and each retaining at most 'retain' entries.
+func NewSet(retain int) *Set {
+	return &Set{
+		tracers: make(map[string]*Tracer),
+		retain:  retain,
+		now:     time.Now,
+	}
+}
+
+// Add a new tracer to the set.
+func (s *Set) Add(name string) *Tracer {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	_, ok := s.tracers[name]
+	if ok {
+		panic(fmt.Sprintf("a tracer named %s is already registered", name))
+	}
+	buffer := newBuffer(s.retain)
+	tracer := newTracer(s, name, buffer)
+	s.tracers[name] = tracer
+	return tracer
+}
+
+// Get returns the tracer with the given name, panicking if no tracer is
+// registered under that name.
+func (s *Set) Get(name string) *Tracer {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	tracer, ok := s.tracers[name]
+	if !ok {
+		panic(fmt.Sprintf("no tracer named %s is registered", name))
+	}
+	return tracer
+}
+
+// Del removes the tracer with the given name.
+func (s *Set) Del(name string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	_, ok := s.tracers[name]
+	if !ok {
+		panic(fmt.Sprintf("no tracer named %s is registered", name))
+	}
+	delete(s.tracers, name)
+}
+
+// String returns a string representing all current entries, in all current
+// tracers, ordered by timestamp.
+func (s *Set) String() string {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	entries := make([]struct {
+		e entry   // Actual entry object
+		t *Tracer // Tracer that emitted the entry
+	}, 0)
+
+	for _, tracer := range s.tracers {
+		for _, e := range tracer.buffer.Entries() {
+			entries = append(entries, struct {
+				e entry
+				t *Tracer
+			}{e, tracer})
+		}
+	}
+	sort.Slice(entries, func(i, j int) bool {
+		return entries[i].e.timestamp.Before(entries[j].e.timestamp)
+	})
+
+	result := ""
+
+	for _, entry := range entries {
+		result += fmt.Sprintf(
+			"%s: %s: %s\n", entry.e.Timestamp(), entry.t.name, entry.e.Message())
+	}
+
+	return result
+}
+
+// Testing sets the tracers to log emitted entries through the given testing
+// instance.
+func (s *Set) Testing(t testing.TB, node int) {
+	s.testing = t
+	s.node = node
+}
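+
+// Typical usage sketch (values are illustrative):
+//
+//	set := NewSet(250)
+//	tracer := set.Add("fsm")
+//	tracer.Message("applied index %d", 42)
+//	fmt.Print(set.String()) // entries from all tracers, ordered by timestamp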
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/time.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/time.go
new file mode 100644
index 0000000000..74a687d60b
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/time.go
@@ -0,0 +1,23 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import "time"
+
+// A function returning the current time.
+type now func() time.Time
+
+// Unix timestamp of the zero time.Time value.
+var epoch = (time.Time{}).Unix()
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/tracer.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/tracer.go
new file mode 100644
index 0000000000..09167db5cf
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/trace/tracer.go
@@ -0,0 +1,96 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"fmt"
+)
+
+// Tracer holds a buffer of recent trace entries in a trace Set.
+type Tracer struct {
+	set    *Set    // Set this tracer is part of.
+	name   string  // Name of the tracer.
+	buffer *buffer // Ring buffer for trace entries.
+	fields fields  // Tracer-specific key/value pairs.
+}
+
+// Creates a new tracer.
+func newTracer(set *Set, name string, buffer *buffer) *Tracer {
+	return &Tracer{
+		set:    set,
+		name:   name,
+		buffer: buffer,
+		fields: fields{},
+	}
+}
+
+// Message emits a new trace message.
+func (t *Tracer) Message(message string, args ...interface{}) {
+	if n := len(args); n > maxArgs {
+		panic(fmt.Sprintf("a trace entry can have at most %d args, but %d were given", maxArgs, n))
+	}
+	t.emit(message, args, nil)
+}
+
+// Error emits a new trace entry with an error attached.
+func (t *Tracer) Error(message string, err error) {
+	t.emit(message, nil, err)
+}
+
+// Panic causes a Go panic which will print all trace entries across all
+// tracers.
+func (t *Tracer) Panic(message string, v ...interface{}) {
+	message = fmt.Sprintf(message, v...)
+	if t.set.testing == nil {
+		message += "\n\ntrace:\n" + t.set.String()
+	}
+	panic(message)
+}
+
+// With returns a new Tracer instance emitting entries to the same buffer as this
+// tracer, but with additional predefined fields.
+func (t *Tracer) With(fields ...Field) *Tracer {
+	if n := len(fields); n > maxFields {
+		panic(fmt.Sprintf("a trace entry can have at most %d fields, but %d were given", maxFields, n))
+	}
+
+	// Create the child tracer, cloning the parent and using its entries
+	// buffer.
+	tracer := newTracer(t.set, t.name, t.buffer)
+
+	// Copy the fields of the parent into the child.
+	i := 0
+	for ; t.fields[i].key != ""; i++ {
+		tracer.fields[i] = t.fields[i]
+	}
+
+	// Add the child fields.
+	for j := range fields {
+		tracer.fields[i+j] = fields[j]
+	}
+
+	return tracer
+}
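+
+// For example (mirroring how the replication hooks in this patch use it):
+//
+//	child := tracer.With(String("cmd", "begin"), Integer("txn", 7))
+//	child.Message("start")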
+
+// Emit a new trace entry.
+func (t *Tracer) emit(message string, args []interface{}, err error) {
+	t.buffer.Append(t.set.now(), message, args, err, &t.fields)
+
+	if t.set.testing != nil {
+		entry := t.buffer.Last()
+		format := "%d: %s: %s: %s\n"
+		t.set.testing.Logf(format, t.set.node, entry.Timestamp(), t.name, entry.Message())
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/state.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/state.go
new file mode 100644
index 0000000000..3ec38c4408
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/state.go
@@ -0,0 +1,67 @@
+package transaction
+
+import (
+	"github.com/ryanfaerman/fsm"
+)
+
+// Possible transaction states. Most states are associated with SQLite
+// replication hooks that are invoked upon transitioning from one lifecycle
+// state to the next.
+const (
+	Pending = fsm.State("pending") // Initial state right after creation.
+	Writing = fsm.State("writing") // After a non-commit frames command has been executed.
+	Written = fsm.State("written") // After a final commit frames command has been executed.
+	Undone  = fsm.State("undone")  // After an undo command has been executed.
+	Doomed  = fsm.State("doomed")  // The transaction has errored.
+)
+
+// Create a new FSM initialized with a fresh state object set to Pending.
+func newMachine() fsm.Machine {
+	return fsm.New(
+		fsm.WithRules(newRules()),
+		fsm.WithSubject(newState()),
+	)
+}
+
+// Capture valid state transitions within a transaction.
+func newRules() fsm.Ruleset {
+	rules := fsm.Ruleset{}
+
+	for o, states := range transitions {
+		for _, e := range states {
+			rules.AddTransition(fsm.T{O: o, E: e})
+		}
+	}
+
+	return rules
+}
+
+// Map of all valid state transitions.
+var transitions = map[fsm.State][]fsm.State{
+	Pending: {Writing, Written, Undone},
+	Writing: {Writing, Written, Undone, Doomed},
+	Written: {Doomed},
+	Undone:  {Doomed},
+}
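+
+// For example, Pending -> Writing -> Written is a valid path (one or more
+// non-commit frames commands followed by a final commit one), while
+// Written -> Writing is not.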
+
+// Track the state of a transaction. Implements the fsm.Stater interface.
+type state struct {
+	state fsm.State
+}
+
+// Return a new transaction state object, set to Pending.
+func newState() *state {
+	return &state{
+		state: Pending,
+	}
+}
+
+// CurrentState returns the current state, implementing fsm.Stater.
+func (s *state) CurrentState() fsm.State {
+	return s.state
+}
+
+// SetState switches the current state, implementing fsm.Stater.
+func (s *state) SetState(state fsm.State) {
+	s.state = state
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/txn.go b/vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/txn.go
new file mode 100644
index 0000000000..76f64e1502
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/internal/transaction/txn.go
@@ -0,0 +1,237 @@
+package transaction
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/ryanfaerman/fsm"
+)
+
+// Txn captures information about an active WAL write transaction that has been
+// started on a SQLite connection configured to be in either leader or follower
+// replication mode.
+type Txn struct {
+	conn     *bindings.Conn // Underlying SQLite db.
+	id       uint64         // Transaction ID.
+	machine  fsm.Machine    // Internal fsm for validating state changes.
+	isLeader bool           // Whether our connection is in leader mode.
+	isZombie bool           // Whether this is a zombie transaction, see Zombie().
+	dryRun   bool           // Dry run mode, don't invoke actual SQLite hooks.
+
+	// For leader transactions, these are the parameters of all non-commit
+	// frames commands that were executed so far during this
+	// transaction.
+	//
+	// They are used in case the last commit frames command failed with
+	// ErrLeadershipLost, and either the same server gets re-elected or a
+	// quorum was reached despite the glitch and another server was
+	// elected. In that situation the server that lost leadership in the
+	// first place will need to replay the whole transaction using a
+	// follower connection, since its transaction (associated with a leader
+	// connection) was rolled back by SQLite.
+	frames []bindings.WalReplicationFrameInfo
+}
+
+// New creates a new Txn instance.
+func New(conn *bindings.Conn, id uint64) *Txn {
+	return &Txn{
+		conn:    conn,
+		id:      id,
+		machine: newMachine(),
+	}
+}
+
+func (t *Txn) String() string {
+	s := fmt.Sprintf("%d %s as ", t.id, t.State())
+	if t.IsLeader() {
+		s += "leader"
+		if t.IsZombie() {
+			s += " (zombie)"
+		}
+	} else {
+		s += "follower"
+	}
+	return s
+}
+
+// Leader marks this transaction as a leader transaction.
+//
+// A leader transaction is automatically set to dry-run, since SQLite itself
+// will trigger the relevant WAL APIs when transitioning between states.
+//
+// Depending on the particular replication hook being executed, SQLite might do
+// that before or after the hook. See src/pager.c in the SQLite source code for
+// details about exactly when the WAL APIs are invoked with respect to the
+// various sqlite3_replication_methods hooks.
+func (t *Txn) Leader() {
+	if t.isLeader {
+		panic("transaction is already marked as leader")
+	}
+	t.isLeader = true
+	t.DryRun()
+}
+
+// IsLeader returns true if the underlying connection is in leader
+// replication mode.
+func (t *Txn) IsLeader() bool {
+	return t.isLeader
+}
+
+// DryRun makes this transaction only transition between states, without
+// actually invoking the relevant SQLite APIs.
+//
+// This is used to create a surrogate follower, and for tests.
+func (t *Txn) DryRun() {
+	if t.dryRun {
+		panic("transaction is already in dry-run mode")
+	}
+	t.dryRun = true
+}
+
+// Conn returns the sqlite connection that started this write
+// transaction.
+func (t *Txn) Conn() *bindings.Conn {
+	return t.conn
+}
+
+// ID returns the ID associated with this transaction.
+func (t *Txn) ID() uint64 {
+	return t.id
+}
+
+// State returns the current state of the transaction.
+func (t *Txn) State() fsm.State {
+	return t.machine.Subject.CurrentState()
+}
+
+// Frames writes frames to the WAL.
+func (t *Txn) Frames(begin bool, info bindings.WalReplicationFrameInfo) error {
+	state := Writing
+	if info.IsCommitGet() {
+		state = Written
+	}
+	return t.transition(state, begin, info)
+}
+
+// Undo reverts all changes to the WAL since the start of the
+// transaction.
+func (t *Txn) Undo() error {
+	return t.transition(Undone)
+}
+
+// Zombie marks this transaction as zombie. It must be called only for leader
+// transactions.
+//
+// A zombie transaction is one whose leader has lost leadership while applying
+// the associated FSM command. The transaction is left in whatever state it
+// was in at that point.
+func (t *Txn) Zombie() {
+	if !t.isLeader {
+		panic("follower transactions can't be marked as zombie")
+	}
+	if t.isZombie {
+		panic("transaction is already marked as zombie")
+	}
+	t.isZombie = true
+}
+
+// IsZombie returns true if this is a zombie transaction.
+func (t *Txn) IsZombie() bool {
+	if !t.isLeader {
+		panic("follower transactions can't be zombie")
+	}
+	return t.isZombie
+}
+
+// Resurrect a zombie transaction.
+//
+// This should be called only on zombie transactions in Pending or Writing
+// state, in case a leader that lost leadership was re-elected right away or a
+// quorum for a lost commit frames command was reached and the new leader is
+// replicating it on the former leader.
+//
+// A new follower transaction will be created with the given connection (which
+// is assumed to be in follower replication mode), and set to the same ID as
+// this zombie.
+//
+// All preceding non-commit frames commands (if any) will be re-applied on the
+// follower transaction.
+//
+// If no error occurs, the newly created follower transaction is returned.
+func (t *Txn) Resurrect(conn *bindings.Conn) (*Txn, error) {
+	if !t.isLeader {
+		panic("attempt to resurrect follower transaction")
+	}
+	if !t.isZombie {
+		panic("attempt to resurrect non-zombie transaction")
+	}
+	if t.State() != Pending && t.State() != Writing {
+		panic("attempt to resurrect a transaction not in pending or writing state")
+	}
+	txn := New(conn, t.ID())
+
+	for i, frames := range t.frames {
+		begin := i == 0
+		if err := txn.transition(Writing, begin, frames); err != nil {
+			return nil, err
+		}
+	}
+
+	return txn, nil
+}
+
+// Try to transition to the given state. If the transition is invalid,
+// panic.
+func (t *Txn) transition(state fsm.State, args ...interface{}) error {
+	if err := t.machine.Transition(state); err != nil {
+		panic(fmt.Sprintf("invalid %s -> %s transition", t.State(), state))
+	}
+
+	if t.isLeader {
+		// In leader mode, don't actually invoke SQLite replication
+		// API, since that will be done by SQLite internally.
+		switch state {
+		case Writing:
+			// Save non-commit frames in case the last commit fails
+			// and gets recovered by the same leader.
+			begin := args[0].(bool)
+			frames := args[1].(bindings.WalReplicationFrameInfo)
+			if begin {
+				t.frames = append(t.frames, frames)
+			}
+		case Written:
+			fallthrough
+		case Undone:
+			// Reset saved frames. They are not going to be used
+			// anymore, and clearing them helps garbage collection,
+			// since the tracer holds references to a number of
+			// transaction objects.
+			t.frames = nil
+		}
+	}
+
+	if t.dryRun {
+		// In dry run mode, don't actually invoke any SQLite API.
+		return nil
+	}
+
+	var err error
+	switch state {
+	case Writing:
+		fallthrough
+	case Written:
+		// args[0] holds the begin flag, which is not needed here.
+		info := args[1].(bindings.WalReplicationFrameInfo)
+		err = t.conn.WalReplicationFrames(info)
+	case Undone:
+		err = t.conn.WalReplicationUndo()
+	}
+
+	if err != nil {
+		if err := t.machine.Transition(Doomed); err != nil {
+			panic(fmt.Sprintf("cannot doom from %s", t.State()))
+		}
+	}
+
+	return err
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/log.go b/vendor/github.com/CanonicalLtd/go-dqlite/log.go
new file mode 100644
index 0000000000..6f2106a124
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/log.go
@@ -0,0 +1,33 @@
+package dqlite
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/logging"
+)
+
+// LogFunc is a function that can be used for logging.
+type LogFunc = logging.Func
+
+// LogLevel defines the logging level.
+type LogLevel = logging.Level
+
+// Available logging levels.
+const (
+	LogDebug = logging.Debug
+	LogInfo  = logging.Info
+	LogWarn  = logging.Warn
+	LogError = logging.Error
+)
+
+// Create a LogFunc with reasonable defaults.
+func defaultLogFunc() LogFunc {
+	logger := log.New(os.Stdout, "", log.LstdFlags|log.Lmicroseconds)
+
+	return func(l LogLevel, format string, a ...interface{}) {
+		format = fmt.Sprintf("[%s]: %s", l.String(), format)
+		logger.Printf(format, a...)
+	}
+}
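
Since LogFunc is a plain function type, callers can wrap filtering or routing around it. A minimal sketch, under the assumption (not guaranteed by this file) that logging levels are ordered integers with Debug lowest:

    package main

    import (
        "fmt"
        "log"
        "os"

        dqlite "github.com/CanonicalLtd/go-dqlite"
    )

    // filteredLogFunc returns a LogFunc that drops messages below min.
    func filteredLogFunc(min dqlite.LogLevel) dqlite.LogFunc {
        logger := log.New(os.Stderr, "", log.LstdFlags)
        return func(l dqlite.LogLevel, format string, a ...interface{}) {
            if l < min { // assumes Level values are ordered, Debug < Error
                return
            }
            logger.Printf("[%s]: %s", l.String(), fmt.Sprintf(format, a...))
        }
    }

    func main() {
        f := filteredLogFunc(dqlite.LogWarn)
        f(dqlite.LogDebug, "dropped %d", 1)
        f(dqlite.LogError, "kept %d", 2)
    }
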
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/recover/delete/delete.go b/vendor/github.com/CanonicalLtd/go-dqlite/recover/delete/delete.go
new file mode 100644
index 0000000000..d6355b81cd
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/recover/delete/delete.go
@@ -0,0 +1,36 @@
+package delete
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/store"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// Delete removes from the given store all log entries from the given index onwards.
+func Delete(logs raft.LogStore, index uint64) error {
+	r, err := store.DefaultRange(logs)
+	if err != nil {
+		return errors.Wrap(err, "failed to get current log store range")
+	}
+
+	found := false
+
+	err = store.Iterate(logs, r, func(i uint64, log *raft.Log) error {
+		if i == index {
+			found = true
+		}
+		return nil
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to iterate through log store")
+	}
+
+	if !found {
+		return fmt.Errorf("log %d not found", index)
+	}
+
+	if err := logs.DeleteRange(index, r.Last); err != nil {
+		return errors.Wrapf(err, "failed to delete range %d -> %d", index, r.Last)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/dump.go b/vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/dump.go
new file mode 100644
index 0000000000..9b1f00a1e3
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/dump.go
@@ -0,0 +1,105 @@
+package dump
+
+import (
+	"fmt"
+	"io"
+	"log"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/protocol"
+	"github.com/CanonicalLtd/go-dqlite/internal/store"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// Dump the content of a dqlite store.
+func Dump(logs raft.LogStore, snaps raft.SnapshotStore, out io.Writer, options ...Option) error {
+	o := defaultOptions()
+	for _, option := range options {
+		err := option(logs, o)
+		if err != nil {
+			return err
+		}
+	}
+
+	if o.r == nil {
+		r, err := store.DefaultRange(logs)
+		if err != nil {
+			return err
+		}
+		o.r = r
+	}
+
+	if o.dir != "" {
+		// Replay the logs.
+		if err := store.Replay(logs, snaps, o.r, o.dir); err != nil {
+			return errors.Wrap(err, "failed to replay logs")
+		}
+		return nil
+	}
+
+	logger := log.New(out, "", 0)
+
+	h := func(index uint64, log *raft.Log) error {
+		cmd, err := protocol.UnmarshalCommand(log.Data)
+		if err != nil {
+			return errors.Wrapf(err, "index %d: failed to unmarshal command", index)
+		}
+
+		logger.Print(dumpCommand(index, cmd))
+		return nil
+	}
+
+	return store.Iterate(logs, o.r, h)
+}
+
+func dumpCommand(index uint64, cmd *protocol.Command) string {
+	var name string
+	var dump string
+	switch payload := cmd.Payload.(type) {
+	case *protocol.Command_Open:
+		name = "open"
+		dump = dumpOpen(payload.Open)
+	case *protocol.Command_Begin:
+		name = "begin"
+		dump = dumpBegin(payload.Begin)
+	case *protocol.Command_Frames:
+		name = "frames"
+		dump = dumpFrames(payload.Frames)
+	case *protocol.Command_Undo:
+		name = "undo"
+		dump = dumpUndo(payload.Undo)
+	case *protocol.Command_End:
+		name = "end"
+		dump = dumpEnd(payload.End)
+	case *protocol.Command_Checkpoint:
+		name = "checkpoint"
+		dump = dumpCheckpoint(payload.Checkpoint)
+	}
+
+	return fmt.Sprintf("index %6d: %-8s: %s", index, name, dump)
+}
+
+func dumpOpen(params *protocol.Open) string {
+	return fmt.Sprintf("name: %8s", params.Name)
+}
+
+func dumpBegin(params *protocol.Begin) string {
+	return fmt.Sprintf("name: %8s txn: %6d", params.Name, params.Txid)
+}
+
+func dumpFrames(params *protocol.Frames) string {
+	return fmt.Sprintf("name: %8s txn: %6d commit: %d pages: %2d",
+		params.Filename, params.Txid, params.IsCommit, len(params.PageNumbers))
+}
+
+func dumpUndo(params *protocol.Undo) string {
+	return fmt.Sprintf("txn: %6d", params.Txid)
+}
+
+func dumpEnd(params *protocol.End) string {
+	return fmt.Sprintf("txn: %6d", params.Txid)
+}
+
+func dumpCheckpoint(params *protocol.Checkpoint) string {
+	return fmt.Sprintf("file: %-8s", params.Name)
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/options.go b/vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/options.go
new file mode 100644
index 0000000000..a443cdb8b6
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/recover/dump/options.go
@@ -0,0 +1,56 @@
+package dump
+
+import (
+	"github.com/CanonicalLtd/go-dqlite/internal/store"
+	"github.com/hashicorp/raft"
+)
+
+// Option is a function that can be used to tweak the output of Dump.
+type Option func(logs raft.LogStore, o *options) error
+
+// Tail limits the output to the last N entries.
+func Tail(n int) Option {
+	return func(logs raft.LogStore, o *options) error {
+		r, err := store.TailRange(logs, n)
+		if err != nil {
+			return err
+		}
+
+		o.r = r
+
+		return nil
+	}
+}
+
+// Head limits the output to the first N entries.
+func Head(n int) Option {
+	return func(logs raft.LogStore, o *options) error {
+		r, err := store.HeadRange(logs, n)
+		if err != nil {
+			return err
+		}
+
+		o.r = r
+
+		return nil
+	}
+}
+
+// Replay the commands, generating the resulting SQLite database files in the given dir.
+func Replay(dir string) Option {
+	return func(logs raft.LogStore, o *options) error {
+		o.dir = dir
+		return nil
+	}
+}
+
+// Hold options for the Dump function.
+type options struct {
+	r   *store.Range
+	dir string
+}
+
+// Return the default Dump options.
+func defaultOptions() *options {
+	return &options{}
+}
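
Putting Dump and its options together, a minimal sketch, assuming raft log and snapshot stores obtained elsewhere (for instance via the recover.Open helper in the next file); the /tmp path is a placeholder:

    package dumpexample

    import (
        "log"
        "os"

        "github.com/CanonicalLtd/go-dqlite/recover/dump"
        "github.com/hashicorp/raft"
    )

    func dumpStore(logs raft.LogStore, snaps raft.SnapshotStore) {
        // Print the last 10 log entries to stdout.
        if err := dump.Dump(logs, snaps, os.Stdout, dump.Tail(10)); err != nil {
            log.Fatal(err)
        }

        // Replay all commands, materializing the SQLite files under /tmp/replayed.
        if err := dump.Dump(logs, snaps, os.Stdout, dump.Replay("/tmp/replayed")); err != nil {
            log.Fatal(err)
        }
    }
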
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/recover/open.go b/vendor/github.com/CanonicalLtd/go-dqlite/recover/open.go
new file mode 100644
index 0000000000..bdc57a4bce
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/recover/open.go
@@ -0,0 +1,30 @@
+package recover
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/raft-boltdb"
+	"github.com/pkg/errors"
+)
+
+// Open the raft log store and snapshot store in the given dir.
+func Open(dir string) (raft.LogStore, raft.SnapshotStore, error) {
+	if _, err := os.Stat(dir); err != nil {
+		return nil, nil, errors.Wrap(err, "invalid raft data dir")
+	}
+
+	logs, err := raftboltdb.NewBoltStore(filepath.Join(dir, "logs.db"))
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to open boltdb file")
+	}
+
+	snaps, err := raft.NewFileSnapshotStore(dir, 1, ioutil.Discard)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to open snapshot store")
+	}
+
+	return logs, snaps, nil
+}
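
A sketch tying Open to the delete helper from earlier in this patch; the data directory and index are placeholders:

    package main

    import (
        "log"

        dqrecover "github.com/CanonicalLtd/go-dqlite/recover"
        "github.com/CanonicalLtd/go-dqlite/recover/delete"
    )

    func main() {
        logs, _, err := dqrecover.Open("/var/lib/myapp/raft") // placeholder dir
        if err != nil {
            log.Fatal(err)
        }

        // Drop entry 1000 and everything after it from the log store.
        if err := delete.Delete(logs, 1000); err != nil {
            log.Fatal(err)
        }
    }
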
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/registry.go b/vendor/github.com/CanonicalLtd/go-dqlite/registry.go
new file mode 100644
index 0000000000..33a180e773
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/registry.go
@@ -0,0 +1,50 @@
+package dqlite
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/logging"
+	"github.com/CanonicalLtd/go-dqlite/internal/registry"
+)
+
+// Registry tracks internal data shared by the dqlite Driver and FSM.
+type Registry struct {
+	name     string
+	vfs      *bindings.Vfs
+	logger   *bindings.Logger
+	registry *registry.Registry
+}
+
+// NewRegistry creates a new Registry, which is expected to be passed to both
+// NewFSM and NewDriver.
+//
+// The ID parameter is a string identifying the local node.
+func NewRegistry(id string) *Registry {
+	return NewRegistryWithLogger(id, logging.Stdout())
+}
+
+// NewRegistryWithLogger returns a registry configured with the given logger.
+func NewRegistryWithLogger(id string, log LogFunc) *Registry {
+	name := fmt.Sprintf("dqlite-%s", id)
+
+	logger := bindings.NewLogger(log)
+
+	vfs, err := bindings.NewVfs(name, logger)
+	if err != nil {
+		panic(fmt.Sprintf("failed to register VFS: %v", err))
+	}
+
+	return &Registry{
+		name:     name,
+		vfs:      vfs,
+		registry: registry.New(vfs),
+		logger:   logger,
+	}
+}
+
+// Close the registry.
+func (r *Registry) Close() {
+	r.vfs.Close()
+	r.logger.Close()
+}
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/run-demo b/vendor/github.com/CanonicalLtd/go-dqlite/run-demo
new file mode 100755
index 0000000000..dfdb280059
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/run-demo
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+
+import os
+import sys
+import shutil
+import subprocess
+import time
+
+BUILD_DQLITE = "go build -tags libsqlite3 .".split(" ")
+BUILD_DEMO = "go build -tags libsqlite3 testdata/demo.go".split(" ")
+BASE_PORT = 9980
+
+
+def spawnNode(n, purge=False):
+    data = "/tmp/dqlite-demo-%d" % n
+    port = BASE_PORT + n
+
+    if purge and os.path.exists(data):
+        shutil.rmtree(data)
+    if not os.path.exists(data):
+        os.mkdir(data)
+
+    args = ["./demo", "-data", data, "-addr", "127.0.0.1:%d" % port]
+    if n > 0:
+        args.extend(["-join", "127.0.0.1:%d" % BASE_PORT])
+
+    env = os.environ.copy()
+    env.update({
+        "LD_LIBRARY_PATH": os.path.join(os.getcwd(), ".sqlite"),
+    })
+
+    fd = open(os.path.join(data, "log"), "a+")
+    return subprocess.Popen(args, env=env, stdout=fd, stderr=fd)
+
+def spawnTail():
+    args = ["tail", "-f"]
+    for i in range(3):
+        args.append("/tmp/dqlite-demo-%d/log" % i)
+    return subprocess.Popen(args, stderr=subprocess.STDOUT)
+
+if __name__ == "__main__":
+    subprocess.check_call(BUILD_DQLITE)
+    subprocess.check_call(BUILD_DEMO)
+
+    processes = []
+    for i in range(3):
+        processes.append(spawnNode(i, purge=True))
+    spawnTail()
+
+    while True:
+        for i, process in enumerate(processes):
+            rc = process.poll()
+            if rc is not None:
+                if rc != 0:
+                    print("ERROR node %d exited with return code %d" % (i, rc))
+                    for j, other in enumerate(processes):
+                        if j != i:
+                            other.kill()
+                    sys.exit(1)
+                # respawn
+                processes[i] = spawnNode(i)
+        time.sleep(0.5)
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/server.go b/vendor/github.com/CanonicalLtd/go-dqlite/server.go
new file mode 100644
index 0000000000..ce06510454
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/server.go
@@ -0,0 +1,233 @@
+package dqlite
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/bindings"
+	"github.com/CanonicalLtd/go-dqlite/internal/replication"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// Server implements the dqlite network protocol.
+type Server struct {
+	log         LogFunc          // Logger
+	registry    *Registry        // Registry wrapper
+	server      *bindings.Server // Low-level C implementation
+	listener    net.Listener     // Queue of new connections
+	runCh       chan error       // Receives the low-level C server return code
+	acceptCh    chan error       // Receives connection handling errors
+	replication *bindings.WalReplication
+	logger      *bindings.Logger
+	cluster     *bindings.Cluster
+}
+
+// ServerOption can be used to tweak server parameters.
+type ServerOption func(*serverOptions)
+
+// WithServerLogFunc sets a custom log function for the server.
+func WithServerLogFunc(log LogFunc) ServerOption {
+	return func(options *serverOptions) {
+		options.Log = log
+	}
+}
+
+// WithServerAddressProvider sets a custom resolver for server addresses.
+func WithServerAddressProvider(provider raft.ServerAddressProvider) ServerOption {
+	return func(options *serverOptions) {
+		options.AddressProvider = provider
+	}
+}
+
+// NewServer creates a new Server instance.
+func NewServer(raft *raft.Raft, registry *Registry, listener net.Listener, options ...ServerOption) (*Server, error) {
+	o := defaultServerOptions()
+
+	for _, option := range options {
+		option(o)
+	}
+
+	replication, err := newWalReplication(registry, raft)
+	if err != nil {
+		return nil, err
+	}
+
+	cluster, err := newCluster(registry, raft, o.AddressProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	server, err := bindings.NewServer(cluster)
+	if err != nil {
+		return nil, err
+	}
+
+	logger := bindings.NewLogger(o.Log)
+
+	server.SetLogger(logger)
+	server.SetVfs(registry.name)
+	server.SetWalReplication(registry.name)
+
+	s := &Server{
+		log:         o.Log,
+		registry:    registry,
+		server:      server,
+		listener:    listener,
+		runCh:       make(chan error),
+		acceptCh:    make(chan error, 1),
+		logger:      logger,
+		cluster:     cluster,
+		replication: replication,
+	}
+
+	go s.run()
+
+	if !s.server.Ready() {
+		return nil, fmt.Errorf("server failed to start")
+	}
+
+	go s.acceptLoop()
+
+	return s, nil
+}
+
+func newWalReplication(registry *Registry, raft *raft.Raft) (*bindings.WalReplication, error) {
+	methods := replication.NewMethods(registry.registry, raft)
+
+	replication, err := bindings.NewWalReplication(registry.name, methods)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to register WAL replication")
+	}
+
+	return replication, nil
+}
+
+func newCluster(registry *Registry, raft *raft.Raft, provider raft.ServerAddressProvider) (*bindings.Cluster, error) {
+	methods := &cluster{
+		raft:     raft,
+		registry: registry.registry,
+		provider: provider,
+	}
+
+	return bindings.NewCluster(methods)
+}
+
+// Hold configuration options for a dqlite server.
+type serverOptions struct {
+	Log             LogFunc
+	AddressProvider raft.ServerAddressProvider
+}
+
+// Run the server.
+func (s *Server) run() {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	s.runCh <- s.server.Run()
+}
+
+func (s *Server) acceptLoop() {
+	s.log(LogDebug, "accepting connections")
+
+	for {
+		conn, err := s.listener.Accept()
+		if err != nil {
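+			// The listener has been closed (normally via Close()),
+			// so signal a clean shutdown.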
+			s.acceptCh <- nil
+			return
+		}
+
+		err = s.server.Handle(conn)
+		if err != nil {
+			if err == bindings.ErrServerStopped {
+				// Ignore failures due to the server being
+				// stopped.
+				err = nil
+			}
+			s.acceptCh <- err
+			return
+		}
+	}
+}
+
+// Dump the files of a database to disk.
+func (s *Server) Dump(name string, dir string) error {
+	// Dump the database file.
+	bytes, err := s.registry.vfs.ReadFile(name)
+	if err != nil {
+		return errors.Wrap(err, "failed to get database file content")
+	}
+
+	path := filepath.Join(dir, name)
+	if err := ioutil.WriteFile(path, bytes, 0600); err != nil {
+		return errors.Wrap(err, "failed to write database file")
+	}
+
+	// Dump the WAL file.
+	bytes, err = s.registry.vfs.ReadFile(name + "-wal")
+	if err != nil {
+		return errors.Wrap(err, "failed to get WAL file content")
+	}
+
+	path = filepath.Join(dir, name+"-wal")
+	if err := ioutil.WriteFile(path, bytes, 0600); err != nil {
+		return errors.Wrap(err, "failed to write WAL file")
+	}
+
+	return nil
+}
+
+// Close the server, releasing all resources it created.
+func (s *Server) Close() error {
+	// Close the listener, which will make the listener.Accept() call in
+	// acceptLoop() return an error.
+	if err := s.listener.Close(); err != nil {
+		return err
+	}
+
+	// Wait for the acceptLoop goroutine to exit.
+	select {
+	case err := <-s.acceptCh:
+		if err != nil {
+			return errors.Wrap(err, "accept goroutine failed")
+		}
+	case <-time.After(time.Second):
+		return fmt.Errorf("accept goroutine did not stop within a second")
+	}
+
+	// Send a stop signal to the dqlite event loop.
+	if err := s.server.Stop(); err != nil {
+		return errors.Wrap(err, "server failed to stop")
+	}
+
+	// Wait for the run goroutine to exit.
+	select {
+	case err := <-s.runCh:
+		if err != nil {
+			return errors.Wrap(err, "run goroutine failed")
+		}
+	case <-time.After(time.Second):
+		return fmt.Errorf("server did not stop within a second")
+	}
+
+	s.server.Close()
+
+	s.logger.Close()
+	s.cluster.Close()
+	s.replication.Close()
+	s.registry.Close()
+
+	return nil
+}
+
+// Create a serverOptions object with sane defaults.
+func defaultServerOptions() *serverOptions {
+	return &serverOptions{
+		Log:             defaultLogFunc(),
+		AddressProvider: nil,
+	}
+}
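
A condensed sketch of how NewServer is meant to be driven; the node ID and listen address are placeholders, and in real code the same Registry must also back the FSM used by the raft.Raft instance (see the NewRegistry doc comment):

    package serverexample

    import (
        "net"

        dqlite "github.com/CanonicalLtd/go-dqlite"
        "github.com/hashicorp/raft"
    )

    func startServer(r *raft.Raft, registry *dqlite.Registry) (*dqlite.Server, error) {
        listener, err := net.Listen("tcp", "127.0.0.1:9001") // placeholder address
        if err != nil {
            return nil, err
        }

        // Options such as dqlite.WithServerLogFunc(...) could be passed here;
        // by default the server logs to stdout.
        return dqlite.NewServer(r, registry, listener)
    }
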
diff --git a/vendor/github.com/CanonicalLtd/go-dqlite/store.go b/vendor/github.com/CanonicalLtd/go-dqlite/store.go
new file mode 100644
index 0000000000..bb1255cda2
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/go-dqlite/store.go
@@ -0,0 +1,141 @@
+package dqlite
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	"github.com/CanonicalLtd/go-dqlite/internal/client"
+	_ "github.com/mattn/go-sqlite3" // Go SQLite bindings
+)
+
+// ServerInfo holds information about a single server.
+type ServerInfo = client.ServerInfo
+
+// ServerStore is used by a dqlite client to get an initial list of candidate
+// dqlite server addresses that it can dial in order to find a leader dqlite
+// server to use.
+//
+// Once connected, the client periodically updates the addresses in the store
+// by querying the leader server about changes in the cluster (such as servers
+// being added or removed).
+type ServerStore = client.ServerStore
+
+// InmemServerStore keeps the list of target dqlite servers in memory.
+type InmemServerStore = client.InmemServerStore
+
+// NewInmemServerStore creates a ServerStore which stores its data in memory.
+var NewInmemServerStore = client.NewInmemServerStore
+
+// DatabaseServerStore persists a list of addresses of dqlite servers in a SQL table.
+type DatabaseServerStore struct {
+	db     *sql.DB // Database handle to use.
+	schema string  // Name of the schema holding the servers table.
+	table  string  // Name of the servers table.
+	column string  // Column name in the servers table holding the server address.
+}
+
+// DefaultServerStore creates a new ServerStore using the given filename to
+// open a SQLite database, with default names for the schema, table and column
+// parameters.
+//
+// It also creates the table if it doesn't exist yet.
+func DefaultServerStore(filename string) (*DatabaseServerStore, error) {
+	// Open the database.
+	db, err := sql.Open("sqlite3", filename)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to open database")
+	}
+
+	// Since we're setting SQLite single-thread mode, we need to have one
+	// connection at most.
+	db.SetMaxOpenConns(1)
+
+	// Create the servers table if it does not exist yet.
+	_, err = db.Exec("CREATE TABLE IF NOT EXISTS servers (address TEXT, UNIQUE(address))")
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create servers table")
+	}
+
+	store := NewServerStore(db, "main", "servers", "address")
+
+	return store, nil
+}
+
+// NewServerStore creates a new ServerStore.
+func NewServerStore(db *sql.DB, schema, table, column string) *DatabaseServerStore {
+	return &DatabaseServerStore{
+		db:     db,
+		schema: schema,
+		table:  table,
+		column: column,
+	}
+}
+
+// Get the current servers.
+func (d *DatabaseServerStore) Get(ctx context.Context) ([]ServerInfo, error) {
+	tx, err := d.db.Begin()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to begin transaction")
+	}
+	defer tx.Rollback()
+
+	query := fmt.Sprintf("SELECT %s FROM %s.%s", d.column, d.schema, d.table)
+	rows, err := tx.QueryContext(ctx, query)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to query servers table")
+	}
+	defer rows.Close()
+
+	servers := make([]ServerInfo, 0)
+	for rows.Next() {
+		var address string
+		err := rows.Scan(&address)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to fetch server address")
+		}
+		servers = append(servers, ServerInfo{ID: 1, Address: address})
+	}
+	if err := rows.Err(); err != nil {
+		return nil, errors.Wrap(err, "result set failure")
+	}
+
+	return servers, nil
+}
+
+// Set the servers addresses.
+func (d *DatabaseServerStore) Set(ctx context.Context, servers []ServerInfo) error {
+	tx, err := d.db.Begin()
+	if err != nil {
+		return errors.Wrap(err, "failed to begin transaction")
+	}
+
+	query := fmt.Sprintf("DELETE FROM %s.%s", d.schema, d.table)
+	if _, err := tx.ExecContext(ctx, query); err != nil {
+		tx.Rollback()
+		return errors.Wrap(err, "failed to delete existing servers rows")
+	}
+
+	query = fmt.Sprintf("INSERT INTO %s.%s(%s) VALUES (?)", d.schema, d.table, d.column)
+	stmt, err := tx.PrepareContext(ctx, query)
+	if err != nil {
+		tx.Rollback()
+		return errors.Wrap(err, "failed to prepare insert statement")
+	}
+	defer stmt.Close()
+
+	for _, server := range servers {
+		if _, err := stmt.ExecContext(ctx, server.Address); err != nil {
+			tx.Rollback()
+			return errors.Wrapf(err, "failed to insert server %s", server.Address)
+		}
+	}
+
+	if err := tx.Commit(); err != nil {
+		return errors.Wrap(err, "failed to commit transaction")
+	}
+
+	return nil
+}
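
A usage sketch; the database path and server address are placeholders:

    package main

    import (
        "context"
        "log"

        dqlite "github.com/CanonicalLtd/go-dqlite"
    )

    func main() {
        store, err := dqlite.DefaultServerStore("/tmp/servers.db")
        if err != nil {
            log.Fatal(err)
        }

        ctx := context.Background()

        // Persist the current cluster addresses...
        servers := []dqlite.ServerInfo{{ID: 1, Address: "10.0.0.1:9001"}}
        if err := store.Set(ctx, servers); err != nil {
            log.Fatal(err)
        }

        // ...and read them back.
        current, err := store.Get(ctx)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("servers: %+v", current)
    }
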
diff --git a/vendor/github.com/CanonicalLtd/raft-http/AUTHORS b/vendor/github.com/CanonicalLtd/raft-http/AUTHORS
new file mode 100644
index 0000000000..6e13f86ebb
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/AUTHORS
@@ -0,0 +1 @@
+Free Ekanayaka <free.ekanayaka at canonical.com>
diff --git a/vendor/github.com/CanonicalLtd/raft-http/LICENSE b/vendor/github.com/CanonicalLtd/raft-http/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/CanonicalLtd/raft-http/README.md b/vendor/github.com/CanonicalLtd/raft-http/README.md
new file mode 100644
index 0000000000..5a7bd20085
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/README.md
@@ -0,0 +1,14 @@
+raft-http [![Build Status](https://travis-ci.org/CanonicalLtd/raft-http.png)](https://travis-ci.org/CanonicalLtd/raft-http) [![Coverage Status](https://coveralls.io/repos/github/CanonicalLtd/raft-http/badge.svg?branch=master)](https://coveralls.io/github/CanonicalLtd/raft-http?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/CanonicalLtd/raft-http)](https://goreportcard.com/report/github.com/CanonicalLtd/raft-http)  [![GoDoc](https://godoc.org/github.com/CanonicalLtd/raft-http?status.svg)](https://godoc.org/github.com/CanonicalLtd/raft-http)
+=========
+
+This repository provides the `rafthttp` package, which can be used to
+establish a network connection between two raft nodes using HTTP. Once
+the HTTP connection is established, the Upgrade header will be used to
+switch it to raw TCP mode, and the regular TCP-based network transport
+of the `raft` [package](https://github.com/hashicorp/raft) can take it
+from there.
+
+Documentation
+==============
+
+The documentation for this package can be found on [Godoc](http://godoc.org/github.com/CanonicalLtd/raft-http).
diff --git a/vendor/github.com/CanonicalLtd/raft-http/dial.go b/vendor/github.com/CanonicalLtd/raft-http/dial.go
new file mode 100644
index 0000000000..c11c029431
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/dial.go
@@ -0,0 +1,50 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+// Dial is a function that, given an address and a timeout, returns a
+// new network connection (typically TCP or TLS over TCP).
+type Dial func(addr string, timeout time.Duration) (net.Conn, error)
+
+// NewDialTCP returns a Dial function that establishes a network
+// connection using raw TCP.
+func NewDialTCP() Dial {
+	dial := func(addr string, timeout time.Duration) (net.Conn, error) {
+		dialer := newDialerWithTimeout(timeout)
+		return dialer.Dial("tcp", addr)
+	}
+	return dial
+}
+
+// NewDialTLS returns a Dial function that establishes a network
+// connection using TLS over TCP.
+func NewDialTLS(config *tls.Config) Dial {
+	dial := func(addr string, timeout time.Duration) (net.Conn, error) {
+		dialer := newDialerWithTimeout(timeout)
+		return tls.DialWithDialer(dialer, "tcp", addr, config)
+	}
+	return dial
+}
+
+// Convenience to create a Dialer configured with the given timeout.
+func newDialerWithTimeout(timeout time.Duration) *net.Dialer {
+	return &net.Dialer{Timeout: timeout}
+}
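
Usage sketch; the address is a placeholder and the permissive TLS config is for illustration only:

    package main

    import (
        "crypto/tls"
        "log"
        "time"

        rafthttp "github.com/CanonicalLtd/raft-http"
    )

    func main() {
        // Real code should verify certificates instead of skipping verification.
        dial := rafthttp.NewDialTLS(&tls.Config{InsecureSkipVerify: true})

        conn, err := dial("10.0.0.2:8080", 5*time.Second)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }
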
diff --git a/vendor/github.com/CanonicalLtd/raft-http/doc.go b/vendor/github.com/CanonicalLtd/raft-http/doc.go
new file mode 100644
index 0000000000..39e4ea1d91
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/doc.go
@@ -0,0 +1,67 @@
+// Package rafthttp provides an extension for the github.com/hashicorp/raft
+// package.
+//
+// It implements a raft.StreamLayer that a raft.NetworkTransport can use to
+// connect to and accept connections from other raft.Transport's using
+// HTTP/WebSocket rather than straight TCP.
+//
+// This is handy for applications that expose an HTTP endpoint and don't want
+// to open an extra TCP port for handling raft-level traffic.
+//
+// In addition to the regular raft.StreamLayer interface, rafthttp.Layer
+// implements extra methods to join and leave a cluster.
+//
+// Typical usage of this package is as follows:
+//
+// - Create a rafthttp.Handler object which implements the standard
+//   http.Handler interface.
+//
+// - Create a standard http.Server and configure it to route an endpoint path
+//   of your choice to the rafthttp.Handler above. All your raft servers must
+//   use the same endpoint path. You'll probably want to gate the
+//   rafthttp.Handler behind some authorization mechanism of your choice.
+//
+// - Create a net.Listener and use it to start the http.Server created
+//   above. From this point the rafthttp.Handler will start accepting
+//   raft-related requests.
+//
+// - Create a rafthttp.Layer object passing it:
+//
+//   1) The endpoint path you chose above, which will be used to establish
+//      outbound raft.Transport connections to other raft servers over
+//      HTTP/WebSocket.
+//
+//   2) The network address of the net.Listener you used to start the
+//      http.Server, which will be used by the local raft server to know its
+//      own network address.
+//
+//   3) The rafthttp.Handler object you created above, which will be used to
+//      accept inbound raft.NetworkTransport connections from other raft
+//      servers over HTTP/WebSocket.
+//
+//   4) A rafthttp.Dial function, which will be used to establish outbound
+//      raft.NetworkTransport connections to other raft servers over
+//      HTTP/WebSocket (the rafthttp.Layer will use it to perform HTTP requests
+//      to other servers using your chosen endpoint path).
+//
+// - Create a raft.NetworkTransport passing it the rafthttp.Layer you created
+//   above.
+//
+// - Create a raft.Raft server using the raft.NetworkTransport created above.
+//
+// - Spawn a goroutine running the raftmembership.HandleChangeRequests function
+//   from the github.com/CanonicalLtd/raft-membership package, passing it the
+//   raft.Raft server you created above and the channel returned by the
+//   Requests() method of the rafthttp.Handler created above. This will process
+//   join and leave requests, which you can perform using the Join() and
+//   Leave() methods of the rafthttp.Layer object you created above. This
+//   goroutine will terminate automatically when you shut down your raft.Raft
+//   server, since that will close your raft.NetworkTransport, which in turn
+//   closes your rafthttp.Layer, which closes your rafthttp.Handler, which will
+//   ultimately close the channel returned by its Requests() method and signal
+//   the raftmembership.HandleChangeRequests function to return.
+//
+// To cleanly shut down the service, first shut down your raft.Raft instance,
+// then call the CloseStreams() method of your raft.NetworkTransport instance
+// (to close all connections) and then stop your http.Server.
+package rafthttp
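
A condensed sketch of the steps listed above; the "/raft" endpoint path, the listen address and the pool/timeout values are placeholders:

    package transportexample

    import (
        "net"
        "net/http"
        "os"
        "time"

        rafthttp "github.com/CanonicalLtd/raft-http"
        "github.com/hashicorp/raft"
    )

    func newTransport() (*raft.NetworkTransport, *rafthttp.Handler, error) {
        handler := rafthttp.NewHandler()

        mux := http.NewServeMux()
        mux.Handle("/raft", handler)

        listener, err := net.Listen("tcp", "127.0.0.1:8080")
        if err != nil {
            return nil, nil, err
        }
        go http.Serve(listener, mux)

        layer := rafthttp.NewLayer("/raft", listener.Addr(), handler, rafthttp.NewDialTCP())
        transport := raft.NewNetworkTransport(layer, 3, 10*time.Second, os.Stderr)
        return transport, handler, nil
    }

The returned handler's Requests() channel would then be handed to raftmembership.HandleChangeRequests together with the raft.Raft instance built on this transport.
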
diff --git a/vendor/github.com/CanonicalLtd/raft-http/handler.go b/vendor/github.com/CanonicalLtd/raft-http/handler.go
new file mode 100644
index 0000000000..2a0d9d7355
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/handler.go
@@ -0,0 +1,212 @@
+package rafthttp
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/CanonicalLtd/raft-membership"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// Handler implements an HTTP handler that will look for an Upgrade
+// header in the request to switch the HTTP connection to raw TCP
+// mode, so it can be used as a raft.NetworkTransport stream.
+type Handler struct {
+	requests    chan *raftmembership.ChangeRequest // Membership requests are pushed to this channel
+	connections chan net.Conn                      // New Raft connections are pushed to this channel.
+	shutdown    chan struct{}                      // Used to stop processing membership requests.
+	timeout     time.Duration                      // Maximum time to wait for requests to be processed.
+	logger      *log.Logger                        // Logger to use.
+	mu          sync.RWMutex                       // Blocks closing until all membership requests are handled
+}
+
+// NewHandler returns a new Handler.
+//
+// Incoming raft membership requests (received via POST and DELETE) are
+// forwarded to the channel returned by Requests(), which is supposed to be
+// processed using raftmembership.HandleChangeRequests().
+func NewHandler() *Handler {
+	logger := log.New(os.Stderr, "", log.LstdFlags)
+	return NewHandlerWithLogger(logger)
+}
+
+// NewHandlerWithLogger returns a new Handler configured with the given logger.
+func NewHandlerWithLogger(logger *log.Logger) *Handler {
+	return &Handler{
+		requests:    make(chan *raftmembership.ChangeRequest),
+		connections: make(chan net.Conn),
+		shutdown:    make(chan struct{}),
+		timeout:     10 * time.Second,
+		logger:      logger,
+	}
+}
+
+// Requests returns a channel of inbound Raft membership change requests
+// received over HTTP. Consumer code is supposed to process this channel by
+// invoking raftmembership.HandleChangeRequests.
+func (h *Handler) Requests() <-chan *raftmembership.ChangeRequest {
+	return h.requests
+}
+
+// Timeout sets the maximum amount of time for a request to be processed. It
+// defaults to 10 seconds if not set.
+func (h *Handler) Timeout(timeout time.Duration) {
+	h.timeout = timeout
+}
+
+// Close stops handling incoming requests.
+func (h *Handler) Close() {
+	close(h.shutdown)
+
+	// Block until all pending requests are done. After that no new
+	// requests will be sent to the requests channel since the shutdown
+	// channel is closed.
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	close(h.requests)
+}
+
+// ServeHTTP upgrades the given HTTP connection to a raw TCP one for
+// use by raft.
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case "GET":
+		h.handleGet(w, r)
+	case "POST":
+		h.handlePost(w, r)
+	case "DELETE":
+		h.handleDelete(w, r)
+	default:
+		http.Error(w, "unknown action", http.StatusMethodNotAllowed)
+	}
+}
+
+func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
+	if r.Header.Get("Upgrade") != "raft" {
+		http.Error(w, "missing or invalid upgrade header", http.StatusBadRequest)
+		return
+	}
+
+	hijacker, ok := w.(http.Hijacker)
+	if !ok {
+		http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
+		return
+	}
+
+	conn, _, err := hijacker.Hijack()
+	if err != nil {
+		message := errors.Wrap(err, "failed to hijack connection").Error()
+		http.Error(w, message, http.StatusInternalServerError)
+		return
+	}
+
+	// Write the status line and upgrade header by hand since w.WriteHeader()
+	// would fail after Hijack()
+	data := []byte("HTTP/1.1 101 Switching Protocols\r\nUpgrade: raft\r\n\r\n")
+	if n, err := conn.Write(data); err != nil || n != len(data) {
+		conn.Close()
+		return
+	}
+
+	// We don't need to watch for the shutdown channel here, because if the
+	// shutdown channel gets closed, Layer.Accept() will eventually return
+	// an error, causing the raft.NetworkTransport.listen() method to return
+	// (the assumption is that the raft instance is being shutdown). At
+	// that point, nobody will be calling Layer.Accept() anymore and we'll
+	// block sending to the h.connections channel until h.timeout expires.
+	h.logger.Printf("[INFO] raft-http: Establishing new connection with %s", r.Host)
+	select {
+	case <-h.shutdown:
+		h.logger.Printf("[ERR] raft-http: Connection from %s dropped since we have shut down", r.Host)
+		conn.Close()
+	case h.connections <- conn:
+	case <-time.After(h.timeout):
+		h.logger.Printf("[ERR] raft-http: Connection from %s not processed within %s", r.Host, h.timeout)
+		conn.Close()
+	}
+}
+
+func (h *Handler) handlePost(w http.ResponseWriter, r *http.Request) {
+	query := r.URL.Query()
+	id := raft.ServerID(query.Get("id"))
+	address := raft.ServerAddress(query.Get("address"))
+
+	h.logger.Printf("[INFO] raft-http: Handling join request for node %s (%s)", id, address)
+
+	request := raftmembership.NewJoinRequest(id, address)
+	h.changeMembership(w, r, request)
+}
+
+func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) {
+	query := r.URL.Query()
+	id := raft.ServerID(query.Get("id"))
+
+	h.logger.Printf("[INFO] raft-http: Handling leave request for node %s", id)
+
+	request := raftmembership.NewLeaveRequest(id)
+	h.changeMembership(w, r, request)
+}
+
+func (h *Handler) changeMembership(w http.ResponseWriter, r *http.Request, request *raftmembership.ChangeRequest) {
+	// Acquire a read lock, so Close() will block until all change
+	// membership requests are done.
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	// Fail immediately if we've been closed.
+	select {
+	case <-h.shutdown:
+		http.Error(w, "raft transport closed", http.StatusForbidden)
+		return
+	default:
+	}
+
+	// Sanity check before actually trying to process the request.
+	if request.ID() == "" {
+		http.Error(w, "no server ID provided", http.StatusBadRequest)
+		return
+	}
+
+	// It's safe to block here, since HandleChangeRequests has an internal
+	// timeout, which will abort a request if it takes too long.
+	h.requests <- request
+
+	err := request.Error(h.timeout)
+	if err == nil {
+		return
+	}
+
+	var code int
+
+	switch err := err.(type) {
+	case *raftmembership.ErrDifferentLeader:
+		// If we fail because the current node is not the leader, send
+		// a redirect.
+		url := &url.URL{
+			Scheme:   "http", // XXX TODO: handle HTTPS
+			Path:     r.URL.Path,
+			RawQuery: r.URL.RawQuery,
+			Host:     err.Leader(),
+		}
+		http.Redirect(w, r, url.String(), http.StatusPermanentRedirect)
+		return
+	case *raftmembership.ErrUnknownLeader:
+		// If we fail because we currently don't know the leader, hint
+		// the client to retry.
+		code = http.StatusServiceUnavailable
+	default:
+		code = http.StatusForbidden
+	}
+
+	message := errors.Wrap(err, fmt.Sprintf(
+		"failed to %s server %s", request.Kind(), request.ID())).Error()
+	http.Error(w, message, code)
+}
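
A short sketch of wiring the handler's membership channel into the raft-membership package imported above; the HandleChangeRequests signature is assumed from how this file uses the channel:

    package handlerexample

    import (
        "log"
        "os"

        rafthttp "github.com/CanonicalLtd/raft-http"
        raftmembership "github.com/CanonicalLtd/raft-membership"
        "github.com/hashicorp/raft"
    )

    func serveMembership(r *raft.Raft) *rafthttp.Handler {
        logger := log.New(os.Stderr, "raft-http: ", log.LstdFlags)
        handler := rafthttp.NewHandlerWithLogger(logger)

        // Assumed signature: HandleChangeRequests(*raft.Raft, <-chan *raftmembership.ChangeRequest).
        go raftmembership.HandleChangeRequests(r, handler.Requests())

        return handler
    }
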
diff --git a/vendor/github.com/CanonicalLtd/raft-http/layer.go b/vendor/github.com/CanonicalLtd/raft-http/layer.go
new file mode 100644
index 0000000000..cdba5dbb42
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/layer.go
@@ -0,0 +1,132 @@
+package rafthttp
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/CanonicalLtd/raft-membership"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// NewLayer returns a new raft stream layer that initiates connections
+// with HTTP and then uses Upgrade to switch them into raw TCP.
+func NewLayer(path string, localAddr net.Addr, handler *Handler, dial Dial) *Layer {
+	logger := log.New(ioutil.Discard, "", 0)
+	return NewLayerWithLogger(path, localAddr, handler, dial, logger)
+}
+
+// NewLayerWithLogger returns a Layer using the specified logger.
+func NewLayerWithLogger(path string, localAddr net.Addr, handler *Handler, dial Dial, logger *log.Logger) *Layer {
+	return &Layer{
+		path:      path,
+		localAddr: localAddr,
+		handler:   handler,
+		dial:      dial,
+		logger:    logger,
+	}
+}
+
+// Layer represents the connection between raft nodes.
+type Layer struct {
+	path      string
+	localAddr net.Addr
+	handler   *Handler
+	dial      Dial
+	logger    *log.Logger
+}
+
+// Accept waits for the next connection.
+func (l *Layer) Accept() (net.Conn, error) {
+	select {
+	case conn := <-l.handler.connections:
+		return conn, nil
+	case <-l.handler.shutdown:
+		return nil, io.EOF
+	}
+}
+
+// Close closes the layer.
+func (l *Layer) Close() error {
+	l.handler.Close()
+	return nil
+}
+
+// Addr returns the local address for the layer.
+func (l *Layer) Addr() net.Addr {
+	return l.localAddr
+}
+
+// Dial creates a new network connection.
+func (l *Layer) Dial(addr raft.ServerAddress, timeout time.Duration) (net.Conn, error) {
+	l.logger.Printf("[INFO] raft-http: Connecting to %s", addr)
+
+	url := makeURL(l.path)
+	request := &http.Request{
+		Method:     "GET",
+		URL:        url,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       l.Addr().String(),
+	}
+	request.Header.Set("Upgrade", "raft")
+
+	conn, err := l.dial(string(addr), timeout)
+	if err != nil {
+		return nil, errors.Wrap(err, "dialing failed")
+	}
+
+	if err := request.Write(conn); err != nil {
+		return nil, errors.Wrap(err, "sending HTTP request failed")
+	}
+
+	response, err := http.ReadResponse(bufio.NewReader(conn), request)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to read response")
+	}
+	if response.StatusCode != http.StatusSwitchingProtocols {
+		return nil, fmt.Errorf("dialing failed: expected status code 101, got %d", response.StatusCode)
+	}
+	if response.Header.Get("Upgrade") != "raft" {
+		return nil, fmt.Errorf("missing or unexpected Upgrade header in response")
+	}
+	return conn, err
+}
+
+// Join tries to join the cluster by contacting the leader at the given
+// address. The raft node associated with this layer must have the given server
+// identity.
+func (l *Layer) Join(id raft.ServerID, addr raft.ServerAddress, timeout time.Duration) error {
+	l.logger.Printf("[INFO] raft-http: Joining cluster at %s as node %s", addr, id)
+
+	return l.changeMembership(raftmembership.JoinRequest, id, addr, timeout)
+}
+
+// Leave tries to leave the cluster by contacting the leader at the given
+// address.  The raft node associated with this layer must have the given
+// server identity.
+func (l *Layer) Leave(id raft.ServerID, addr raft.ServerAddress, timeout time.Duration) error {
+	l.logger.Printf("[INFO] raft-http: Leaving cluster at %s as node %s", addr, id)
+
+	return l.changeMembership(raftmembership.LeaveRequest, id, addr, timeout)
+}
+
+// Change the membership of the server associated with this layer.
+func (l *Layer) changeMembership(kind raftmembership.ChangeRequestKind, id raft.ServerID, addr raft.ServerAddress, timeout time.Duration) error {
+	return ChangeMembership(kind, l.path, l.dial, id, l.Addr().String(), string(addr), timeout)
+}
+
+// Map a membership ChangeRequest kind code to an HTTP method name.
+var membershipChangeRequestKindToMethod = map[raftmembership.ChangeRequestKind]string{
+	raftmembership.JoinRequest:  "POST",
+	raftmembership.LeaveRequest: "DELETE",
+}
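
For reference, the Dial type consumed by NewLayer is a plain function of the form func(addr string, timeout time.Duration) (net.Conn, error), so non-TLS deployments can use net.DialTimeout directly. The sketch below shows how the layer might be wired into hashicorp/raft; the NewHandler constructor, the /raft path and the address are illustrative assumptions, not part of this patch:

    // A minimal sketch, assuming a Handler obtained from this package
    // (NewHandler is an assumed constructor, for illustration only).
    handler := rafthttp.NewHandler()

    // Plain TCP dialing; a TLS deployment would substitute a TLS-aware
    // dial function here.
    dial := func(addr string, timeout time.Duration) (net.Conn, error) {
        return net.DialTimeout("tcp", addr, timeout)
    }

    addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:9000")
    layer := rafthttp.NewLayer("/raft", addr, handler, dial)

    // Layer implements Accept/Close/Addr/Dial, i.e. raft.StreamLayer,
    // so it can back a standard network transport.
    transport := raft.NewNetworkTransport(layer, 3, 10*time.Second, os.Stderr)
    _ = transport
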
diff --git a/vendor/github.com/CanonicalLtd/raft-http/membership.go b/vendor/github.com/CanonicalLtd/raft-http/membership.go
new file mode 100644
index 0000000000..a83100b3a7
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-http/membership.go
@@ -0,0 +1,95 @@
+package rafthttp
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/CanonicalLtd/raft-membership"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// ChangeMembership can be used to join or leave a cluster over HTTP.
+func ChangeMembership(
+	kind raftmembership.ChangeRequestKind,
+	path string,
+	dial Dial,
+	id raft.ServerID,
+	address, target string,
+	timeout time.Duration) error {
+	url := makeURL(path)
+	url.RawQuery = fmt.Sprintf("id=%s", id)
+	if kind == raftmembership.JoinRequest {
+		url.RawQuery += fmt.Sprintf("&address=%s", address)
+	}
+	url.Host = target
+	url.Scheme = "http"
+	method := membershipChangeRequestKindToMethod[kind]
+	request := &http.Request{
+		Method:     method,
+		URL:        url,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+	}
+
+	remaining := timeout
+	var response *http.Response
+	var err error
+	for remaining > 0 {
+		start := time.Now()
+		netDial := func(network, addr string) (net.Conn, error) {
+			return dial(addr, remaining)
+		}
+		client := &http.Client{
+			Timeout:   remaining,
+			Transport: &http.Transport{Dial: netDial},
+		}
+		response, err = client.Do(request)
+
+		// If we got a system or network error, just return it.
+		if err != nil {
+			break
+		}
+
+		// If we got an HTTP error, let's capture its details,
+		// and possibly return it if it's not retriable or we
+		// have hit our timeout.
+		if response.StatusCode != http.StatusOK {
+			body, _ := ioutil.ReadAll(response.Body)
+			response.Body.Close()
+			err = fmt.Errorf(
+				"http code %d '%s'", response.StatusCode,
+				strings.TrimSpace(string(body)))
+		}
+		// If there's a temporary failure, let's retry.
+		if response.StatusCode == http.StatusServiceUnavailable {
+			// XXX TODO: use an exponential backoff
+			// relative to the timeout?
+			time.Sleep(100 * time.Millisecond)
+
+			remaining -= time.Since(start)
+			continue
+		}
+
+		break
+	}
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("server %s failed", kind))
+	}
+	return nil
+}
+
+// Build a full url.URL object out of our path.
+func makeURL(path string) *url.URL {
+	url, err := url.Parse(path)
+	if err != nil {
+		panic(fmt.Sprintf("invalid URL path %s", path))
+	}
+	return url
+}
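
ChangeMembership drives the join/leave handshake from the requesting side, retrying on 503 until the timeout budget is exhausted. A sketch of asking a presumed leader to let a new node join; the IDs and addresses are placeholders:

    dial := func(addr string, timeout time.Duration) (net.Conn, error) {
        return net.DialTimeout("tcp", addr, timeout)
    }
    err := rafthttp.ChangeMembership(
        raftmembership.JoinRequest,
        "/raft",         // path the remote Handler is mounted at
        dial,
        "node-2",        // ID of the joining server
        "10.0.0.2:9000", // address the joining server advertises
        "10.0.0.1:9000", // target server, presumed to be the leader
        10*time.Second,
    )
    if err != nil {
        log.Fatal(err)
    }
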
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/AUTHORS b/vendor/github.com/CanonicalLtd/raft-membership/AUTHORS
new file mode 100644
index 0000000000..6e13f86ebb
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/AUTHORS
@@ -0,0 +1 @@
+Free Ekanayaka <free.ekanayaka at canonical.com>
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/LICENSE b/vendor/github.com/CanonicalLtd/raft-membership/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/README.md b/vendor/github.com/CanonicalLtd/raft-membership/README.md
new file mode 100644
index 0000000000..f3a36a9601
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/README.md
@@ -0,0 +1,11 @@
+raft-membership [![Build Status](https://travis-ci.org/CanonicalLtd/raft-membership.png)](https://travis-ci.org/CanonicalLtd/raft-membership) [![Coverage Status](https://coveralls.io/repos/github/CanonicalLtd/raft-membership/badge.svg?branch=master)](https://coveralls.io/github/CanonicalLtd/raft-membership?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/CanonicalLtd/raft-membership)](https://goreportcard.com/report/github.com/CanonicalLtd/raft-membership) [![GoDoc](https://godoc.org/github.com/CanonicalLtd/raft-membership?status.svg)](https://godoc.org/github.com/CanonicalLtd/raft-membership)
+=========
+
+This repository provides the `raftmembership` package, which contains
+an extension of the `raft` Go [package](https://github.com/hashicorp/raft)
+from HashiCorp that makes it easy for a node to join or leave a cluster.
+
+Documentation
+==============
+
+The documentation for this package can be found on [Godoc](http://godoc.org/github.com/CanonicalLtd/raft-membership).
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/changer.go b/vendor/github.com/CanonicalLtd/raft-membership/changer.go
new file mode 100644
index 0000000000..06d3d01f77
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/changer.go
@@ -0,0 +1,36 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftmembership
+
+import (
+	"time"
+
+	"github.com/hashicorp/raft"
+)
+
+// Changer is an API that can be used by a raft server to change its
+// membership in a cluster (i.e. either join it or leave it).
+//
+// It works by using some transport layer (e.g. HTTP, TCP, etc) to
+// send a membership change request to a target server that is part of
+// the cluster and that can handle such requests, possibly redirecting
+// the requesting server to another server (e.g. the cluster leader).
+//
+// It is effectively an extension of the raft.Transport interface,
+// with additional semantics for joining/leaving a raft cluster.
+type Changer interface {
+	Join(raft.ServerID, raft.ServerAddress, time.Duration) error
+	Leave(raft.ServerID, raft.ServerAddress, time.Duration) error
+}
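
The raft-http Layer above provides Join and Leave with exactly these signatures, so it implements Changer. A compile-time assertion (a sketch, not part of the patch) makes the relationship explicit:

    // Statically assert that rafthttp.Layer satisfies
    // raftmembership.Changer.
    var _ raftmembership.Changer = (*rafthttp.Layer)(nil)
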
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/errors.go b/vendor/github.com/CanonicalLtd/raft-membership/errors.go
new file mode 100644
index 0000000000..15391126a1
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/errors.go
@@ -0,0 +1,49 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftmembership
+
+import (
+	"fmt"
+)
+
+// ErrDifferentLeader is returned by ChangeRequest.Error() when the
+// request to join or leave a cluster failed because the target peer
+// is not the leader. The network address of the leader as currently
+// known by the target peer is attached to the error, so clients can
+// retry the request, this time using the given leader address as the
+// target peer.
+type ErrDifferentLeader struct {
+	leader string
+}
+
+// Leader is the address of the leader as currently known.
+func (e *ErrDifferentLeader) Leader() string {
+	return e.leader
+}
+
+func (e *ErrDifferentLeader) Error() string {
+	return fmt.Sprintf("node is not leader, current leader at: %s", e.leader)
+}
+
+// ErrUnknownLeader is returned by ChangeRequest.Error() when the
+// request to join or leave a cluster failed because the target peer
+// is not the leader, and at the moment it also does not know the
+// address of the leader (this can happen for example during leader
+// elections). Clients typically want to retry after a short time.
+type ErrUnknownLeader struct{}
+
+func (e *ErrUnknownLeader) Error() string {
+	return "node is not leader, current leader unknown"
+}
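
These two error types let a requesting node follow leader redirects. A sketch of how a caller might branch on them after submitting a request; req is a *ChangeRequest (defined in request.go below) and retryAgainst is a placeholder:

    // Branch on the typed errors returned by ChangeRequest.Error() to
    // decide how to retry.
    switch err := req.Error(5 * time.Second).(type) {
    case nil:
        // Membership change applied successfully.
    case *raftmembership.ErrDifferentLeader:
        // Retry against the leader the target peer pointed us to.
        retryAgainst(err.Leader())
    case *raftmembership.ErrUnknownLeader:
        // Election in progress: back off briefly, then retry.
        time.Sleep(100 * time.Millisecond)
    }
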
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/handle.go b/vendor/github.com/CanonicalLtd/raft-membership/handle.go
new file mode 100644
index 0000000000..fb467459ae
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/handle.go
@@ -0,0 +1,65 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftmembership
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/raft"
+)
+
+// HandleChangeRequests processes ChangeRequests received through the
+// given channel, using the given raft.Raft instance to add peers to or
+// remove peers from the cluster, according to the received requests.
+func HandleChangeRequests(r *raft.Raft, requests <-chan *ChangeRequest) {
+	for request := range requests {
+
+		// If we currently think we're the leader, let's try
+		// to handle the request, otherwise let's bail out
+		// directly.
+		var err error
+		if r.State() == raft.Leader {
+			err = changeMembership(r, request).Error()
+		} else {
+			err = raft.ErrNotLeader
+		}
+
+		// Wrap not-leader errors.
+		if err == raft.ErrNotLeader {
+			if r.Leader() != "" {
+				err = &ErrDifferentLeader{leader: string(r.Leader())}
+			} else {
+				err = &ErrUnknownLeader{}
+			}
+		}
+
+		request.Done(err)
+	}
+}
+
+// Execute the appropriate Raft membership change to handle the given request.
+func changeMembership(raft *raft.Raft, request *ChangeRequest) raft.Future {
+	kind := request.Kind()
+	timeout := 10 * time.Second // FIXME: should be configurable
+	switch kind {
+	case JoinRequest:
+		return raft.AddVoter(request.ID(), request.Address(), 0, timeout)
+	case LeaveRequest:
+		return raft.RemoveServer(request.ID(), 0, timeout)
+	default:
+		panic(fmt.Sprintf("invalid change request kind: %d", int(kind)))
+	}
+}
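
Server-side wiring amounts to a channel plus a goroutine. A sketch, assuming r is an already-started *raft.Raft whose transport feeds membership requests into the channel:

    // Process membership change requests in a dedicated goroutine,
    // for as long as the channel stays open.
    requests := make(chan *raftmembership.ChangeRequest)
    go raftmembership.HandleChangeRequests(r, requests)
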
diff --git a/vendor/github.com/CanonicalLtd/raft-membership/request.go b/vendor/github.com/CanonicalLtd/raft-membership/request.go
new file mode 100644
index 0000000000..59efaa44f7
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-membership/request.go
@@ -0,0 +1,113 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftmembership
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/raft"
+)
+
+// ChangeRequest represents a request to change a server's membership in
+// a raft cluster (either join or leave).
+//
+// A requesting server uses an implementation of the membership Changer
+// interface to connect to a target server through some network transport layer
+// and to ask to join or leave the target server's cluster. The target server
+// internally uses ChangeRequest and HandleChangeRequests as helpers to
+// implement handlers to process such requests coming from the network
+// transport layer.
+type ChangeRequest struct {
+	id      raft.ServerID      // ID of the server requesting the membership change
+	address raft.ServerAddress // Address of the server requesting the membership change
+	kind    ChangeRequestKind  // Kind of membership change being requested
+	done    chan error         // Notify client code of request success or failure
+}
+
+// NewJoinRequest creates a new membership ChangeRequest to join a
+// cluster.
+func NewJoinRequest(id raft.ServerID, address raft.ServerAddress) *ChangeRequest {
+	return &ChangeRequest{
+		id:      id,
+		address: address,
+		kind:    JoinRequest,
+		done:    make(chan error, 1),
+	}
+}
+
+// NewLeaveRequest creates a new membership ChangeRequest to leave a
+// cluster.
+func NewLeaveRequest(id raft.ServerID) *ChangeRequest {
+	return &ChangeRequest{
+		id:   id,
+		kind: LeaveRequest,
+		done: make(chan error, 1),
+	}
+}
+
+// ID of the server requesting to change its membership.
+func (r *ChangeRequest) ID() raft.ServerID {
+	return r.id
+}
+
+// Address of the server requesting to change its membership.
+func (r *ChangeRequest) Address() raft.ServerAddress {
+	return r.address
+}
+
+// Kind is the type of membership change requested, either join or leave.
+func (r *ChangeRequest) Kind() ChangeRequestKind {
+	return r.kind
+}
+
+// Error blocks until this ChangeRequest is fully processed or the given
+// timeout is reached, and returns any error hit while handling the request,
+// or nil if none occurred.
+func (r *ChangeRequest) Error(timeout time.Duration) error {
+	var err error
+	select {
+	case err = <-r.done:
+	case <-time.After(timeout):
+		err = fmt.Errorf("timeout waiting for membership change")
+	}
+	return err
+}
+
+// Done should be invoked by the code handling this request (such as
+// HandleChangeRequests) to notify callers that it has been
+// processed, either successfully or not.
+func (r *ChangeRequest) Done(err error) {
+	r.done <- err
+	close(r.done)
+}
+
+// ChangeRequestKind is the kind of membership change being requested.
+type ChangeRequestKind int
+
+func (k ChangeRequestKind) String() string {
+	return changeRequestKindToString[k]
+}
+
+// Possible values for ChangeRequestKind
+const (
+	JoinRequest ChangeRequestKind = iota
+	LeaveRequest
+)
+
+var changeRequestKindToString = []string{
+	JoinRequest:  "join",
+	LeaveRequest: "leave",
+}
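
The Done/Error pair forms a small rendezvous between the handler and the requester. A self-contained sketch of the handshake; the ID and address are placeholders:

    req := raftmembership.NewJoinRequest("node-2", "10.0.0.2:9000")

    // Handler side: process the request and signal completion
    // (a real handler would call raft.AddVoter here; we just succeed).
    go func() {
        req.Done(nil)
    }()

    // Requester side: block until processed or timed out.
    if err := req.Error(time.Second); err != nil {
        log.Fatal(err)
    }
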
diff --git a/vendor/github.com/CanonicalLtd/raft-test/AUTHORS b/vendor/github.com/CanonicalLtd/raft-test/AUTHORS
new file mode 100644
index 0000000000..6e13f86ebb
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/AUTHORS
@@ -0,0 +1 @@
+Free Ekanayaka <free.ekanayaka at canonical.com>
diff --git a/vendor/github.com/CanonicalLtd/raft-test/LICENSE b/vendor/github.com/CanonicalLtd/raft-test/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/CanonicalLtd/raft-test/README.md b/vendor/github.com/CanonicalLtd/raft-test/README.md
new file mode 100644
index 0000000000..7a6b645b52
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/README.md
@@ -0,0 +1,11 @@
+raft-test [![Build Status](https://travis-ci.org/CanonicalLtd/raft-test.png)](https://travis-ci.org/CanonicalLtd/raft-test) [![Coverage Status](https://coveralls.io/repos/github/CanonicalLtd/raft-test/badge.svg?branch=master)](https://coveralls.io/github/CanonicalLtd/raft-test?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/CanonicalLtd/raft-test)](https://goreportcard.com/report/github.com/CanonicalLtd/raft-test) [![GoDoc](https://godoc.org/github.com/CanonicalLtd/raft-test?status.svg)](https://godoc.org/github.com/CanonicalLtd/raft-test)
+=========
+
+This repository provides the `rafttest` package, which contains
+helpers to test code based on the `raft` Go [package](https://github.com/hashicorp/raft)
+from HashiCorp.
+
+Documentation
+==============
+
+The documentation for this package can be found on [Godoc](http://godoc.org/github.com/CanonicalLtd/raft-test).
diff --git a/vendor/github.com/CanonicalLtd/raft-test/cluster.go b/vendor/github.com/CanonicalLtd/raft-test/cluster.go
new file mode 100644
index 0000000000..c5db316c8e
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/cluster.go
@@ -0,0 +1,303 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/CanonicalLtd/raft-test/internal/election"
+	"github.com/CanonicalLtd/raft-test/internal/fsms"
+	"github.com/CanonicalLtd/raft-test/internal/logging"
+	"github.com/CanonicalLtd/raft-test/internal/network"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Cluster creates n raft servers, one for each of the given FSMs, and returns
+// a Control object that can be used to create deterministic test scenarios,
+// deciding which server is elected as leader and if and when a failure should
+// happen during its term.
+//
+// Each raft.Raft instance is created with sane test-oriented default
+// dependencies, which include:
+//
+// - very low configuration timeouts
+// - in-memory transports
+// - in-memory log and stable stores
+// - in-memory snapshot stores
+//
+// You can tweak the default dependencies using the Config, Transport and
+// LogStore options.
+//
+// All created raft servers will be part of the cluster and act as voting
+// servers, unless the Servers option is used.
+//
+// If a GO_RAFT_TEST_LATENCY environment variable is found, the default
+// timeouts will be scaled up accordingly (useful when running tests on slow
+// hardware). A latency of 1.0 is a no-op, since it just keeps the default
+// values unchanged. A value greater than 1.0 increases the default timeouts by
+// that factor. See also the Duration helper.
+func Cluster(t testing.TB, fsms []raft.FSM, options ...Option) (map[raft.ServerID]*raft.Raft, *Control) {
+	logger := logging.New(t, "DEBUG")
+	logger.Debug(fmt.Sprintf("[DEBUG] raft-test: setup: start (%d servers)", len(fsms)))
+
+	// Create a set of default dependencies for each server.
+	dependencies := make([]*dependencies, len(fsms))
+	for i, fsm := range fsms {
+		dependencies[i] = newDefaultDependencies(t, logger, i, fsm)
+	}
+
+	// Customize the default dependencies by applying the given options.
+	for _, option := range options {
+		option(dependencies)
+	}
+
+	// Honor the GO_RAFT_TEST_LATENCY env var, if set.
+	setTimeouts(dependencies)
+
+	// Instrument the Config of each server with a NotifyCh and return a
+	// leadership object for watching them.
+	leadership := instrumentConfigs(t, logger, dependencies)
+
+	// Instrument all servers by replacing their transports with transport
+	// wrappers, creating a network object to control them.
+	network := instrumentTransports(logger, dependencies)
+
+	// Instrument all servers by replacing their fsms with wrapper fsms,
+	// creating a watcher to observe them.
+	watcher := instrumentFSMs(logger, dependencies)
+
+	// Bootstrap the initial cluster configuration.
+	bootstrapCluster(t, logger, dependencies)
+
+	// Start the individual servers.
+	servers := make(map[raft.ServerID]*raft.Raft)
+	confs := make(map[raft.ServerID]*raft.Config)
+	for _, d := range dependencies {
+		id := d.Conf.LocalID
+		logger.Debug(fmt.Sprintf("[DEBUG] raft-test: setup: server %s: start", id))
+		raft, err := newRaft(d)
+		if err != nil {
+			t.Fatalf("raft-test: setup: error: server %s failed to start: %v", id, err)
+		}
+		confs[id] = d.Conf
+		servers[id] = raft
+	}
+
+	// Create the Control instance for this cluster
+	control := &Control{
+		t:        t,
+		logger:   logger,
+		election: leadership,
+		network:  network,
+		watcher:  watcher,
+		confs:    confs,
+		servers:  servers,
+	}
+
+	logger.Debug("[DEBUG] raft-test: setup: done")
+
+	return servers, control
+}
+
+// Option can be used to tweak the dependencies of test Raft servers created with
+// Cluster() or Server().
+type Option func([]*dependencies)
+
+// Hold the dependencies for a single raft server.
+type dependencies struct {
+	Conf          *raft.Config
+	FSM           raft.FSM
+	Logs          raft.LogStore
+	Stable        raft.StableStore
+	Snaps         raft.SnapshotStore
+	Configuration *raft.Configuration
+	Trans         raft.Transport
+	Voter         bool // Whether this is a voting server in the initial configuration
+}
+
+// Create default dependencies for a single raft server.
+func newDefaultDependencies(t testing.TB, logger hclog.Logger, i int, fsm raft.FSM) *dependencies {
+	// Use the server's index as its server ID and address.
+	addr := strconv.Itoa(i)
+	_, transport := raft.NewInmemTransport(raft.ServerAddress(addr))
+
+	conf := raft.DefaultConfig()
+	conf.LocalID = raft.ServerID(addr)
+	conf.Logger = logger
+
+	// Set low timeouts.
+	conf.HeartbeatTimeout = 15 * time.Millisecond
+	conf.ElectionTimeout = 15 * time.Millisecond
+	conf.CommitTimeout = 1 * time.Millisecond
+	conf.LeaderLeaseTimeout = 10 * time.Millisecond
+
+	// Set very high values to prevent snapshots from happening randomly.
+	conf.SnapshotInterval = 24 * time.Hour
+	conf.SnapshotThreshold = 4096
+
+	// Set the snapshot to retain only one log, since the most common use
+	// case is to test an FSM restore from a snapshot.
+	conf.TrailingLogs = 1
+
+	store := raft.NewInmemStore()
+	return &dependencies{
+		Conf:   conf,
+		FSM:    fsm,
+		Logs:   store,
+		Stable: store,
+		Snaps:  raft.NewInmemSnapshotStore(),
+		Trans:  transport,
+		Voter:  true,
+	}
+}
+
+// Set scaled timeouts on all servers, to match GO_RAFT_TEST_LATENCY (if set).
+func setTimeouts(dependencies []*dependencies) {
+	for _, d := range dependencies {
+		d.Conf.HeartbeatTimeout = Duration(d.Conf.HeartbeatTimeout)
+		d.Conf.ElectionTimeout = Duration(d.Conf.ElectionTimeout)
+		d.Conf.CommitTimeout = Duration(d.Conf.CommitTimeout)
+		d.Conf.LeaderLeaseTimeout = Duration(d.Conf.LeaderLeaseTimeout)
+	}
+}
+
+// Set leader notification channels on all servers.
+func instrumentConfigs(t testing.TB, logger hclog.Logger, dependencies []*dependencies) *election.Tracker {
+	t.Helper()
+
+	tracker := election.NewTracker(logger)
+
+	for _, d := range dependencies {
+		id := d.Conf.LocalID
+		if d.Conf.NotifyCh != nil {
+			t.Fatalf("raft-test: setup: error: found NotifyCh on server %s set via Config option", id)
+		}
+		// Use an unbuffered channel, so raft will block on us.
+		notifyCh := make(chan bool)
+		d.Conf.NotifyCh = notifyCh
+		tracker.Track(id, notifyCh)
+	}
+
+	return tracker
+}
+
+// Replace the dependencies.Trans object on each server with a faulty transport
+// that wraps the real transport. Return a network object that knows about
+// these wrappers and can inject various kinds of failures.
+func instrumentTransports(logger hclog.Logger, dependencies []*dependencies) *network.Network {
+	// Connect to each other all the servers that use a LoopbackTransport
+	// (the default). However, actual connectivity control will be
+	// performed by the network object.
+	connectLoopbackTransports(dependencies)
+
+	network := network.New(logger)
+
+	for _, d := range dependencies {
+		d.Trans = network.Add(d.Conf.LocalID, d.Trans)
+	}
+
+	return network
+}
+
+// Replace the dependencies.FSM object on each server with a wrapper FSM that
+// wraps the real FSM. Return a watcher object that can be used to get notified
+// of various events.
+func instrumentFSMs(logger hclog.Logger, dependencies []*dependencies) *fsms.Watcher {
+	watcher := fsms.New(logger)
+
+	for _, d := range dependencies {
+		d.FSM = watcher.Add(d.Conf.LocalID, d.FSM)
+	}
+
+	return watcher
+}
+
+// Connect loopback transports from servers that have them.
+func connectLoopbackTransports(dependencies []*dependencies) {
+	loopbacks := make([]raft.LoopbackTransport, 0)
+	for _, d := range dependencies {
+		loopback, ok := d.Trans.(raft.LoopbackTransport)
+		if ok {
+			loopbacks = append(loopbacks, loopback)
+		}
+	}
+
+	for i, t1 := range loopbacks {
+		for j, t2 := range loopbacks {
+			if i == j {
+				continue
+			}
+			t1.Connect(t2.LocalAddr(), t2)
+			t2.Connect(t1.LocalAddr(), t1)
+		}
+	}
+}
+
+// Bootstrap the cluster, including each voting server in the initial
+// configuration.
+func bootstrapCluster(t testing.TB, logger hclog.Logger, dependencies []*dependencies) {
+	t.Helper()
+
+	// Figure out which servers should be part of the initial
+	// configuration.
+	servers := make([]raft.Server, 0)
+	for _, d := range dependencies {
+		id := d.Conf.LocalID
+		if !d.Voter {
+			// If the server is not initially part of the cluster,
+			// there's nothing to do.
+			logger.Debug(fmt.Sprintf("[DEBUG] raft-test: setup: server %s: skip bootstrap (not part of initial configuration)", id))
+			continue
+		}
+		server := raft.Server{
+			ID:      id,
+			Address: d.Trans.LocalAddr(),
+		}
+		servers = append(servers, server)
+	}
+
+	// Create the initial cluster configuration.
+	configuration := raft.Configuration{Servers: servers}
+	for i := 0; i < len(dependencies); i++ {
+		d := dependencies[i]
+		id := d.Conf.LocalID
+		if !d.Voter {
+			continue
+		}
+		logger.Debug(fmt.Sprintf("[DEBUG] raft-test: setup: server %s: bootstrap", id))
+		err := raft.BootstrapCluster(
+			d.Conf,
+			d.Logs,
+			d.Stable,
+			d.Snaps,
+			d.Trans,
+			configuration,
+		)
+		if err != nil {
+			t.Fatalf("raft-test: setup: error: server %s failed to bootstrap: %v", id, err)
+		}
+	}
+}
+
+// Convenience around raft.NewRaft for creating a new Raft instance using the
+// given dependencies.
+func newRaft(d *dependencies) (*raft.Raft, error) {
+	return raft.NewRaft(d.Conf, d.FSM, d.Logs, d.Stable, d.Snaps, d.Trans)
+}
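
Putting the pieces together, a test built on this package would look roughly like the sketch below; newTestFSM stands in for an application-provided raft.FSM and is an assumption, not part of the package:

    func TestApply(t *testing.T) {
        fsms := []raft.FSM{newTestFSM(), newTestFSM(), newTestFSM()}
        rafts, control := rafttest.Cluster(t, fsms)
        defer control.Close()

        // Server IDs default to the FSM indexes: "0", "1", "2".
        control.Elect("0")

        // Apply a command through the leader and wait for it to be
        // committed to the connected followers' FSMs as well.
        err := rafts["0"].Apply([]byte("hello"), time.Second).Error()
        if err != nil {
            t.Fatal(err)
        }
        control.Barrier()
    }
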
diff --git a/vendor/github.com/CanonicalLtd/raft-test/control.go b/vendor/github.com/CanonicalLtd/raft-test/control.go
new file mode 100644
index 0000000000..2299072b04
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/control.go
@@ -0,0 +1,519 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/CanonicalLtd/raft-test/internal/election"
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/CanonicalLtd/raft-test/internal/fsms"
+	"github.com/CanonicalLtd/raft-test/internal/network"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Control the events happening in a cluster of raft servers, such as
+// leadership changes, failures and shutdowns.
+type Control struct {
+	t        testing.TB
+	logger   hclog.Logger
+	election *election.Tracker
+	network  *network.Network
+	watcher  *fsms.Watcher
+	confs    map[raft.ServerID]*raft.Config
+	servers  map[raft.ServerID]*raft.Raft
+	errored  bool
+	deposing chan struct{}
+
+	// Current Term after Elect() was called, if any.
+	term *Term
+
+	// Future of any pending snapshot that has been scheduled with an
+	// event.
+	snapshotFuture raft.SnapshotFuture
+}
+
+// Close the control for this raft cluster, shutting down all servers and
+// stopping all monitoring goroutines.
+//
+// It must be called by every test creating a test cluster with Cluster().
+func (c *Control) Close() {
+	c.logger.Debug("[DEBUG] raft-test: close: start")
+
+	// First tell the election tracker that we don't care anymore about
+	// notifications. Any value received from the NotifyCh's will be dropped
+	// on the floor.
+	c.election.Ignore()
+
+	// Now shutdown the servers.
+	c.shutdownServers()
+
+	// Finally shutdown the election tracker since nothing will be
+	// sending to NotifyCh's.
+	c.election.Close()
+
+	c.logger.Debug("[DEBUG] raft-test: close: done")
+}
+
+// Elect a server as leader.
+//
+// When calling this method there must be no leader in the cluster and server
+// transports must all be disconnected from each other.
+func (c *Control) Elect(id raft.ServerID) *Term {
+	c.t.Helper()
+
+	c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: start (server %s)", id))
+
+	// Wait for the current leader (if any) to be fully deposed.
+	if c.deposing != nil {
+		<-c.deposing
+	}
+
+	// Sanity check that no server is the leader.
+	for id, r := range c.servers {
+		if r.State() == raft.Leader {
+			c.t.Fatalf("raft-test: error: cluster has already a leader (server %s)", id)
+		}
+	}
+
+	// We might need to repeat the logic below a few times in case a
+	// follower hits its heartbeat timeout before the leader has a chance
+	// to append entries to it and refresh the last contact timestamp
+	// (hence transitioning to candidate and starting a new election).
+	for n := 0; n < maxElectionRounds; n++ {
+		leadership := c.waitLeadershipAcquired(id)
+
+		// We did not acquire leadership, let's retry.
+		if leadership == nil {
+			if n < maxElectionRounds {
+				c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: retry %d ", id, n+1))
+				continue
+			}
+		}
+
+		// The given node became the leader, let's make sure
+		// that leadership is stable and that other nodes
+		// become followers.
+		if !c.waitLeadershipPropagated(id, leadership) {
+			if n < maxElectionRounds {
+				c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: retry %d ", id, n+1))
+				continue
+			}
+		}
+		// Now establish all remaining connections. E.g. for three nodes:
+		//
+		// L  <--- F1
+		// L  <--- F2
+		//
+		// and:
+		//
+		// F1 <--- F2
+		// F1 ---> F2
+		//
+		// This way the cluster is fully connected.
+		c.logger.Debug("[DEBUG] raft-test: elect: done")
+		term := &Term{
+			control:    c,
+			id:         id,
+			leadership: leadership,
+		}
+		c.term = term
+
+		return term
+	}
+	c.t.Fatalf("raft-test: server %s: did not acquire stable leadership", id)
+
+	return nil
+}
+
+// Barrier is used to wait for the cluster to settle to a stable state, where
+// all in-progress Apply() commands are committed across all FSMs associated
+// with servers that are not disconnected, and all in-progress snapshots and
+// restores have been performed.
+//
+// Usually you don't want to keep invoking Apply() concurrently on the
+// cluster's raft instances while Barrier() is running.
+func (c *Control) Barrier() {
+	// Wait for snapshots to complete.
+	if c.snapshotFuture != nil {
+		if err := c.snapshotFuture.Error(); err != nil {
+			c.t.Fatalf("raft-test: snapshot failed: %v", err)
+		}
+	}
+
+	// Wait for inflight commands to be applied to the leader's FSM.
+	if c.term.id != "" {
+		// Set a relatively high timeout.
+		//
+		// TODO: let users specify the maximum amount of time a single
+		// Apply() to their FSM should take, and calculate this value
+		// accordingly.
+		timeout := Duration(time.Second)
+
+		if err := c.servers[c.term.id].Barrier(timeout).Error(); err != nil {
+			c.t.Fatalf("raft-test: leader barrier: %v", err)
+		}
+
+		// Wait for follower FSMs to catch up.
+		n := c.Commands(c.term.id)
+		events := make([]*event.Event, 0)
+		for id := range c.servers {
+			if id == c.term.id {
+				continue
+			}
+			// Skip disconnected followers.
+			if !c.network.PeerConnected(c.term.id, id) {
+				continue
+			}
+			event := c.watcher.WhenApplied(id, n)
+			events = append(events, event)
+		}
+		for _, event := range events {
+			<-event.Watch()
+			event.Ack()
+		}
+	}
+}
+
+// Depose the current leader.
+//
+// When calling this method a leader must have been previously elected with
+// Elect().
+//
+// It must not be called if the current term has scheduled a depose action with
+// Action.Depose().
+func (c *Control) Depose() {
+	event := event.New()
+	go c.deposeUponEvent(event, c.term.id, c.term.leadership)
+	event.Fire()
+	event.Block()
+}
+
+// Commands returns the total number of command logs applied by the FSM of the
+// server with the given ID.
+func (c *Control) Commands(id raft.ServerID) uint64 {
+	return c.watcher.Commands(id)
+}
+
+// Snapshots returns the total number of snapshots performed by the FSM of the
+// server with the given ID.
+func (c *Control) Snapshots(id raft.ServerID) uint64 {
+	return c.watcher.Snapshots(id)
+}
+
+// Restores returns the total number of restores performed by the FSM of the
+// server with the given ID.
+func (c *Control) Restores(id raft.ServerID) uint64 {
+	return c.watcher.Restores(id)
+}
+
+// Shut down all raft nodes and fail the test if any of them errors out while
+// doing so.
+func (c *Control) shutdownServers() {
+	// Find the leader if there is one, and shut it down first. This should
+	// prevent it from getting stuck on shutdown while trying to send RPCs
+	// to the followers.
+	//
+	// TODO: this is arguably a workaround for a bug in the transport
+	// wrapper.
+	ids := make([]raft.ServerID, 0)
+	for id, r := range c.servers {
+		if r.State() == raft.Leader {
+			c.shutdownServer(id)
+			ids = append(ids, id)
+		}
+	}
+
+	// Shut down the rest.
+	for id := range c.servers {
+		hasShutdown := false
+		for i := range ids {
+			if ids[i] == id {
+				hasShutdown = true
+				break
+			}
+		}
+		if !hasShutdown {
+			c.shutdownServer(id)
+			ids = append(ids, id)
+		}
+	}
+}
+
+// Shut down a single server.
+func (c *Control) shutdownServer(id raft.ServerID) {
+	r := c.servers[id]
+	future := r.Shutdown()
+
+	// Expect the shutdown to happen within two seconds by default.
+	timeout := Duration(2 * time.Second)
+
+	// Watch for errors.
+	ch := make(chan error, 1)
+	go func(future raft.Future) {
+		ch <- future.Error()
+	}(future)
+
+	var err error
+	select {
+	case err = <-ch:
+		c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: close: server %s: shutdown done", id))
+	case <-time.After(timeout):
+		err = fmt.Errorf("timeout (%s)", timeout)
+	}
+	if err == nil {
+		return
+	}
+
+	c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: close: server %s: shutdown failed: %s", id, err))
+
+	buf := make([]byte, 1<<16)
+	n := runtime.Stack(buf, true)
+
+	c.t.Errorf("\n\t%s", buf[:n])
+	c.t.Fatalf("raft-test: close: error: server %s: shutdown error: %v", id, err)
+}
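
The goroutine-plus-channel dance above is the usual way to bolt a timeout onto raft.Future, which has no context support. Extracted as a standalone sketch (the helper name is invented; assumes the fmt, time and hashicorp/raft imports):

    // waitFuture waits for a raft.Future to complete, giving up after the
    // given timeout, since future.Error() alone blocks indefinitely.
    func waitFuture(future raft.Future, timeout time.Duration) error {
    	ch := make(chan error, 1)
    	go func() {
    		ch <- future.Error()
    	}()
    	select {
    	case err := <-ch:
    		return err
    	case <-time.After(timeout):
    		return fmt.Errorf("timeout (%s)", timeout)
    	}
    }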
+
+// Wait for the given server to acquire leadership. Returns a Leadership
+// object on success, or nil if the timeout expires.
+func (c *Control) waitLeadershipAcquired(id raft.ServerID) *election.Leadership {
+	timeout := maximumElectionTimeout(c.confs) * maxElectionRounds
+	future := c.election.Expect(id, timeout)
+
+	c.watcher.Electing(id)
+
+	// Reset any leader-related state on the transport of the given server
+	// and connect it to all other servers, letting it send them RPC
+	// messages but not vice versa. E.g. for three nodes:
+	//
+	// L ---> F1
+	// L ---> F2
+	//
+	// This way we are sure we are the only server that can possibly acquire
+	// leadership.
+	c.network.Electing(id)
+
+	// First wait for the given node to become leader.
+	c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: wait to become leader within %s", id, timeout))
+
+	leadership, err := future.Done()
+	if err != nil {
+		c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: did not become leader", id))
+	}
+	return leadership
+}
+
+// Wait until the leadership just acquired by the server with the given ID is
+// acknowledged by all other servers, and until they all permanently
+// transition to the follower state.
+func (c *Control) waitLeadershipPropagated(id raft.ServerID, leadership *election.Leadership) bool {
+	// The leadership propagation needs to happen within the leader lease
+	// timeout, otherwise the newly elected leader will step down.
+	timeout := maximumLeaderLeaseTimeout(c.confs)
+	c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: wait for other servers to become followers within %s", id, timeout))
+
+	// Get the current configuration, so we wait only for servers that are
+	// actually currently part of the cluster (some of them might have been
+	// excluded with the Servers option).
+	r := c.servers[id]
+	future := r.GetConfiguration()
+	if err := future.Error(); err != nil {
+		c.t.Fatalf("raft-test: control: server %s: failed to get configuration: %v", id, err)
+	}
+	servers := future.Configuration().Servers
+
+	timer := time.After(timeout)
+	address := c.network.Address(id)
+	for _, server := range servers {
+		other := server.ID
+		if other == id {
+			continue
+		}
+		r := c.servers[server.ID]
+		for {
+			// Check that we didn't lose leadership in the meantime.
+			select {
+			case <-leadership.Lost():
+				c.network.Deposing(id)
+				c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: lost leadership", id))
+				return false
+			case <-timer:
+				c.t.Fatalf("raft-test: elect: server %s: followers did not settle", id)
+			default:
+			}
+
+			// Check that this server is in follower mode, that it
+			// has set the elected server as leader and that we were
+			// able to append at least one log entry to it (when a
+			// server becomes leader, it always sends a LogNoop).
+			if r.State() == raft.Follower && r.Leader() == address && c.network.HasAppendedLogsFromTo(id, other) {
+				c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: elect: server %s: became follower", other))
+				break
+			}
+			time.Sleep(time.Millisecond)
+		}
+	}
+
+	return true
+}
+
+// Return an event that gets fired when the n'th log command gets enqueued by
+// the given leader server.
+func (c *Control) whenCommandEnqueued(id raft.ServerID, n uint64) *event.Event {
+	return c.network.ScheduleEnqueueFailure(id, n)
+}
+
+// Return an event that gets fired when the n'th log command gets appended by
+// server with the given ID (which is supposed to be the leader) to all other
+// servers.
+func (c *Control) whenCommandAppended(id raft.ServerID, n uint64) *event.Event {
+	return c.network.ScheduleAppendFailure(id, n)
+}
+
+// Return an event that gets fired when the n'th log command gets committed on
+// server with the given ID (which is supposed to be the leader).
+func (c *Control) whenCommandCommitted(id raft.ServerID, n uint64) *event.Event {
+	return c.watcher.WhenApplied(id, n)
+}
+
+// Depose the server with the given ID when the given event fires.
+func (c *Control) deposeUponEvent(event *event.Event, id raft.ServerID, leadership *election.Leadership) {
+	// Sanity checks.
+	r := c.servers[id]
+	if r.State() != raft.Leader {
+		panic(fmt.Errorf("raft-test: server %s: is not leader", id))
+	}
+
+	<-event.Watch()
+
+	c.network.Deposing(id)
+
+	timeout := maximumLeaderLeaseTimeout(c.confs)
+
+	c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: node %s: state: wait leadership lost (timeout=%s)", id, timeout))
+
+	select {
+	case <-leadership.Lost():
+	case <-time.After(timeout):
+		c.t.Errorf("raft-test: server %s: error: timeout: leadership not lost", id)
+		c.errored = true
+	}
+	event.Ack()
+
+	if !c.errored {
+		c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: leadership lost", id))
+	}
+
+	c.deposing <- struct{}{}
+	c.deposing = nil
+	c.term = nil
+}
+
+// Take a snapshot on the server with the given ID when the given event fires.
+func (c *Control) snapshotUponEvent(event *event.Event, id raft.ServerID) {
+	<-event.Watch()
+
+	c.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: control: take snapshot", id))
+
+	r := c.servers[id]
+	c.snapshotFuture = r.Snapshot()
+
+	event.Ack()
+}
+
+// Compute the maximum time a leader election should take, according to the
+// given nodes configs.
+func maximumElectionTimeout(confs map[raft.ServerID]*raft.Config) time.Duration {
+	timeout := time.Duration(0)
+
+	for _, conf := range confs {
+		if conf.ElectionTimeout > timeout {
+			timeout = conf.ElectionTimeout
+		}
+	}
+
+	return timeout * timeoutRandomizationFactor
+}
+
+// Return the maximum leader lease timeout among the given nodes configs.
+func maximumLeaderLeaseTimeout(confs map[raft.ServerID]*raft.Config) time.Duration {
+	timeout := time.Duration(0)
+
+	for _, conf := range confs {
+		if conf.LeaderLeaseTimeout > timeout {
+			timeout = conf.LeaderLeaseTimeout
+		}
+	}
+
+	// Multiply the timeout by the randomization factor to account for
+	// raft's timeout randomization.
+	return timeout * timeoutRandomizationFactor
+}
+
+const (
+	// Assume that a leader is elected within 25 rounds. Should be safe enough.
+	maxElectionRounds = 25
+
+	// Hashicorp's raft implementation randomizes timeouts between 1x and
+	// 2x. Multiplying by 4x makes sure the timeout expires.
+	timeoutRandomizationFactor = 4
+)
+
+// WaitLeader blocks until the given raft instance sets a leader (which
+// could possibly be the instance itself).
+//
+// It fails the test if this doesn't happen within the specified timeout.
+func WaitLeader(t testing.TB, raft *raft.Raft, timeout time.Duration) {
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	waitLeader(ctx, t, raft)
+}
+
+func waitLeader(ctx context.Context, t testing.TB, raft *raft.Raft) {
+	t.Helper()
+
+	check := func() bool {
+		return raft.Leader() != ""
+	}
+	wait(ctx, t, check, 25*time.Millisecond, "no leader was set")
+}
+
+// Poll the given function at the given interval, until it returns true or
+// the given context expires.
+func wait(ctx context.Context, t testing.TB, f func() bool, interval time.Duration, message string) {
+	t.Helper()
+
+	start := time.Now()
+	for {
+		select {
+		case <-ctx.Done():
+			if err := ctx.Err(); err == context.Canceled {
+				return
+			}
+			t.Fatalf("%s within %s", message, time.Since(start))
+		default:
+		}
+		if f() {
+			return
+		}
+		time.Sleep(interval)
+	}
+}
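
A minimal sketch of how WaitLeader() is meant to be used from a test, under the same assumed Cluster() API as the earlier examples:

    func TestWaitLeader(t *testing.T) {
    	rafts, control := rafttest.Cluster(t, rafttest.FSMs(3))
    	defer control.Cleanup()
    	control.Elect("0")

    	// Every instance should observe the elected leader within a second.
    	for _, r := range rafts {
    		rafttest.WaitLeader(t, r, time.Second)
    	}
    }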
diff --git a/vendor/github.com/CanonicalLtd/raft-test/duration.go b/vendor/github.com/CanonicalLtd/raft-test/duration.go
new file mode 100644
index 0000000000..a6142aa44f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/duration.go
@@ -0,0 +1,45 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"fmt"
+	"math"
+	"os"
+	"strconv"
+	"time"
+)
+
+// Duration is a convenience that scales the given duration according to the
+// GO_RAFT_TEST_LATENCY environment variable.
+func Duration(duration time.Duration) time.Duration {
+	factor := 1.0
+	if env := os.Getenv("GO_RAFT_TEST_LATENCY"); env != "" {
+		var err error
+		factor, err = strconv.ParseFloat(env, 64)
+		if err != nil {
+			panic(fmt.Sprintf("invalid value '%s' for GO_RAFT_TEST_LATENCY", env))
+		}
+	}
+	return scaleDuration(duration, factor)
+}
+
+func scaleDuration(duration time.Duration, factor float64) time.Duration {
+	if factor == 1.0 {
+		return duration
+	}
+
+	return time.Duration(math.Ceil(float64(duration) * factor))
+}
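
A runnable sketch of the scaling behaviour (the 2.5 factor is an arbitrary example): with GO_RAFT_TEST_LATENCY=2.5, a one-second timeout becomes 2.5 seconds, rounded up by scaleDuration():

    package main

    import (
    	"fmt"
    	"os"
    	"time"

    	rafttest "github.com/CanonicalLtd/raft-test"
    )

    func main() {
    	// Pretend this host is 2.5x slower than the baseline.
    	os.Setenv("GO_RAFT_TEST_LATENCY", "2.5")

    	// Every timeout passed through Duration() scales accordingly.
    	fmt.Println(rafttest.Duration(time.Second)) // 2.5s
    }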
diff --git a/vendor/github.com/CanonicalLtd/raft-test/fsm.go b/vendor/github.com/CanonicalLtd/raft-test/fsm.go
new file mode 100644
index 0000000000..cd3bf5df33
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/fsm.go
@@ -0,0 +1,60 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"io"
+
+	"github.com/hashicorp/raft"
+)
+
+// FSM creates a dummy FSM.
+func FSM() raft.FSM {
+	return &fsm{}
+}
+
+// FSMs creates the given number of dummy FSMs.
+func FSMs(n int) []raft.FSM {
+	fsms := make([]raft.FSM, n)
+	for i := range fsms {
+		fsms[i] = FSM()
+	}
+	return fsms
+}
+
+// fsm is a dummy raft finite state machine whose methods all do nothing and
+// always succeed.
+type fsm struct{}
+
+// Apply always returns a nil error without doing anything.
+func (f *fsm) Apply(*raft.Log) interface{} { return nil }
+
+// Snapshot always returns a dummy snapshot and a nil error without doing
+// anything.
+func (f *fsm) Snapshot() (raft.FSMSnapshot, error) { return &fsmSnapshot{}, nil }
+
+// Restore always returns a nil error without reading anything from
+// the reader.
+func (f *fsm) Restore(io.ReadCloser) error { return nil }
+
+// fsmSnapshot is a dummy implementation of an fsm snapshot.
+type fsmSnapshot struct{}
+
+// Persist always returns a nil error without writing anything
+// to the sink.
+func (s *fsmSnapshot) Persist(sink raft.SnapshotSink) error { return nil }
+
+// Release is a no-op.
+func (s *fsmSnapshot) Release() {}
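
These dummies are the smallest possible implementations of hashicorp/raft's FSM and FSMSnapshot interfaces. A slightly less trivial FSM that actually tracks state could look like this sketch (counterFSM is invented, not part of the patch; it assumes the same package and imports as above):

    // counterFSM counts the commands applied to it.
    type counterFSM struct {
    	count uint64
    }

    func (f *counterFSM) Apply(log *raft.Log) interface{} {
    	f.count++
    	return f.count
    }

    func (f *counterFSM) Snapshot() (raft.FSMSnapshot, error) {
    	return &fsmSnapshot{}, nil // reuse the dummy snapshot above
    }

    func (f *counterFSM) Restore(reader io.ReadCloser) error {
    	defer reader.Close()
    	f.count = 0
    	return nil
    }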
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/election/future.go b/vendor/github.com/CanonicalLtd/raft-test/internal/election/future.go
new file mode 100644
index 0000000000..5adcf16f8f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/election/future.go
@@ -0,0 +1,61 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package election
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/raft"
+)
+
+// Future represents a request to acquire leadership that will eventually
+// succeed or fail.
+type Future struct {
+	// ID of the raft server that should acquire leadership.
+	id raft.ServerID
+
+	// If leadership is not acquired within this timeout, the future fails.
+	timeout time.Duration
+
+	// Notification about leadership being acquired.
+	acquiredCh chan struct{}
+
+	// Notification about leadership being lost.
+	lostCh chan struct{}
+}
+
+// Create a new leadership future for the given server.
+func newFuture(id raft.ServerID, timeout time.Duration) *Future {
+	future := &Future{
+		id:         id,
+		timeout:    timeout,
+		acquiredCh: make(chan struct{}),
+		lostCh:     make(chan struct{}),
+	}
+	return future
+}
+
+// Done returns a Leadership object if leadership was acquired within the
+// timeout, or an error otherwise.
+func (f *Future) Done() (*Leadership, error) {
+	select {
+	case <-f.acquiredCh:
+		leadership := newLeadership(f.id, f.lostCh)
+		return leadership, nil
+	case <-time.After(f.timeout):
+		return nil, fmt.Errorf("server %s: leadership not acquired within %s", f.id, f.timeout)
+	}
+}
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/election/leadership.go b/vendor/github.com/CanonicalLtd/raft-test/internal/election/leadership.go
new file mode 100644
index 0000000000..b54885de92
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/election/leadership.go
@@ -0,0 +1,43 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package election
+
+import (
+	"github.com/hashicorp/raft"
+)
+
+// Leadership represents the leadership acquired by a server that was elected
+// as leader. It exposes methods to be notified about its loss, with the server
+// stepping down as leader.
+type Leadership struct {
+	// ID of the raft server that acquired the leadership.
+	id raft.ServerID
+
+	// Notification about leadership being lost.
+	lostCh chan struct{}
+}
+
+// Create a new leadership object.
+func newLeadership(id raft.ServerID, lostCh chan struct{}) *Leadership {
+	return &Leadership{
+		id:     id,
+		lostCh: lostCh,
+	}
+}
+
+// Lost returns a channel that gets closed when leadership is lost.
+func (l *Leadership) Lost() chan struct{} {
+	return l.lostCh
+}
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/election/notifier.go b/vendor/github.com/CanonicalLtd/raft-test/internal/election/notifier.go
new file mode 100644
index 0000000000..7ec4213f30
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/election/notifier.go
@@ -0,0 +1,149 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package election
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Notify about leadership changes in a single raft server.
+type notifier struct {
+	// For debugging raft-test itself or its consumers.
+	logger hclog.Logger
+
+	// ID of the raft server we're observing.
+	id raft.ServerID
+
+	// Reference to the Config.NotifyCh object set in this server's Config.
+	notifyCh chan bool
+
+	// Channel used to tell the notification loop to expect the server to
+	// acquire leadership. The leadership future sent to this channel will
+	// be used to notify both leadership acquisition and leadership loss.
+	futureCh chan *Future
+
+	// Channel used to tell the notification loop to ignore any
+	// notification received from the notifyCh.
+	ignoreCh chan struct{}
+
+	// Stop observing leadership changes when this channel gets closed.
+	shutdownCh chan struct{}
+}
+
+// Create a new notifier.
+func newNotifier(logger hclog.Logger, id raft.ServerID, notifyCh chan bool) *notifier {
+	observer := &notifier{
+		logger:     logger,
+		id:         id,
+		notifyCh:   notifyCh,
+		futureCh:   make(chan *Future),
+		ignoreCh:   make(chan struct{}),
+		shutdownCh: make(chan struct{}),
+	}
+	go observer.start()
+	return observer
+}
+
+// Ignore any notifications received on the notifyCh.
+func (n *notifier) Ignore() {
+	close(n.ignoreCh)
+}
+
+// Close stops observing leadership changes.
+func (n *notifier) Close() {
+	n.shutdownCh <- struct{}{}
+	<-n.shutdownCh
+}
+
+// Acquired returns a Future that completes with a Leadership object when the
+// server acquires leadership, or with an error if the timeout expires.
+//
+// It must be called before this server has any chance to become leader
+// (e.g. it's disconnected from the other servers).
+//
+// Once called, it must not be called again until leadership is lost.
+func (n *notifier) Acquired(timeout time.Duration) *Future {
+	future := newFuture(n.id, timeout)
+	n.futureCh <- future
+	return future
+}
+
+// Start observing leadership changes using the notify channel of our server
+// and feed notifications to our consumers.
+//
+// The loop terminates once the shutdownCh is closed.
+func (n *notifier) start() {
+	// Record the last leadership change observation. For asserting that a
+	// leadership lost notification always follows a leadership acquired
+	// one.
+	var last bool
+
+	// Record the last request for leadership change for this server, if
+	// any.
+	var future *Future
+	for {
+		select {
+		case f := <-n.futureCh:
+			if future != nil {
+				panic(fmt.Sprintf("server %s: duplicate leadership request", n.id))
+			}
+			future = f
+		case acquired := <-n.notifyCh:
+			ignore := false
+			select {
+			case <-n.ignoreCh:
+				// Just drop the notification on the floor.
+				ignore = true
+			default:
+			}
+			if ignore {
+				break
+			}
+			if future == nil {
+				panic(fmt.Sprintf("server %s: unexpected leadership change", n.id))
+			}
+			verb := ""
+			var ch chan struct{}
+			if acquired {
+				verb = "acquired"
+				ch = future.acquiredCh
+			} else {
+				verb = "lost"
+				ch = future.lostCh
+				future = nil
+			}
+			if acquired == last {
+				panic(fmt.Sprintf("server %s %s leadership twice in a row", n.id, verb))
+			}
+			last = acquired
+			n.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: leadership: %s", n.id, verb))
+			select {
+			case <-ch:
+				panic(fmt.Sprintf("server %s: duplicate leadership %s notification", n.id, verb))
+			default:
+				close(ch)
+			}
+		case <-n.shutdownCh:
+			n.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: leadership: stop watching", n.id))
+			close(n.shutdownCh)
+			return
+		}
+	}
+}
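
The notifyCh consumed by this loop is hashicorp/raft's standard leadership notification channel, set on the server's Config. A wiring sketch (the tracker variable is hypothetical; Track() is defined in tracker.go below):

    notifyCh := make(chan bool)
    conf := raft.DefaultConfig()
    conf.LocalID = raft.ServerID("0")
    conf.NotifyCh = notifyCh

    // Hand the same channel to the election tracker, so the notifier
    // loop above observes this server's leadership changes.
    tracker.Track(conf.LocalID, notifyCh)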
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/election/tracker.go b/vendor/github.com/CanonicalLtd/raft-test/internal/election/tracker.go
new file mode 100644
index 0000000000..e8090380f1
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/election/tracker.go
@@ -0,0 +1,112 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package election
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Tracker consumes the raft.Config.NotifyCh set on each server of a cluster,
+// tracking when elections occur.
+type Tracker struct {
+	// For debugging raft-test itself or its consumers.
+	logger hclog.Logger
+
+	// Watchers for individual servers.
+	//
+	// Note that this map is not protected by a mutex, since it should be
+	// written once when the cluster is created, and never written again.
+	observers map[raft.ServerID]*notifier
+
+	// Flag indicating if Expect() has been called on this tracker. It's
+	// used as a sanity check that Track() is not called after the first
+	// call to Expect().
+	observing bool
+
+	// Current leadership future, if any. It's used as a sanity check to
+	// prevent further leadership requests.
+	future *Future
+
+	// Serialize access to internal state.
+	mu sync.Mutex
+}
+
+// NewTracker creates a new Tracker for watching leadership
+// changes in a raft cluster.
+func NewTracker(logger hclog.Logger) *Tracker {
+	return &Tracker{
+		logger:    logger,
+		observers: make(map[raft.ServerID]*notifier),
+	}
+}
+
+// Ignore stops propagating leadership change notifications, which will be
+// simply dropped on the floor. Should be called before the final Close().
+func (t *Tracker) Ignore() {
+	for _, observer := range t.observers {
+		observer.Ignore()
+	}
+}
+
+// Close stops watching for leadership changes in the cluster.
+func (t *Tracker) Close() {
+	for _, observer := range t.observers {
+		observer.Close()
+	}
+}
+
+// Track leadership changes on the server with the given ID using the given
+// Config.NotifyCh.
+func (t *Tracker) Track(id raft.ServerID, notifyCh chan bool) {
+	if t.observing {
+		panic("can't track new server while observing")
+	}
+	if _, ok := t.observers[id]; ok {
+		panic(fmt.Sprintf("an observer for server %s is already registered", id))
+	}
+	t.observers[id] = newNotifier(t.logger, id, notifyCh)
+}
+
+// Expect returns an election Future object whose Done() method will return
+// a Leadership object when the server with the given ID acquires leadership,
+// or an error if the given timeout expires.
+//
+// It must be called before this server has any chance to become leader
+// (e.g. it's disconnected from the other servers).
+//
+// Once called, it must not be called again until leadership is lost.
+func (t *Tracker) Expect(id raft.ServerID, timeout time.Duration) *Future {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.observing = true
+
+	if t.future != nil {
+		select {
+		case <-t.future.lostCh:
+			// Leadership was acquired, but has been lost, so let's proceed.
+			t.future = nil
+		default:
+			panic(fmt.Sprintf("server %s has already requested leadership", t.future.id))
+		}
+	}
+
+	t.future = t.observers[id].Acquired(timeout)
+	return t.future
+}
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/event/event.go b/vendor/github.com/CanonicalLtd/raft-test/internal/event/event.go
new file mode 100644
index 0000000000..9f73987ab4
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/event/event.go
@@ -0,0 +1,54 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package event
+
+// An Event occurs when a certain log command is either enqueued, appended
+// or committed. Events may be fired in the transport layer (i.e. in the
+// eventTransport wrappers) or in the state machine layer (i.e. in the eventFSM
+// wrapper).
+type Event struct {
+	fireCh chan struct{}
+	ackCh  chan struct{}
+}
+
+// New creates a new event.
+func New() *Event {
+	return &Event{
+		fireCh: make(chan struct{}),
+		ackCh:  make(chan struct{}),
+	}
+}
+
+// Watch the event. Return a channel that gets closed when the event gets
+// fired.
+func (e *Event) Watch() <-chan struct{} {
+	return e.fireCh
+}
+
+// Fire the event. A watcher on the event will be awakened.
+func (e *Event) Fire() {
+	close(e.fireCh)
+}
+
+// Block until the watcher of the event has acknowledged that the event has
+// been handled.
+func (e *Event) Block() {
+	<-e.ackCh
+}
+
+// Ack acknowledges that the event has been handled.
+func (e *Event) Ack() {
+	close(e.ackCh)
+}
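
Fire/Block on one side and Watch/Ack on the other form a two-phase handshake: the firing goroutine stays blocked until the watcher acknowledges. A standalone sketch (ignoring Go's internal-package import restriction for the sake of illustration):

    package main

    import (
    	"fmt"

    	"github.com/CanonicalLtd/raft-test/internal/event"
    )

    func main() {
    	e := event.New()

    	go func() {
    		<-e.Watch() // wake up when the event is fired
    		fmt.Println("handling event")
    		e.Ack() // unblock the firing side
    	}()

    	e.Fire()  // close fireCh, waking the watcher
    	e.Block() // wait for the watcher's Ack()
    }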
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/fsms/watcher.go b/vendor/github.com/CanonicalLtd/raft-test/internal/fsms/watcher.go
new file mode 100644
index 0000000000..f10aa1bffd
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/fsms/watcher.go
@@ -0,0 +1,77 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsms
+
+import (
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Watcher watches all FSMs of a cluster, firing events at certain moments.
+type Watcher struct {
+	logger hclog.Logger
+
+	// FSM wrappers.
+	fsms map[raft.ServerID]*fsmWrapper
+}
+
+// New creates a new FSMs watcher for watching the underlying FSMs.
+func New(logger hclog.Logger) *Watcher {
+	return &Watcher{
+		logger: logger,
+		fsms:   make(map[raft.ServerID]*fsmWrapper),
+	}
+}
+
+// Add an FSM to the watcher. Returns an FSM that wraps the given FSM with
+// instrumentation for firing events.
+func (w *Watcher) Add(id raft.ServerID, fsm raft.FSM) raft.FSM {
+	w.fsms[id] = newFSMWrapper(w.logger, id, fsm)
+	return w.fsms[id]
+}
+
+// WhenApplied returns an event that will fire when the n'th command log for
+// the term is applied on the FSM associated with the server with the given
+// ID. It's assumed that such server is currently the leader.
+func (w *Watcher) WhenApplied(id raft.ServerID, n uint64) *event.Event {
+	return w.fsms[id].whenApplied(n)
+}
+
+// Commands returns the total number of command logs applied by the FSM of
+// the server with the given ID.
+func (w *Watcher) Commands(id raft.ServerID) uint64 {
+	return w.fsms[id].Commands()
+}
+
+// Snapshots returns the total number of snapshots performed by the FSM of the
+// server with the given ID.
+func (w *Watcher) Snapshots(id raft.ServerID) uint64 {
+	return w.fsms[id].Snapshots()
+}
+
+// Restores returns the total number of restores performed by the FSM of the
+// server with the given ID.
+func (w *Watcher) Restores(id raft.ServerID) uint64 {
+	return w.fsms[id].Restores()
+}
+
+// Electing must be called whenever the given server is about to transition to
+// the leader state, and before any new command log is applied.
+//
+// It resets the internal state of the FSM, such as the commands counter.
+func (w *Watcher) Electing(id raft.ServerID) {
+	w.fsms[id].electing()
+}
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/fsms/wrapper.go b/vendor/github.com/CanonicalLtd/raft-test/internal/fsms/wrapper.go
new file mode 100644
index 0000000000..ea86a49550
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/fsms/wrapper.go
@@ -0,0 +1,188 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsms
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+	"github.com/pkg/errors"
+)
+
+// Wraps a raft.FSM, adding control on logs, snapshots and restores.
+type fsmWrapper struct {
+	logger hclog.Logger
+
+	// ID of of the raft server associated with this FSM.
+	id raft.ServerID
+
+	// Wrapped FSM
+	fsm raft.FSM
+
+	// Total number of commands applied by this FSM.
+	commands uint64
+
+	// Total number of snapshots performed on this FSM.
+	snapshots uint64
+
+	// Total number of restores performed on this FSM.
+	restores uint64
+
+	// Events that should be fired when a certain command log is applied.
+	events map[uint64][]*event.Event
+
+	mu sync.RWMutex
+}
+
+func newFSMWrapper(logger hclog.Logger, id raft.ServerID, fsm raft.FSM) *fsmWrapper {
+	return &fsmWrapper{
+		logger: logger,
+		id:     id,
+		fsm:    fsm,
+		events: make(map[uint64][]*event.Event),
+	}
+}
+
+func (f *fsmWrapper) Apply(log *raft.Log) interface{} {
+	result := f.fsm.Apply(log)
+
+	f.mu.Lock()
+	f.commands++
+	f.mu.Unlock()
+
+	f.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: fsm %s: applied %d", f.id, f.commands))
+	if events, ok := f.events[f.commands]; ok {
+		for _, event := range events {
+			event.Fire()
+			event.Block()
+		}
+	}
+
+	return result
+}
+
+// Snapshot delegates to the wrapped FSM, wrapping the returned snapshot so
+// that the current commands count gets persisted along with it.
+func (f *fsmWrapper) Snapshot() (raft.FSMSnapshot, error) {
+	snapshot, err := f.fsm.Snapshot()
+
+	if snapshot != nil {
+		f.mu.Lock()
+		f.snapshots++
+		snapshot = &fsmSnapshotWrapper{
+			commands: f.commands,
+			snapshot: snapshot,
+		}
+		f.mu.Unlock()
+	}
+
+	return snapshot, err
+}
+
+// Restore reads back the commands count that was persisted along with the
+// snapshot, then delegates to the wrapped FSM.
+func (f *fsmWrapper) Restore(reader io.ReadCloser) error {
+	if err := binary.Read(reader, binary.LittleEndian, &f.commands); err != nil {
+		return errors.Wrap(err, "failed to restore commands count")
+	}
+	if err := f.fsm.Restore(reader); err != nil {
+		return errors.Wrap(err, "failed to perform restore on user's FSM")
+	}
+
+	if events, ok := f.events[f.commands]; ok {
+		for _, event := range events {
+			event.Fire()
+			event.Block()
+		}
+	}
+
+	f.mu.Lock()
+	f.restores++
+	f.mu.Unlock()
+
+	return nil
+}
+
+// This method must be called whenever the server associated with this FSM is
+// about to transition to the leader state, and before any new command log is
+// applied.
+//
+// It resets the internal state of the fsm, such as the list of applied command
+// logs and the scheduled events.
+func (f *fsmWrapper) electing() {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	for n := range f.events {
+		delete(f.events, n)
+	}
+}
+
+// Return an event that will fire when the n'th command log for the term is
+// applied on this FSM. It's assumed that this FSM is associated with the
+// current leader.
+func (f *fsmWrapper) whenApplied(n uint64) *event.Event {
+	e := event.New()
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	if f.commands >= n {
+		// Fire immediately.
+		go e.Fire()
+	} else {
+		_, ok := f.events[n]
+		if !ok {
+			f.events[n] = make([]*event.Event, 0)
+		}
+		f.events[n] = append(f.events[n], e)
+	}
+	return e
+}
+
+// Return the total number of command logs applied by this FSM.
+func (f *fsmWrapper) Commands() uint64 {
+	return f.commands
+}
+
+// Return the total number of snapshots performed by this FSM.
+func (f *fsmWrapper) Snapshots() uint64 {
+	return f.snapshots
+}
+
+// Return the total number of restores performed by this FSM.
+func (f *fsmWrapper) Restores() uint64 {
+	return f.restores
+}
+
+type fsmSnapshotWrapper struct {
+	commands uint64
+	snapshot raft.FSMSnapshot
+}
+
+func (s *fsmSnapshotWrapper) Persist(sink raft.SnapshotSink) error {
+	// Augment the snapshot with the current command count.
+	if err := binary.Write(sink, binary.LittleEndian, s.commands); err != nil {
+		return errors.Wrap(err, "failed to augment snapshot with commands count")
+	}
+	if err := s.snapshot.Persist(sink); err != nil {
+		return errors.Wrap(err, "failed to perform snapshot on user's FSM")
+	}
+	return nil
+}
+
+func (s *fsmSnapshotWrapper) Release() {}
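
Persist() and Restore() above agree on a little-endian uint64 header carrying the commands count ahead of the user snapshot bytes. The encoding round-trip in isolation:

    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	var buf bytes.Buffer

    	// Persist: the commands count is written ahead of the snapshot.
    	binary.Write(&buf, binary.LittleEndian, uint64(42))
    	buf.WriteString("user snapshot bytes")

    	// Restore: the count is read back before the FSM sees the rest.
    	var commands uint64
    	binary.Read(&buf, binary.LittleEndian, &commands)
    	fmt.Println(commands, buf.String()) // 42 user snapshot bytes
    }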
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/logging/logger.go b/vendor/github.com/CanonicalLtd/raft-test/internal/logging/logger.go
new file mode 100644
index 0000000000..07ec09f63c
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/logging/logger.go
@@ -0,0 +1,50 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+	"testing"
+
+	"github.com/hashicorp/logutils"
+	"github.com/hashicorp/go-hclog"
+)
+
+// New returns a standard hclog.Logger that will write entries at or above the
+// specified level to the testing log.
+func New(t testing.TB, level logutils.LogLevel) hclog.Logger {
+	filter := &logutils.LevelFilter{
+		Levels:   []logutils.LogLevel{"DEBUG", "WARN", "ERROR", "INFO"},
+		MinLevel: level,
+		Writer:   &testingWriter{t},
+	}
+
+	return hclog.New(&hclog.LoggerOptions{
+		Name: "raft-test",
+		Output: filter,
+	})
+}
+
+// Implement io.Writer and forward what it receives to a
+// testing logger.
+type testingWriter struct {
+	t testing.TB
+}
+
+// Write a single log entry. It's assumed that p is always a \n-terminated
+// UTF-8 string.
+func (w *testingWriter) Write(p []byte) (n int, err error) {
+	w.t.Logf("%s", p)
+	return len(p), nil
+}
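
Usage from a test is a one-liner; entries below the chosen level are filtered out before reaching t.Logf. A sketch, assuming same-package access to this internal helper:

    func TestWithLogger(t *testing.T) {
    	logger := New(t, "WARN")
    	logger.Debug("dropped by the level filter")
    	logger.Warn("forwarded to the test log")
    }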
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/network/logs.go b/vendor/github.com/CanonicalLtd/raft-test/internal/network/logs.go
new file mode 100644
index 0000000000..df88af9e31
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/network/logs.go
@@ -0,0 +1,76 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package network
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/raft"
+)
+
+// Return a string representation of the given log entries.
+func stringifyLogs(logs []*raft.Log) string {
+	n := len(logs)
+	description := fmt.Sprintf("%d ", n)
+	if n == 1 {
+		description += "entry"
+	} else {
+		description += "entries"
+	}
+
+	if n > 0 {
+		entries := make([]string, n)
+		for i, log := range logs {
+			name := "Other"
+			switch log.Type {
+			case raft.LogCommand:
+				name = "Command"
+			case raft.LogNoop:
+				name = "Noop"
+			}
+			entries[i] = fmt.Sprintf("%s:term=%d,index=%d", name, log.Term, log.Index)
+		}
+		description += fmt.Sprintf(" [%s]", strings.Join(entries, " "))
+	}
+
+	return description
+}
+
+// This function takes a set of log entries that have been successfully
+// appended to a peer and filters out any log entry with an older term relative
+// to the others.
+//
+// The returned entries are guaranteed to all have the same term, and that
+// term is the highest among the ones in this batch.
+func filterLogsWithOlderTerms(logs []*raft.Log) []*raft.Log {
+	// Find the highest term.
+	var highestTerm uint64
+	for _, log := range logs {
+		if log.Term > highestTerm {
+			highestTerm = log.Term
+		}
+	}
+
+	// Discard any log with an older term than the highest one.
+	filteredLogs := make([]*raft.Log, 0)
+	for _, log := range logs {
+		if log.Term == highestTerm {
+			filteredLogs = append(filteredLogs, log)
+		}
+	}
+
+	return filteredLogs
+}
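
For example, when a new leader's noop (term 3) lands in the same batch as a straggler entry from term 2, only the term-3 entry survives. A same-package sketch (indices and terms invented):

    logs := []*raft.Log{
    	{Index: 7, Term: 2, Type: raft.LogCommand},
    	{Index: 8, Term: 3, Type: raft.LogNoop},
    }

    filtered := filterLogsWithOlderTerms(logs)
    fmt.Println(stringifyLogs(filtered)) // 1 entry [Noop:term=3,index=8]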
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/network/network.go b/vendor/github.com/CanonicalLtd/raft-test/internal/network/network.go
new file mode 100644
index 0000000000..b41cc6e0a2
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/network/network.go
@@ -0,0 +1,147 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package network
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Network provides control over all transports of a cluster, injecting
+// disconnections and failures.
+type Network struct {
+	logger hclog.Logger
+
+	// Transport wrappers.
+	transports map[raft.ServerID]*eventTransport
+}
+
+// New creates a new network for controlling the underlying transports.
+func New(logger hclog.Logger) *Network {
+	return &Network{
+		logger:     logger,
+		transports: make(map[raft.ServerID]*eventTransport),
+	}
+}
+
+// Add a new transport to the network. Returns a transport that wraps the given
+// transport with instrumentation to inject disconnections and failures.
+func (n *Network) Add(id raft.ServerID, trans raft.Transport) raft.Transport {
+	transport := newEventTransport(n.logger, id, trans)
+
+	for _, other := range n.transports {
+		transport.AddPeer(other)
+		other.AddPeer(transport)
+	}
+
+	n.transports[id] = transport
+	return transport
+}
+
+// Electing resets any leader-related state in the transport associated with
+// given server ID (such as the track of logs appended by the peers), and it
+// connects the transport to all its peers, enabling it to send them RPCs. It
+// must be called whenever the server associated with this transport is about
+// to transition to the leader state, and before any append entries RPC is
+// made.
+func (n *Network) Electing(id raft.ServerID) {
+	n.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: establish outbound connection to all other nodes", id))
+
+	// Sanity check that the network is fully disconnected at this time.
+	for id, transport := range n.transports {
+		if transport.Connected() {
+			panic(fmt.Sprintf("expected a fully disconected network, but server %s is connected", id))
+		}
+	}
+
+	transport := n.transports[id]
+	transport.Electing()
+}
+
+// Deposing disables connectivity from the transport of the server with the
+// given ID to all its peers, allowing only append entries RPCs for peers that
+// are lagging behind in terms of applied logs to be performed.
+func (n *Network) Deposing(id raft.ServerID) {
+	n.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: dropping outbound connection to all other nodes", id))
+	n.transports[id].Deposing()
+}
+
+// ConnectAllServers establishes full cluster connectivity after an
+// election. The given ID is the one of the leader, which is already connected.
+func (n *Network) ConnectAllServers(id raft.ServerID) {
+	// Connect all servers except the leader, which is connected already.
+	for other, transport := range n.transports {
+		if other == id {
+			continue
+		}
+		transport.peers.Connect()
+	}
+}
+
+// Disconnect disables connectivity from the transport of the leader
+// server with the given ID to the peer with the given ID.
+func (n *Network) Disconnect(id, follower raft.ServerID) {
+	n.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: disconnecting follower %s", id, follower))
+	n.transports[id].Disconnect(follower)
+}
+
+// Reconnect re-enables connectivity from the transport of the leader
+// server with the given ID to the peer with the given ID.
+func (n *Network) Reconnect(id, follower raft.ServerID) {
+	n.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: reconnecting follower %s", id, follower))
+	n.transports[id].Reconnect(follower)
+}
+
+// PeerConnected returns whether the peer with the given server ID is connected
+// with the transport of the server with the given ID.
+func (n *Network) PeerConnected(id, peer raft.ServerID) bool {
+	return n.transports[id].PeerConnected(peer)
+}
+
+// Address returns the address of the server with the given id.
+func (n *Network) Address(id raft.ServerID) raft.ServerAddress {
+	return n.transports[id].LocalAddr()
+}
+
+// HasAppendedLogsFromTo returns true if at least one log entry has been appended
+// by server with id1 to server with id2.
+//
+// It is assumed that id1 is a leader that has just been elected and has been
+// trying to append a noop log to all its followers.
+func (n *Network) HasAppendedLogsFromTo(id1, id2 raft.ServerID) bool {
+	transport := n.transports[id1]
+	return transport.HasAppendedLogsTo(id2)
+}
+
+// ScheduleEnqueueFailure will make all followers of the given server fail when
+// the leader tries to append the n'th log command. Return an event that will
+// fire when all of them have failed and will block them all until
+// acknowledged.
+func (n *Network) ScheduleEnqueueFailure(id raft.ServerID, command uint64) *event.Event {
+	transport := n.transports[id]
+	return transport.ScheduleEnqueueFailure(command)
+}
+
+// ScheduleAppendFailure will make all followers of the given leader server
+// append the n'th log command sent by the leader, but they will fail to
+// acknowledge the leader about it. Return an event that will fire when all of
+// them have failed and will block them all until acknowledged.
+func (n *Network) ScheduleAppendFailure(id raft.ServerID, command uint64) *event.Event {
+	transport := n.transports[id]
+	return transport.ScheduleAppendFailure(command)
+}
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/network/peers.go b/vendor/github.com/CanonicalLtd/raft-test/internal/network/peers.go
new file mode 100644
index 0000000000..a386c2982a
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/network/peers.go
@@ -0,0 +1,307 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package network
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/hashicorp/raft"
+)
+
+// Small wrapper around a map of raft.ServerID->peer, offering concurrency
+// safety. This bit of information is not on eventTransport directly, since it
+// needs to be shared between eventTransport and eventPipeline.
+type peers struct {
+	peers map[raft.ServerID]*peer
+	mu    sync.RWMutex
+}
+
+// Create a new empty peers map.
+func newPeers() *peers {
+	return &peers{
+		peers: make(map[raft.ServerID]*peer),
+	}
+}
+
+// Add a new peer for the given source and target server IDs.
+func (p *peers) Add(source, target raft.ServerID) {
+	p.peers[target] = newPeer(source, target)
+}
+
+// Get the peer with the given ID.
+func (p *peers) Get(id raft.ServerID) *peer {
+	// Since peer entries are inserted at initialization time by the
+	// Cluster() function, and currently never change afterwards,
+	// there's no need to protect this method with the mutex.
+	return p.peers[id]
+}
+
+// Return all the peers.
+func (p *peers) All() map[raft.ServerID]*peer {
+	// Since peer entries are inserted at initialization time by the
+	// Cluster() function, and currently never change afterwards,
+	// there's no need to protect this method with the mutex.
+	return p.peers
+}
+
+// Enable connectivity to all the peers in this map.
+func (p *peers) Connect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	for _, peer := range p.peers {
+		peer.Connect()
+	}
+}
+
+// Returns true if all peers are connected, false otherwise.
+//
+// It panics if some nodes are connected and others are not.
+func (p *peers) Connected() bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	connected := false
+	for id, peer := range p.peers {
+		if !connected {
+			connected = peer.Connected()
+		} else if !peer.Connected() {
+			panic(fmt.Sprintf("server %s is not not connected while some others are", id))
+		}
+	}
+	return connected
+}
+
+// Disable connectivity to all the peers in this map. However allow for peers
+// that are lagging behind in terms of received entries to still receive
+// AppendEntries RPCs.
+func (p *peers) SoftDisconnect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	for _, peer := range p.peers {
+		peer.SoftDisconnect()
+	}
+}
+
+// Whether the given target peer is both disconnected from its source
+// transport and not syncing logs with other peers (i.e. either it has
+// appended as many logs as the peer with the most appended logs, or it
+// has been hard-disconnected).
+func (p *peers) DisconnectedAndNotSyncing(id raft.ServerID) bool {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	for _, peer := range p.peers {
+		peer.mu.RLock()
+		defer peer.mu.RUnlock()
+	}
+
+	this := p.peers[id]
+	if this.connected {
+		return false
+	}
+
+	if !this.allowSyncing {
+		return true
+	}
+
+	count := this.LogsCount()
+
+	for _, other := range p.peers {
+		if other.target == this.target {
+			continue
+		}
+		if count < other.LogsCount() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Hold information about a single peer server that a faultyTransport is
+// sending RPCs to.
+type peer struct {
+	// Server ID of the server sending RPCs to the peer.
+	source raft.ServerID
+
+	// Server ID of the peer server.
+	target raft.ServerID
+
+	// Whether connectivity is up. The transport can send RPCs to the peer
+	// server only if this value is true.
+	connected bool
+
+	// Whether to allow appending entries to this peer even if the
+	// connected field is false. Used for bringing the logs appended by a
+	// peer in sync with the others.
+	allowSyncing bool
+
+	// Logs successfully appended to this peer since the server of the
+	// transport we're associated with has acquired leadership. This keeps
+	// only logs tagged with the same term the leader was elected at.
+	logs []*raft.Log
+
+	// Serialize access to internal state.
+	mu sync.RWMutex
+}
+
+// Create a new peer for the given source and target servers.
+func newPeer(source, target raft.ServerID) *peer {
+	return &peer{
+		source: source,
+		target: target,
+		logs:   make([]*raft.Log, 0),
+	}
+}
+
+// Enable connectivity between the source transport and the target peer.
+func (p *peer) Connect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.connected {
+		panic(fmt.Sprintf("server %s is already connected with server %s", p.source, p.target))
+	}
+	p.connected = true
+	p.allowSyncing = false
+}
+
+// Disable connectivity between the source transport and the target
+// peer.
+func (p *peer) Disconnect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if !p.connected {
+		panic(fmt.Sprintf("server %s is already disconnected from server %s", p.source, p.target))
+	}
+	p.connected = false
+	p.allowSyncing = false
+}
+
+// Re-enables connectivity between the source transport and the target
+// peer.
+func (p *peer) Reconnect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.connected {
+		panic(fmt.Sprintf("server %s is already connected with server %s", p.source, p.target))
+	}
+	p.connected = true
+	p.allowSyncing = false
+}
+
+// Disable connectivity between the source transport and the target
+// peer. However allow for peers that are lagging behind in terms of received
+// entries to still receive AppendEntries RPCs.
+func (p *peer) SoftDisconnect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if !p.connected {
+		panic(fmt.Sprintf("server %s is already disconnected from server %s", p.source, p.target))
+	}
+	p.connected = false
+	p.allowSyncing = true
+}
+
+// Return whether this source transport is connected to the target peer.
+func (p *peer) Connected() bool {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.connected
+}
+
+// Reset all recorded logs. Should be called when a new leader is elected.
+func (p *peer) ResetLogs() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.logs = p.logs[:0]
+}
+
+// This method updates the logs that the peer successfully appended. It must be
+// called whenever the transport is confident that logs have been
+// appended. There are two cases:
+//
+// - Transport.AppendEntries(): this is synchronous so UpdateLogs() can be invoked
+//   as soon as the AppendEntries() call returns.
+//
+// - AppendPipeline.AppendEntries(): this is asynchronous, so UpdateLogs() should
+//   be invoked only when the AppendFuture returned by AppendEntries() completes.
+//
+// In practice, the current implementation of faultyTransport and
+// faultyPipeline is a bit sloppy about the above rules, since we can make some
+// assumptions about the flow of entries. See comments in faultyTransport and
+// faultyPipeline for more details.
+func (p *peer) UpdateLogs(logs []*raft.Log) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if len(logs) == 0 {
+		return // Nothing to do.
+	}
+
+	// Discard any log with an older term (relative to the others).
+	newLogs := filterLogsWithOlderTerms(logs)
+
+	// If no logs have been received yet, just append everything.
+	if len(p.logs) == 0 {
+		p.logs = newLogs
+		return
+	}
+
+	// Check if we have stored entries for older terms, and if so, discard
+	// them.
+	//
+	// We only need to check the first entry, because we always store
+	// entries that all have the same term.
+	if p.logs[0].Term < newLogs[0].Term {
+		p.logs = p.logs[:0]
+	}
+
+	// Append new logs that aren't duplicates.
+	for _, newLog := range newLogs {
+		duplicate := false
+		for _, log := range p.logs {
+			if newLog.Index == log.Index {
+				duplicate = true
+				break
+			}
+		}
+		if duplicate {
+			continue
+		}
+		p.logs = append(p.logs, newLog)
+	}
+}
+
+// Return the number of all logs appended so far to this peer.
+func (p *peer) LogsCount() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return len(p.logs)
+}
+
+// Return the number of command logs appended so far to this peer.
+func (p *peer) CommandLogsCount() uint64 {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	n := uint64(0)
+	for _, log := range p.logs {
+		if log.Type == raft.LogCommand {
+			n++
+		}
+	}
+	return n
+}
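
UpdateLogs() therefore keeps, per peer, a deduplicated window of entries from a single term. A same-package sketch of the behaviour (indices and terms invented):

    p := newPeer("0", "1")

    // Term 2: two entries appended.
    p.UpdateLogs([]*raft.Log{
    	{Index: 5, Term: 2, Type: raft.LogCommand},
    	{Index: 6, Term: 2, Type: raft.LogCommand},
    })
    fmt.Println(p.LogsCount()) // 2

    // A term-3 entry supersedes the term-2 ones, which get discarded.
    p.UpdateLogs([]*raft.Log{{Index: 7, Term: 3, Type: raft.LogNoop}})
    fmt.Println(p.LogsCount(), p.CommandLogsCount()) // 1 0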
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/network/pipeline.go b/vendor/github.com/CanonicalLtd/raft-test/internal/network/pipeline.go
new file mode 100644
index 0000000000..29d77e56f8
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/network/pipeline.go
@@ -0,0 +1,166 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package network
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Wrap a regular raft.AppendPipeline, adding support for triggering events at
+// specific times.
+type eventPipeline struct {
+	logger hclog.Logger
+
+	// Server ID sending RPCs.
+	source raft.ServerID
+
+	// Server ID this pipeline is sending RPCs to.
+	target raft.ServerID
+
+	// Regular pipeline that we are wrapping.
+	pipeline raft.AppendPipeline
+
+	// All other peers connected to our transport. Used to sync logs after
+	// a disconnection.
+	peers *peers
+
+	// Fault that should happen in this transport during a term.
+	schedule *schedule
+
+	// If non-zero, the pipeline will artificially return an error to its
+	// consumer when firing the response of a request whose entries contain
+	// a log with this index. This happens after the peer has actually
+	// appended the request's entries, and it effectively simulates a
+	// follower disconnecting before it can acknowledge the leader about a
+	// successful request.
+	failure uint64
+
+	// To stop the pipeline.
+	shutdownCh chan struct{}
+}
+
+// AppendEntries is used to add another request to the pipeline.
+// The send may block which is an effective form of back-pressure.
+func (p *eventPipeline) AppendEntries(
+	args *raft.AppendEntriesRequest, resp *raft.AppendEntriesResponse) (raft.AppendFuture, error) {
+
+	p.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: pipeline: append to %s: %s", p.source, p.target, stringifyLogs(args.Entries)))
+
+	peer := p.peers.Get(p.target)
+	faulty := false
+	if p.schedule != nil {
+		n := peer.CommandLogsCount()
+		args, faulty = p.schedule.FilterRequest(n, args)
+		if faulty && p.schedule.IsEnqueueFault() {
+			p.logger.Debug(fmt.Sprintf(
+				"[DEBUG] raft-test: server %s: pipeline: append to: %s: enqueue fault: command %d", p.source, p.target, p.schedule.Command()))
+		}
+	}
+
+	if p.peers.DisconnectedAndNotSyncing(p.target) {
+		p.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: pipeline: append to %s: not connected", p.source, p.target))
+		return nil, fmt.Errorf("cannot reach server %s", p.target)
+	}
+
+	if faulty && p.schedule.IsAppendFault() {
+		p.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: pipeline: append to %s: append fault: command %d", p.source, p.target, p.schedule.n))
+		p.failure = args.Entries[0].Index
+	}
+
+	future, err := p.pipeline.AppendEntries(args, resp)
+	if err != nil {
+		return nil, err
+	}
+	peer.UpdateLogs(args.Entries)
+
+	if faulty && p.schedule.IsEnqueueFault() {
+		p.schedule.OccurredOn(p.target)
+		p.schedule.event.Block()
+		return nil, fmt.Errorf("cannot reach server %s", p.target)
+	}
+
+	return &appendFutureWrapper{
+		id:     p.target,
+		future: future,
+	}, nil
+}
+
+// Consumer returns a channel that can be used to consume
+// response futures when they are ready.
+func (p *eventPipeline) Consumer() <-chan raft.AppendFuture {
+	ch := make(chan raft.AppendFuture)
+
+	go func() {
+		for {
+			select {
+			case future := <-p.pipeline.Consumer():
+				entries := future.Request().Entries
+				fail := false
+				if len(entries) > 0 && entries[0].Index == p.failure {
+					fail = true
+				}
+				if fail {
+					p.schedule.OccurredOn(p.target)
+					p.schedule.event.Block()
+					future = &appendFutureWrapper{id: p.target, future: future, failing: true}
+				}
+				ch <- future
+			case <-p.shutdownCh:
+				return
+			}
+		}
+	}()
+	return ch
+}
+
+// Close closes the pipeline and cancels all inflight RPCs.
+func (p *eventPipeline) Close() error {
+	err := p.pipeline.Close()
+	close(p.shutdownCh)
+	return err
+}
+
+type appendFutureWrapper struct {
+	id      raft.ServerID
+	future  raft.AppendFuture
+	failing bool
+}
+
+func (f *appendFutureWrapper) Error() error {
+	if f.failing {
+		return fmt.Errorf("cannot reach server %s", f.id)
+	}
+	return f.future.Error()
+}
+
+func (f *appendFutureWrapper) Start() time.Time {
+	return f.future.Start()
+}
+
+func (f *appendFutureWrapper) Request() *raft.AppendEntriesRequest {
+	return f.future.Request()
+}
+
+func (f *appendFutureWrapper) Response() *raft.AppendEntriesResponse {
+	response := f.future.Response()
+	if f.failing {
+		response.Success = false
+	}
+	return response
+}
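
A standalone sketch (hypothetical types, not the vendored API) of the
consumer-side failure injection above: a response future whose first entry
carries the recorded failure index is wrapped so that Error() reports an
unreachable peer, even though the entries were in fact appended:

    package main

    import "fmt"

    type future struct {
        firstIndex uint64
        err        error
    }

    func (f *future) Error() error { return f.err }

    // wrapIfFailing mirrors the check in eventPipeline.Consumer: a future
    // carrying the entry index scheduled to fail gets a network error.
    func wrapIfFailing(f *future, failure uint64, target string) *future {
        if failure != 0 && f.firstIndex == failure {
            return &future{
                firstIndex: f.firstIndex,
                err:        fmt.Errorf("cannot reach server %s", target),
            }
        }
        return f
    }

    func main() {
        f := wrapIfFailing(&future{firstIndex: 7}, 7, "2")
        fmt.Println(f.Error()) // cannot reach server 2
    }
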
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/network/schedule.go b/vendor/github.com/CanonicalLtd/raft-test/internal/network/schedule.go
new file mode 100644
index 0000000000..3b5c0293c5
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/network/schedule.go
@@ -0,0 +1,178 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package network
+
+import (
+	"sync"
+
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/hashicorp/raft"
+)
+
+// Schedule contains details about when a certain event should occur.
+type schedule struct {
+	// List of peers that the event should occur on.
+	peers []raft.ServerID
+
+	// The event should fire when the transport tries to append the n'th
+	// command log in this term.
+	n uint64
+
+	// Event object that should be fired when all peers have tried to
+	// append the given command.
+	event *event.Event
+
+	// Track peers where the event already occurred.
+	occurred []bool
+
+	// If true, the event should occur after the command log has been
+	// appended to all followers.
+	append bool
+
+	// Serialize access to internal state.
+	mu sync.RWMutex
+}
+
+// Return a zero-value fault that will never occur.
+func newSchedule() *schedule {
+	return &schedule{}
+}
+
+// Add a server to the list of peers where the event should occur.
+func (s *schedule) AddPeer(id raft.ServerID) {
+	s.peers = append(s.peers, id)
+	s.occurred = append(s.occurred, false)
+}
+
+// Reset this fault so that it does not occur.
+func (s *schedule) NoEvent() {
+	s.n = 0
+	s.event = nil
+	for i := range s.occurred {
+		s.occurred[i] = false
+	}
+	s.append = false
+}
+
+// Configure this scheduler to fire the given event when the append entries RPC
+// carrying the n'th command log has failed on all given peers.
+func (s *schedule) EnqueueFailure(n uint64, event *event.Event) {
+	s.n = n
+	s.event = event
+	for i := range s.occurred {
+		s.occurred[i] = false
+	}
+}
+
+// Configure this scheduler to fire the given event after the n'th command log
+// has been appended by all peers, but its acknowledgment has failed to reach
+// the consumers.
+func (s *schedule) AppendFailure(n uint64, event *event.Event) {
+	s.n = n
+	s.event = event
+	for i := range s.occurred {
+		s.occurred[i] = false
+	}
+	s.append = true
+}
+
+// FilterRequest scans the entries in the given append request to see whether they
+// contain the command log that this fault is supposed to trigger upon.
+//
+// The n parameter is the number of command logs successfully appended so far
+// in the current term.
+//
+// It returns a request object and a boolean value.
+//
+// If the fault should not be triggered by this request, the returned request
+// object is the same as the given one and the boolean value is false.
+//
+// If the fault should be triggered by this request, the boolean value will
+// be true and for the returned request object there are two cases:
+//
+// 1) If this is an enqueue fault, the returned request object will have its
+//    Entries truncated to exclude the failing command log entry and every
+//    entry beyond that. This way all logs preceding the failing command log
+//    will still be appended to the peer and the associated apply futures will
+//    succeed, although the failing command log won't be applied and its apply
+//    future will fail with ErrLeadershipLost.
+//
+// 2) If this is an append fault, the returned request object will be the same
+//    as the given one. This way all logs will be appended to the peer,
+//    although the transport pretends that the append entries RPC has failed,
+//    simulating a disconnection when delivering the RPC reply.
+//
+func (s *schedule) FilterRequest(n uint64, args *raft.AppendEntriesRequest) (*raft.AppendEntriesRequest, bool) {
+	if s.n == 0 {
+		return args, false
+	}
+
+	for i, log := range args.Entries {
+		// Only consider command log entries.
+		if log.Type != raft.LogCommand {
+			continue
+		}
+		n++
+		if n != s.n {
+			continue
+		}
+
+		// We found a match.
+		if !s.append {
+			truncatedArgs := *args
+			truncatedArgs.Entries = args.Entries[:i]
+			args = &truncatedArgs
+		}
+		return args, true
+	}
+	return args, false
+}
+
+// Return the command log sequence number that should trigger this fault.
+//
+// For example, if the fault was set to fail at the n'th command log appended
+// during the term, then n is returned.
+func (s *schedule) Command() uint64 {
+	return s.n
+}
+
+// Return true if this is an enqueue fault.
+func (s *schedule) IsEnqueueFault() bool {
+	return !s.append
+}
+
+// Return true if this is an append fault.
+func (s *schedule) IsAppendFault() bool {
+	return s.append
+}
+
+// Mark the fault as occurred on the given server, and fire the event if it has
+// occurred on all servers.
+func (s *schedule) OccurredOn(id raft.ServerID) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for i, other := range s.peers {
+		if other == id {
+			s.occurred[i] = true
+		}
+	}
+
+	for _, flag := range s.occurred {
+		if !flag {
+			return
+		}
+	}
+	s.event.Fire()
+}
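
A standalone sketch of the enqueue-fault path of FilterRequest (simplified
types; the real method returns the request untouched for append faults):
entries before the matching command log still go out, while the match and
everything after it are truncated:

    package main

    import "fmt"

    type log struct {
        Index   uint64
        Command bool // stands in for Type == raft.LogCommand
    }

    // filter counts command logs starting from n (the number already
    // appended this term) and truncates at the target'th one.
    func filter(entries []log, n, target uint64) ([]log, bool) {
        if target == 0 {
            return entries, false
        }
        for i, l := range entries {
            if !l.Command {
                continue
            }
            n++
            if n == target {
                return entries[:i], true
            }
        }
        return entries, false
    }

    func main() {
        entries := []log{{1, true}, {2, true}, {3, true}}
        kept, faulty := filter(entries, 0, 2)
        fmt.Println(len(kept), faulty) // 1 true
    }
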
diff --git a/vendor/github.com/CanonicalLtd/raft-test/internal/network/transport.go b/vendor/github.com/CanonicalLtd/raft-test/internal/network/transport.go
new file mode 100644
index 0000000000..599b83d06e
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/internal/network/transport.go
@@ -0,0 +1,268 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package network
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Wrap a regular raft.Transport, adding support for triggering events at
+// specific times.
+type eventTransport struct {
+	logger hclog.Logger
+
+	// ID of the raft server associated with this transport.
+	id raft.ServerID
+
+	// The regular raft.Transport being wrapped.
+	trans raft.Transport
+
+	// Track the peers we are sending RPCs to.
+	peers *peers
+
+	// Schedule for the event that should happen in this transport during
+	// a term.
+	schedule *schedule
+}
+
+// Create a new transport wrapper.
+func newEventTransport(logger hclog.Logger, id raft.ServerID, trans raft.Transport) *eventTransport {
+	return &eventTransport{
+		logger:   logger,
+		id:       id,
+		trans:    trans,
+		peers:    newPeers(),
+		schedule: newSchedule(),
+	}
+}
+
+// Consumer returns a channel that can be used to
+// consume and respond to RPC requests.
+func (t *eventTransport) Consumer() <-chan raft.RPC {
+	return t.trans.Consumer()
+}
+
+// LocalAddr is used to return our local address to distinguish from our peers.
+func (t *eventTransport) LocalAddr() raft.ServerAddress {
+	return t.trans.LocalAddr()
+}
+
+// AppendEntriesPipeline returns an interface that can be used to pipeline
+// AppendEntries requests.
+func (t *eventTransport) AppendEntriesPipeline(
+	id raft.ServerID, target raft.ServerAddress) (raft.AppendPipeline, error) {
+
+	if t.peers.DisconnectedAndNotSyncing(id) {
+		t.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: transport: append to %s: not connected", t.id, id))
+		return nil, fmt.Errorf("cannot reach server %s", id)
+	}
+	if !t.peers.Get(id).Connected() {
+		t.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: transport: append to %s: syncing logs", t.id, id))
+	}
+
+	pipeline, err := t.trans.AppendEntriesPipeline(id, target)
+	if err != nil {
+		return nil, err
+	}
+
+	pipeline = &eventPipeline{
+		logger:     t.logger,
+		source:     t.id,
+		target:     id,
+		pipeline:   pipeline,
+		peers:      t.peers,
+		schedule:   t.schedule,
+		shutdownCh: make(chan struct{}),
+	}
+
+	return pipeline, nil
+}
+
+// AppendEntries sends the appropriate RPC to the target node.
+func (t *eventTransport) AppendEntries(
+	id raft.ServerID, target raft.ServerAddress, args *raft.AppendEntriesRequest,
+	resp *raft.AppendEntriesResponse) error {
+
+	peer := t.peers.Get(id)
+	t.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: transport: append to %s: %s", t.id, id, stringifyLogs(args.Entries)))
+
+	// If a fault is set, check if this batch of entries contains a command
+	// log matching the one configured in the fault.
+	faulty := false
+	if t.schedule != nil {
+		n := peer.CommandLogsCount()
+		args, faulty = t.schedule.FilterRequest(n, args)
+		if faulty && t.schedule.IsEnqueueFault() {
+			t.logger.Debug(fmt.Sprintf(
+				"[DEBUG] raft-test: server %s: transport: append to %s: enqueue fault: command %d", t.id, id, t.schedule.Command()))
+		}
+	}
+
+	if t.peers.DisconnectedAndNotSyncing(id) {
+		t.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: transport: append to %s: not connected", t.id, id))
+		return fmt.Errorf("cannot reach server %s", id)
+	}
+	if !t.peers.Get(id).Connected() {
+		t.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: transport: append to %s: syncing logs", t.id, id))
+	}
+
+	if err := t.trans.AppendEntries(id, target, args, resp); err != nil {
+		return err
+	}
+
+	// Check whether the peer reports a newer term.
+	if resp.Term > args.Term {
+		t.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: server %s: transport: append to %s: newer term", t.id, id))
+	}
+
+	peer.UpdateLogs(args.Entries)
+
+	if faulty && t.schedule.IsEnqueueFault() {
+		t.schedule.OccurredOn(id)
+		t.schedule.event.Block()
+		return fmt.Errorf("cannot reach server %s", id)
+	}
+
+	return nil
+}
+
+// RequestVote sends the appropriate RPC to the target node.
+func (t *eventTransport) RequestVote(
+	id raft.ServerID, target raft.ServerAddress, args *raft.RequestVoteRequest,
+	resp *raft.RequestVoteResponse) error {
+
+	if !t.peers.Get(id).Connected() {
+		return fmt.Errorf("connectivity to server %s is down", id)
+	}
+
+	return t.trans.RequestVote(id, target, args, resp)
+}
+
+// InstallSnapshot is used to push a snapshot down to a follower. The data is read from
+// the ReadCloser and streamed to the client.
+func (t *eventTransport) InstallSnapshot(
+	id raft.ServerID, target raft.ServerAddress, args *raft.InstallSnapshotRequest,
+	resp *raft.InstallSnapshotResponse, data io.Reader) error {
+
+	if !t.peers.Get(id).Connected() {
+		return fmt.Errorf("connectivity to server %s is down", id)
+	}
+	return t.trans.InstallSnapshot(id, target, args, resp, data)
+}
+
+// EncodePeer is used to serialize a peer's address.
+func (t *eventTransport) EncodePeer(id raft.ServerID, addr raft.ServerAddress) []byte {
+	return t.trans.EncodePeer(id, addr)
+}
+
+// DecodePeer is used to deserialize a peer's address.
+func (t *eventTransport) DecodePeer(data []byte) raft.ServerAddress {
+	return t.trans.DecodePeer(data)
+}
+
+// SetHeartbeatHandler is used to set up a heartbeat handler
+// as a fast-pass. This is to avoid head-of-line blocking from
+// disk IO. If a Transport does not support this, it can simply
+// ignore the call, and push the heartbeat onto the Consumer channel.
+func (t *eventTransport) SetHeartbeatHandler(cb func(rpc raft.RPC)) {
+	t.trans.SetHeartbeatHandler(cb)
+}
+
+func (t *eventTransport) Close() error {
+	if closer, ok := t.trans.(raft.WithClose); ok {
+		return closer.Close()
+	}
+	return nil
+}
+
+// AddPeer adds a new transport as peer of this transport. Once the other
+// transport has become a peer, this transport will be able to send RPCs to it,
+// if the peer object's 'connected' flag is on.
+func (t *eventTransport) AddPeer(transport *eventTransport) {
+	t.peers.Add(t.id, transport.id)
+	t.schedule.AddPeer(transport.id)
+}
+
+// Electing resets any leader-related state in this transport (such as the
+// track of logs appended by the peers), and it connects the transport to all
+// its peers, enabling it to send them RPCs. It must be called whenever the
+// server associated with this transport is about to transition to the leader
+// state, and before any append entries RPC is made.
+func (t *eventTransport) Electing() {
+	t.schedule.NoEvent()
+	for _, peer := range t.peers.All() {
+		peer.ResetLogs()
+	}
+	t.peers.Connect()
+}
+
+// Deposing disables connectivity from this transport to all its peers,
+// allowing append entries RPCs to be performed only towards peers that are
+// lagging behind in applied logs.
+func (t *eventTransport) Deposing() {
+	t.peers.SoftDisconnect()
+}
+
+// Disable connectivity from this transport to the given peer.
+func (t *eventTransport) Disconnect(id raft.ServerID) {
+	t.peers.Get(id).Disconnect()
+}
+
+// Re-enable connectivity from this transport to the given peer.
+func (t *eventTransport) Reconnect(id raft.ServerID) {
+	t.peers.Get(id).Reconnect()
+}
+
+// Returns true if all peers are connected, false otherwise.
+//
+// It panics if some nodes are connected and others are not.
+func (t *eventTransport) Connected() bool {
+	return t.peers.Connected()
+}
+
+// Returns true if the given peer is connected.
+func (t *eventTransport) PeerConnected(id raft.ServerID) bool {
+	return t.peers.Get(id).Connected()
+}
+
+// Returns true if this transport has appended logs to the given peer during
+// the term.
+func (t *eventTransport) HasAppendedLogsTo(id raft.ServerID) bool {
+	peer := t.peers.Get(id)
+	return peer.LogsCount() > 0
+}
+
+// Schedule the n'th command log to fail to be appended to the
+// followers. Return an event that will fire when all followers have reached
+// this failure.
+func (t *eventTransport) ScheduleEnqueueFailure(n uint64) *event.Event {
+	event := event.New()
+	t.schedule.EnqueueFailure(n, event)
+	return event
+}
+
+// Schedule the n'th command log to be appended by the followers, but to fail
+// to be acknowledged. Return an event that will fire when all followers
+// have reached this failure.
+func (t *eventTransport) ScheduleAppendFailure(n uint64) *event.Event {
+	event := event.New()
+	t.schedule.AppendFailure(n, event)
+	return event
+}
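
An in-package sketch of how two of these wrappers would presumably be wired
together (illustrative only: the constructor is unexported, and the use of
raft.NewInmemTransport here is an assumption, not taken from the diff):

    package network

    import (
        "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/raft"
    )

    // exampleWiring wraps two in-memory transports, makes them peers of
    // each other, and prepares server "1" to become leader.
    func exampleWiring() (*eventTransport, *eventTransport) {
        logger := hclog.Default()
        _, trans1 := raft.NewInmemTransport("1")
        _, trans2 := raft.NewInmemTransport("2")

        t1 := newEventTransport(logger, "1", trans1)
        t2 := newEventTransport(logger, "2", trans2)
        t1.AddPeer(t2)
        t2.AddPeer(t1)

        // Reset per-term state and connect "1" to its peers before it
        // transitions to the leader state.
        t1.Electing()
        return t1, t2
    }
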
diff --git a/vendor/github.com/CanonicalLtd/raft-test/options.go b/vendor/github.com/CanonicalLtd/raft-test/options.go
new file mode 100644
index 0000000000..77e7feef98
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/options.go
@@ -0,0 +1,107 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"io/ioutil"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+)
+
+// Config sets a hook for tweaking the raft configuration of individual nodes.
+func Config(f func(int, *raft.Config)) Option {
+	return func(nodes []*dependencies) {
+		for i, node := range nodes {
+			f(i, node.Conf)
+		}
+	}
+}
+
+// LogStore can be used to create custom log stores.
+//
+// The given function takes a node index as argument and returns the LogStore
+// that the node should use.
+func LogStore(factory func(int) raft.LogStore) Option {
+	return func(nodes []*dependencies) {
+		for i, node := range nodes {
+			node.Logs = factory(i)
+		}
+	}
+}
+
+// Transport can be used to create custom transports.
+//
+// The given function takes a node index as argument and returns the Transport
+// that the node should use.
+//
+// If the transports returned by the factory do not implement
+// LoopbackTransport, the Disconnect API won't work.
+func Transport(factory func(int) raft.Transport) Option {
+	return func(nodes []*dependencies) {
+		for i, node := range nodes {
+			node.Trans = factory(i)
+		}
+	}
+}
+
+// Latency is a convenience around Config that scales the values of the various
+// raft timeouts that would be set by default by Cluster.
+//
+// This option is orthogonal to the GO_RAFT_TEST_LATENCY environment
+// variable. If this option is used and GO_RAFT_TEST_LATENCY is set, they will
+// compound. E.g. passing a factor of 2.0 to this option and setting
+// GO_RAFT_TEST_LATENCY to 3.0 will have the net effect that default timeouts
+// are scaled by a factor of 6.0.
+func Latency(factor float64) Option {
+	return Config(func(i int, config *raft.Config) {
+		timeouts := []*time.Duration{
+			&config.HeartbeatTimeout,
+			&config.ElectionTimeout,
+			&config.LeaderLeaseTimeout,
+			&config.CommitTimeout,
+		}
+		for _, timeout := range timeouts {
+			*timeout = scaleDuration(*timeout, factor)
+		}
+	})
+}
+
+// DiscardLogger is a convenience around Config that sets the output stream of
+// raft's logger to ioutil.Discard.
+func DiscardLogger() Option {
+	return Config(func(i int, config *raft.Config) {
+		config.Logger = hclog.New(&hclog.LoggerOptions{
+			Name:   "raft-test",
+			Output: ioutil.Discard,
+		})
+	})
+}
+
+// Servers can be used to indicate which nodes should be initially part of the
+// created cluster.
+//
+// If this option is not used, the default is to have all nodes be part of the
+// cluster.
+func Servers(indexes ...int) Option {
+	return func(nodes []*dependencies) {
+		for _, node := range nodes {
+			node.Voter = false
+		}
+		for _, index := range indexes {
+			nodes[index].Voter = true
+		}
+	}
+}
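
A hypothetical test showing how these options compose. It assumes the
package's Cluster and FSMs helpers; the specific tweaks are illustrative:

    package rafttest_test

    import (
        "testing"

        rafttest "github.com/CanonicalLtd/raft-test"
        "github.com/hashicorp/raft"
    )

    func TestClusterOptions(t *testing.T) {
        rafts, control := rafttest.Cluster(t, rafttest.FSMs(3),
            rafttest.DiscardLogger(), // silence raft output
            rafttest.Latency(2.0),    // double the default timeouts
            rafttest.Servers(0, 1),   // only nodes 0 and 1 start as voters
            rafttest.Config(func(i int, c *raft.Config) {
                c.SnapshotThreshold = 64 // assumed per-node tweak
            }),
        )
        defer control.Close()
        _ = rafts
    }
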
diff --git a/vendor/github.com/CanonicalLtd/raft-test/server.go b/vendor/github.com/CanonicalLtd/raft-test/server.go
new file mode 100644
index 0000000000..035774cdb0
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/server.go
@@ -0,0 +1,36 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"testing"
+
+	"github.com/hashicorp/raft"
+)
+
+// Server is a convenience for creating a cluster with a single raft.Raft server
+// that is immediately elected as leader.
+//
+// The default network address of a test node is "0".
+//
+// Dependencies can be replaced or mutated using the various options.
+func Server(t *testing.T, fsm raft.FSM, options ...Option) (*raft.Raft, func()) {
+	fsms := []raft.FSM{fsm}
+
+	rafts, control := Cluster(t, fsms, options...)
+	control.Elect("0")
+
+	return rafts["0"], control.Close
+}
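
A minimal usage sketch (the FSM helper is assumed; any raft.FSM would do).
Since the returned node is already the leader, Apply can be called right
away:

    package rafttest_test

    import (
        "testing"
        "time"

        rafttest "github.com/CanonicalLtd/raft-test"
    )

    func TestSingleServer(t *testing.T) {
        r, cleanup := rafttest.Server(t, rafttest.FSM())
        defer cleanup()

        // The node was elected by Server, so Apply should succeed.
        if err := r.Apply([]byte("hello"), time.Second).Error(); err != nil {
            t.Fatal(err)
        }
    }
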
diff --git a/vendor/github.com/CanonicalLtd/raft-test/term.go b/vendor/github.com/CanonicalLtd/raft-test/term.go
new file mode 100644
index 0000000000..c60957e87f
--- /dev/null
+++ b/vendor/github.com/CanonicalLtd/raft-test/term.go
@@ -0,0 +1,219 @@
+// Copyright 2017 Canonical Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafttest
+
+import (
+	"fmt"
+
+	"github.com/CanonicalLtd/raft-test/internal/election"
+	"github.com/CanonicalLtd/raft-test/internal/event"
+	"github.com/hashicorp/raft"
+)
+
+// A Term holds information about an event that should happen while a certain
+// node is the leader.
+type Term struct {
+	control    *Control
+	id         raft.ServerID
+	leadership *election.Leadership
+	events     []*Event
+
+	// Server ID of a follower that has been disconnected.
+	disconnected raft.ServerID
+}
+
+// When can be used to schedule an action to run when a given expected
+// event occurs in the cluster during this Term.
+func (t *Term) When() *Event {
+	// TODO: check that we're not using Connect()
+	t.control.t.Helper()
+
+	event := &Event{
+		term: t,
+	}
+
+	t.events = append(t.events, event)
+	return event
+}
+
+// Disconnect a follower, which will stop receiving RPCs.
+func (t *Term) Disconnect(id raft.ServerID) {
+	t.control.t.Helper()
+
+	if t.disconnected != "" {
+		t.control.t.Fatalf("raft-test: term: disconnecting more than one server is not supported")
+	}
+
+	if id == t.id {
+		t.control.t.Fatalf("raft-test: term: disconnect error: server %s is the leader", t.id)
+	}
+
+	t.control.logger.Debug(fmt.Sprintf("[DEBUG] raft-test: term: disconnect %s", id))
+
+	t.disconnected = id
+	t.control.network.Disconnect(t.id, id)
+}
+
+// Reconnect a previously disconnected follower.
+func (t *Term) Reconnect(id raft.ServerID) {
+	t.control.t.Helper()
+
+	if id != t.disconnected {
+		t.control.t.Fatalf("raft-test: term: reconnect error: server %s was not disconnected", id)
+	}
+
+	// Reconnecting a server might result in a new election round, so we
+	// have to be prepared for that.
+	t.control.network.Reconnect(t.id, id)
+	if t.control.waitLeadershipPropagated(t.id, t.leadership) {
+		// Leadership was not lost and all followers are back
+		// on track.
+		return
+	}
+
+	// Leadership was lost, we must undergo a new election.
+	//
+	// FIXME: this prevents When() hooks from functioning properly. It's not a
+	// big deal at the moment, since Disconnect() is mainly used for
+	// snapshots, but it should be sorted.
+	term := t.control.Elect(t.id)
+	t.leadership = term.leadership
+}
+
+// Snapshot performs a snapshot on the given server.
+func (t *Term) Snapshot(id raft.ServerID) {
+	t.control.t.Helper()
+
+	r := t.control.servers[id]
+	if err := r.Snapshot().Error(); err != nil {
+		t.control.t.Fatalf("raft-test: term: snapshot error on server %s: %v", id, err)
+	}
+}
+
+// Event that is expected to happen during a Term.
+type Event struct {
+	term        *Term
+	isScheduled bool
+}
+
+// Command schedules the event to occur when the Raft.Apply() method is called
+// on the leader raft instance in order to apply the n'th command log during
+// the current term.
+func (e *Event) Command(n uint64) *Dispatch {
+	e.term.control.t.Helper()
+
+	if e.isScheduled {
+		e.term.control.t.Fatal("raft-test: error: term event already scheduled")
+	}
+	e.isScheduled = true
+
+	return &Dispatch{
+		term: e.term,
+		n:    n,
+	}
+}
+
+// Dispatch defines at which phase of the dispatch process a command log event
+// should fire.
+type Dispatch struct {
+	term  *Term
+	n     uint64
+	event *event.Event
+}
+
+// Enqueued configures the command log event to occur when the command log is
+// enqueued, but not yet appended by the followers.
+func (d *Dispatch) Enqueued() *Action {
+	d.term.control.t.Helper()
+
+	if d.event != nil {
+		d.term.control.t.Fatal("raft-test: error: dispatch event already defined")
+	}
+	d.event = d.term.control.whenCommandEnqueued(d.term.id, d.n)
+
+	return &Action{
+		term:  d.term,
+		event: d.event,
+	}
+}
+
+// Appended configures the command log event to occur when the command log is
+// appended by all followers, but not yet committed by the leader.
+func (d *Dispatch) Appended() *Action {
+	d.term.control.t.Helper()
+
+	if d.event != nil {
+		d.term.control.t.Fatal("raft-test: error: dispatch event already defined")
+	}
+
+	d.event = d.term.control.whenCommandAppended(d.term.id, d.n)
+
+	return &Action{
+		term:  d.term,
+		event: d.event,
+	}
+}
+
+// Committed configures the command log event to occur when the command log is
+// committed.
+func (d *Dispatch) Committed() *Action {
+	d.term.control.t.Helper()
+
+	if d.event != nil {
+		d.term.control.t.Fatal("raft-test: error: dispatch event already defined")
+	}
+
+	d.event = d.term.control.whenCommandCommitted(d.term.id, d.n)
+
+	return &Action{
+		term:  d.term,
+		event: d.event,
+	}
+}
+
+// Action defines what should happen when the event defined in the term occurs.
+type Action struct {
+	term  *Term
+	event *event.Event
+}
+
+// Depose makes the action depose the current leader.
+func (a *Action) Depose() {
+	a.term.control.t.Helper()
+	//a.control.t.Logf(
+	//"raft-test: event: schedule depose server %s when command %d gets %s", a.id, a.n, a.phase)
+
+	a.term.control.deposing = make(chan struct{})
+
+	go func() {
+		//c.t.Logf("raft-test: node %d: fsm: wait log command %d", i, n)
+		a.term.control.deposeUponEvent(a.event, a.term.id, a.term.leadership)
+	}()
+}
+
+// Snapshot makes the action trigger a snapshot on the leader.
+//
+// The typical use is to take the snapshot after a certain command log gets
+// committed (see Dispatch.Committed()).
+func (a *Action) Snapshot() {
+	a.term.control.t.Helper()
+	// a.control.t.Logf(
+	// 	"raft-test: event: schedule snapshot server %s when command %d gets %s", a.id, a.n, a.phase)
+
+	go func() {
+		//c.t.Logf("raft-test: node %d: fsm: wait log command %d", i, n)
+		a.term.control.snapshotUponEvent(a.event, a.term.id)
+	}()
+}
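
An illustrative walk through the fluent API above (assuming the Cluster and
FSMs helpers): schedule the leader to be deposed once the second command log
has been appended by the followers, then watch the second Apply fail:

    package rafttest_test

    import (
        "testing"
        "time"

        rafttest "github.com/CanonicalLtd/raft-test"
    )

    func TestDeposeOnAppend(t *testing.T) {
        rafts, control := rafttest.Cluster(t, rafttest.FSMs(3))
        defer control.Close()

        term := control.Elect("0")
        term.When().Command(2).Appended().Depose()

        r := rafts["0"]
        if err := r.Apply([]byte("one"), time.Second).Error(); err != nil {
            t.Fatal(err)
        }
        // Leadership is lost while dispatching the second command, so this
        // apply is expected to fail.
        if err := r.Apply([]byte("two"), time.Second).Error(); err == nil {
            t.Fatal("expected leadership loss")
        }
    }
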
diff --git a/vendor/github.com/hashicorp/raft-boltdb/LICENSE b/vendor/github.com/hashicorp/raft-boltdb/LICENSE
new file mode 100644
index 0000000000..f0e5c79e18
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-boltdb/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of  any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/raft-boltdb/Makefile b/vendor/github.com/hashicorp/raft-boltdb/Makefile
new file mode 100644
index 0000000000..bc5c6cc011
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-boltdb/Makefile
@@ -0,0 +1,11 @@
+DEPS = $(shell go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
+
+.PHONY: test deps
+
+test:
+	go test -timeout=30s ./...
+
+deps:
+	go get -d -v ./...
+	echo $(DEPS) | xargs -n1 go get -d
+
diff --git a/vendor/github.com/hashicorp/raft-boltdb/README.md b/vendor/github.com/hashicorp/raft-boltdb/README.md
new file mode 100644
index 0000000000..5d7180ab9e
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-boltdb/README.md
@@ -0,0 +1,11 @@
+raft-boltdb
+===========
+
+This repository provides the `raftboltdb` package. The package exports the
+`BoltStore`, which is an implementation of both a `LogStore` and `StableStore`.
+
+It is meant to be used as a backend for the `raft` [package
+here](https://github.com/hashicorp/raft).
+
+This implementation uses [BoltDB](https://github.com/boltdb/bolt). BoltDB is
+a simple key/value store implemented in pure Go, and inspired by LMDB.
diff --git a/vendor/github.com/hashicorp/raft-boltdb/bolt_store.go b/vendor/github.com/hashicorp/raft-boltdb/bolt_store.go
new file mode 100644
index 0000000000..a1f9f0ba61
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-boltdb/bolt_store.go
@@ -0,0 +1,268 @@
+package raftboltdb
+
+import (
+	"errors"
+
+	"github.com/boltdb/bolt"
+	"github.com/hashicorp/raft"
+)
+
+const (
+	// Permissions to use on the db file. This is only used if the
+	// database file does not exist and needs to be created.
+	dbFileMode = 0600
+)
+
+var (
+	// Bucket names we perform transactions in
+	dbLogs = []byte("logs")
+	dbConf = []byte("conf")
+
+	// An error indicating a given key does not exist
+	ErrKeyNotFound = errors.New("not found")
+)
+
+// BoltStore provides access to BoltDB for Raft to store and retrieve
+// log entries. It also provides key/value storage, and can be used as
+// a LogStore and StableStore.
+type BoltStore struct {
+	// conn is the underlying handle to the db.
+	conn *bolt.DB
+
+	// The path to the Bolt database file
+	path string
+}
+
+// Options contains all the configuration used to open the BoltDB
+type Options struct {
+	// Path is the file path to the BoltDB to use
+	Path string
+
+	// BoltOptions contains any specific BoltDB options you might
+	// want to specify [e.g. open timeout]
+	BoltOptions *bolt.Options
+
+	// NoSync causes the database to skip fsync calls after each
+	// write to the log. This is unsafe, so it should be used
+	// with caution.
+	NoSync bool
+}
+
+// readOnly returns true if the contained bolt options say to open
+// the DB in readOnly mode [this can be useful to tools that want
+// to examine the log]
+func (o *Options) readOnly() bool {
+	return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly
+}
+
+// NewBoltStore takes a file path and returns a connected Raft backend.
+func NewBoltStore(path string) (*BoltStore, error) {
+	return New(Options{Path: path})
+}
+
+// New uses the supplied options to open the BoltDB and prepare it for use as a raft backend.
+func New(options Options) (*BoltStore, error) {
+	// Try to connect
+	handle, err := bolt.Open(options.Path, dbFileMode, options.BoltOptions)
+	if err != nil {
+		return nil, err
+	}
+	handle.NoSync = options.NoSync
+
+	// Create the new store
+	store := &BoltStore{
+		conn: handle,
+		path: options.Path,
+	}
+
+	// If the store was opened read-only, don't try and create buckets
+	if !options.readOnly() {
+		// Set up our buckets
+		if err := store.initialize(); err != nil {
+			store.Close()
+			return nil, err
+		}
+	}
+	return store, nil
+}
+
+// initialize is used to set up all of the buckets.
+func (b *BoltStore) initialize() error {
+	tx, err := b.conn.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	// Create all the buckets
+	if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil {
+		return err
+	}
+	if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// Close is used to gracefully close the DB connection.
+func (b *BoltStore) Close() error {
+	return b.conn.Close()
+}
+
+// FirstIndex returns the first known index from the Raft log.
+func (b *BoltStore) FirstIndex() (uint64, error) {
+	tx, err := b.conn.Begin(false)
+	if err != nil {
+		return 0, err
+	}
+	defer tx.Rollback()
+
+	curs := tx.Bucket(dbLogs).Cursor()
+	if first, _ := curs.First(); first == nil {
+		return 0, nil
+	} else {
+		return bytesToUint64(first), nil
+	}
+}
+
+// LastIndex returns the last known index from the Raft log.
+func (b *BoltStore) LastIndex() (uint64, error) {
+	tx, err := b.conn.Begin(false)
+	if err != nil {
+		return 0, err
+	}
+	defer tx.Rollback()
+
+	curs := tx.Bucket(dbLogs).Cursor()
+	if last, _ := curs.Last(); last == nil {
+		return 0, nil
+	} else {
+		return bytesToUint64(last), nil
+	}
+}
+
+// GetLog is used to retrieve a log from BoltDB at a given index.
+func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error {
+	tx, err := b.conn.Begin(false)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	bucket := tx.Bucket(dbLogs)
+	val := bucket.Get(uint64ToBytes(idx))
+
+	if val == nil {
+		return raft.ErrLogNotFound
+	}
+	return decodeMsgPack(val, log)
+}
+
+// StoreLog is used to store a single raft log
+func (b *BoltStore) StoreLog(log *raft.Log) error {
+	return b.StoreLogs([]*raft.Log{log})
+}
+
+// StoreLogs is used to store a set of raft logs
+func (b *BoltStore) StoreLogs(logs []*raft.Log) error {
+	tx, err := b.conn.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	for _, log := range logs {
+		key := uint64ToBytes(log.Index)
+		val, err := encodeMsgPack(log)
+		if err != nil {
+			return err
+		}
+		bucket := tx.Bucket(dbLogs)
+		if err := bucket.Put(key, val.Bytes()); err != nil {
+			return err
+		}
+	}
+
+	return tx.Commit()
+}
+
+// DeleteRange is used to delete logs within a given range inclusively.
+func (b *BoltStore) DeleteRange(min, max uint64) error {
+	minKey := uint64ToBytes(min)
+
+	tx, err := b.conn.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	curs := tx.Bucket(dbLogs).Cursor()
+	for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() {
+		// Handle out-of-range log index
+		if bytesToUint64(k) > max {
+			break
+		}
+
+		// Delete in-range log index
+		if err := curs.Delete(); err != nil {
+			return err
+		}
+	}
+
+	return tx.Commit()
+}
+
+// Set is used to set a key/value set outside of the raft log
+func (b *BoltStore) Set(k, v []byte) error {
+	tx, err := b.conn.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	bucket := tx.Bucket(dbConf)
+	if err := bucket.Put(k, v); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// Get is used to retrieve a value from the k/v store by key
+func (b *BoltStore) Get(k []byte) ([]byte, error) {
+	tx, err := b.conn.Begin(false)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	bucket := tx.Bucket(dbConf)
+	val := bucket.Get(k)
+
+	if val == nil {
+		return nil, ErrKeyNotFound
+	}
+	return append([]byte(nil), val...), nil
+}
+
+// SetUint64 is like Set, but handles uint64 values
+func (b *BoltStore) SetUint64(key []byte, val uint64) error {
+	return b.Set(key, uint64ToBytes(val))
+}
+
+// GetUint64 is like Get, but handles uint64 values
+func (b *BoltStore) GetUint64(key []byte) (uint64, error) {
+	val, err := b.Get(key)
+	if err != nil {
+		return 0, err
+	}
+	return bytesToUint64(val), nil
+}
+
+// Sync performs an fsync on the database file handle. This is not necessary
+// under normal operation unless NoSync is enabled, in which case this forces the
+// database file to sync against the disk.
+func (b *BoltStore) Sync() error {
+	return b.conn.Sync()
+}
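
A short sketch of using BoltStore on both of its sides, LogStore and
StableStore; the database path is illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/raft"
        raftboltdb "github.com/hashicorp/raft-boltdb"
    )

    func main() {
        store, err := raftboltdb.NewBoltStore("/tmp/raft.db")
        if err != nil {
            log.Fatal(err)
        }
        defer store.Close()

        // LogStore side: persist a log entry and read it back.
        in := &raft.Log{Index: 1, Term: 1, Type: raft.LogCommand, Data: []byte("x")}
        if err := store.StoreLog(in); err != nil {
            log.Fatal(err)
        }
        var out raft.Log
        if err := store.GetLog(1, &out); err != nil {
            log.Fatal(err)
        }

        // StableStore side: key/value pairs kept outside the raft log.
        if err := store.SetUint64([]byte("CurrentTerm"), 1); err != nil {
            log.Fatal(err)
        }
        term, err := store.GetUint64([]byte("CurrentTerm"))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.Index, term) // 1 1
    }
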
diff --git a/vendor/github.com/hashicorp/raft-boltdb/util.go b/vendor/github.com/hashicorp/raft-boltdb/util.go
new file mode 100644
index 0000000000..68dd786b7a
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-boltdb/util.go
@@ -0,0 +1,37 @@
+package raftboltdb
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/hashicorp/go-msgpack/codec"
+)
+
+// Decode reverses the encode operation on a byte slice input
+func decodeMsgPack(buf []byte, out interface{}) error {
+	r := bytes.NewBuffer(buf)
+	hd := codec.MsgpackHandle{}
+	dec := codec.NewDecoder(r, &hd)
+	return dec.Decode(out)
+}
+
+// Encode writes an encoded object to a new bytes buffer
+func encodeMsgPack(in interface{}) (*bytes.Buffer, error) {
+	buf := bytes.NewBuffer(nil)
+	hd := codec.MsgpackHandle{}
+	enc := codec.NewEncoder(buf, &hd)
+	err := enc.Encode(in)
+	return buf, err
+}
+
+// Converts bytes to an integer
+func bytesToUint64(b []byte) uint64 {
+	return binary.BigEndian.Uint64(b)
+}
+
+// Converts a uint to a byte slice
+func uint64ToBytes(u uint64) []byte {
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, u)
+	return buf
+}
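
A round-trip sketch of the two integer helpers. Big-endian encoding makes
the byte-wise ordering of BoltDB keys match the numeric ordering of log
indexes, which is what lets the cursor-based FirstIndex, LastIndex and
DeleteRange above work:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // uint64ToBytes
        buf := make([]byte, 8)
        binary.BigEndian.PutUint64(buf, 42)

        // bytesToUint64
        fmt.Println(binary.BigEndian.Uint64(buf)) // 42
    }
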
diff --git a/vendor/github.com/hashicorp/raft/CHANGELOG.md b/vendor/github.com/hashicorp/raft/CHANGELOG.md
new file mode 100644
index 0000000000..06f1d8a6c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/CHANGELOG.md
@@ -0,0 +1,16 @@
+# 1.0.1 (April 12th, 2019)
+
+IMPROVEMENTS
+
+* InMemTransport: Add timeout for sending a message [[GH-313](https://github.com/hashicorp/raft/pull/313)]
+* Ensure 'make deps' downloads test dependencies like testify [[GH-310](https://github.com/hashicorp/raft/pull/310)]
+* Clarifies function of CommitTimeout [[GH-309](https://github.com/hashicorp/raft/pull/309)]
+* Add additional metrics regarding log dispatching and committal [[GH-316](https://github.com/hashicorp/raft/pull/316)]
+
+# 1.0.0 (October 3rd, 2017)
+
+v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version manages server identities using a UUID, and so introduces some breaking API changes. It also versions the Raft protocol, and requires some special steps when interoperating with Raft servers running older versions of the library (see the detailed comment in config.go about version compatibility). You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required to port Consul to these new interfaces.
+
+# 0.1.0 (September 29th, 2017)
+
+v0.1.0 is the original stable version of the library that was in master and has been maintained with no breaking API changes. This was in use by Consul prior to version 0.7.0.
diff --git a/vendor/github.com/hashicorp/raft/LICENSE b/vendor/github.com/hashicorp/raft/LICENSE
new file mode 100644
index 0000000000..c33dcc7c92
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/raft/Makefile b/vendor/github.com/hashicorp/raft/Makefile
new file mode 100644
index 0000000000..46849d88c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/Makefile
@@ -0,0 +1,20 @@
+DEPS = $(shell go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
+
+test:
+	go test -timeout=60s .
+
+integ: test
+	INTEG_TESTS=yes go test -timeout=25s -run=Integ .
+
+fuzz:
+	go test -timeout=300s ./fuzzy
+
+deps:
+	go get -t -d -v ./...
+	echo $(DEPS) | xargs -n1 go get -d
+
+cov:
+	INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html
+	open /tmp/coverage.html
+
+.PHONY: test cov integ deps fuzz
diff --git a/vendor/github.com/hashicorp/raft/README.md b/vendor/github.com/hashicorp/raft/README.md
new file mode 100644
index 0000000000..43208ebba8
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/README.md
@@ -0,0 +1,107 @@
+raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft)
+====
+
+raft is a [Go](http://www.golang.org) library that manages a replicated
+log and can be used with an FSM to manage replicated state machines. It
+is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)).
+
+The use cases for such a library are far-reaching, as replicated state
+machines are a key component of many distributed systems. They enable
+building Consistent, Partition Tolerant (CP) systems, with limited
+fault tolerance as well.
+
+## Building
+
+If you wish to build raft you'll need Go version 1.2+ installed.
+
+Please check your installation with:
+
+```
+go version
+```
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft).
+
+To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository,
+called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation
+for the `LogStore` and `StableStore`.
+
+A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available called
+[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore`
+and `StableStore`.
+
+## Tagged Releases
+
+As of September 2017, HashiCorp will start using tags for this library to clearly indicate
+major version updates. We recommend you vendor your application's dependency on this library.
+
+* v0.1.0 is the original stable version of the library that was in master and has been maintained
+with no breaking API changes. This was in use by Consul prior to version 0.7.0.
+
+* v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version
+manages server identities using a UUID, so introduces some breaking API changes. It also versions
+the Raft protocol, and requires some special steps when interoperating with Raft servers running
+older versions of the library (see the detailed comment in config.go about version compatibility).
+You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required
+to port Consul to these new interfaces.
+
+    This version includes some new features as well, including non-voting servers, a new address
+    provider abstraction in the transport layer, and more resilient snapshots.
+
+## Protocol
+
+raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf)
+
+A high level overview of the Raft protocol is described below, but for details please read the full
+[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf)
+followed by the raft source. Any questions about the raft protocol should be sent to the
+[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev).
+
+### Protocol Description
+
+Raft nodes are always in one of three states: follower, candidate or leader. All
+nodes initially start out as a follower. In this state, nodes can accept log entries
+from a leader and cast votes. If no entries are received for some time, nodes
+self-promote to the candidate state. In the candidate state nodes request votes from
+their peers. If a candidate receives a quorum of votes, then it is promoted to a leader.
+The leader must accept new log entries and replicate to all the other followers.
+In addition, if stale reads are not acceptable, all queries must also be performed on
+the leader.
+
+Once a cluster has a leader, it is able to accept new log entries. A client can
+request that a leader append a new log entry, which is an opaque binary blob to
+Raft. The leader then writes the entry to durable storage and attempts to replicate
+to a quorum of followers. Once the log entry is considered *committed*, it can be
+*applied* to a finite state machine. The finite state machine is application specific,
+and is implemented using an interface.
+
+An obvious question relates to the unbounded nature of a replicated log. Raft provides
+a mechanism by which the current state is snapshotted, and the log is compacted. Because
+of the FSM abstraction, restoring the state of the FSM must result in the same state
+as a replay of old logs. This allows Raft to capture the FSM state at a point in time,
+and then remove all the logs that were used to reach that state. This is performed automatically
+without user intervention, and prevents unbounded disk usage as well as minimizing
+time spent replaying logs.
+
+Lastly, there is the issue of updating the peer set when new servers are joining
+or existing servers are leaving. As long as a quorum of nodes is available, this
+is not an issue as Raft provides mechanisms to dynamically update the peer set.
+If a quorum of nodes is unavailable, then this becomes a very challenging issue.
+For example, suppose there are only 2 peers, A and B. The quorum size is also
+2, meaning both nodes must agree to commit a log entry. If either A or B fails,
+it is now impossible to reach quorum. This means the cluster is unable to add
+or remove a node, or to commit any additional log entries. This results in *unavailability*.
+At this point, manual intervention would be required to remove either A or B,
+and to restart the remaining node in bootstrap mode.
+
+A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster
+of 5 can tolerate 2 node failures. The recommended configuration is to either
+run 3 or 5 raft servers. This maximizes availability without
+greatly sacrificing performance.
+
+In terms of performance, Raft is comparable to Paxos. Assuming stable leadership,
+committing a log entry requires a single round trip to half of the cluster.
+Thus performance is bound by disk I/O and network latency.
+
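To make the protocol description concrete, the following sketch wires up a minimal single-node cluster using the library's in-memory log store, snapshot store, and transport. The nopFSM type is a hypothetical placeholder for an application state machine, and error handling is reduced to panics for brevity:

```go
package main

import (
	"fmt"
	"io"

	"github.com/hashicorp/raft"
)

// nopFSM is a placeholder state machine that discards every command;
// a real application would mutate its own state in Apply.
type nopFSM struct{}

func (nopFSM) Apply(l *raft.Log) interface{}       { return nil }
func (nopFSM) Snapshot() (raft.FSMSnapshot, error) { return nil, fmt.Errorf("not implemented") }
func (nopFSM) Restore(rc io.ReadCloser) error      { return rc.Close() }

func main() {
	conf := raft.DefaultConfig()
	conf.LocalID = raft.ServerID("node-1")

	logs := raft.NewInmemStore()              // implements LogStore and StableStore
	snaps := raft.NewInmemSnapshotStore()     // implements SnapshotStore
	addr, trans := raft.NewInmemTransport("") // loopback transport for testing

	// Bootstrap a one-server configuration, then start the node.
	cfg := raft.Configuration{Servers: []raft.Server{{
		ID:      conf.LocalID,
		Address: addr,
	}}}
	if err := raft.BootstrapCluster(conf, logs, logs, snaps, trans, cfg); err != nil {
		panic(err)
	}
	r, err := raft.NewRaft(conf, nopFSM{}, logs, logs, snaps, trans)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.State()) // Follower until the self-election completes
}
```

The in-memory components are intended for testing; a production deployment would pair a durable backend such as raft-boltdb with a network transport.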
diff --git a/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/raft/api.go
new file mode 100644
index 0000000000..c6f947f241
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/api.go
@@ -0,0 +1,1013 @@
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+
+	"github.com/armon/go-metrics"
+)
+
+var (
+	// ErrLeader is returned when an operation can't be completed on a
+	// leader node.
+	ErrLeader = errors.New("node is the leader")
+
+	// ErrNotLeader is returned when an operation can't be completed on a
+	// follower or candidate node.
+	ErrNotLeader = errors.New("node is not the leader")
+
+	// ErrLeadershipLost is returned when a leader fails to commit a log entry
+	// because it's been deposed in the process.
+	ErrLeadershipLost = errors.New("leadership lost while committing log")
+
+	// ErrAbortedByRestore is returned when a leader fails to commit a log
+	// entry because it's been superseded by a user snapshot restore.
+	ErrAbortedByRestore = errors.New("snapshot restored while committing log")
+
+	// ErrRaftShutdown is returned when operations are requested against an
+	// inactive Raft.
+	ErrRaftShutdown = errors.New("raft is already shutdown")
+
+	// ErrEnqueueTimeout is returned when a command fails due to a timeout.
+	ErrEnqueueTimeout = errors.New("timed out enqueuing operation")
+
+	// ErrNothingNewToSnapshot is returned when trying to create a snapshot
+	// but there's nothing new committed to the FSM since we started.
+	ErrNothingNewToSnapshot = errors.New("nothing new to snapshot")
+
+	// ErrUnsupportedProtocol is returned when an operation is attempted
+	// that's not supported by the current protocol version.
+	ErrUnsupportedProtocol = errors.New("operation not supported with current protocol version")
+
+	// ErrCantBootstrap is returned when attempt is made to bootstrap a
+	// cluster that already has state present.
+	ErrCantBootstrap = errors.New("bootstrap only works on new clusters")
+)
+
+// Raft implements a Raft node.
+type Raft struct {
+	raftState
+
+	// protocolVersion is used to inter-operate with Raft servers running
+	// different versions of the library. See comments in config.go for more
+	// details.
+	protocolVersion ProtocolVersion
+
+	// applyCh is used to async send logs to the main thread to
+	// be committed and applied to the FSM.
+	applyCh chan *logFuture
+
+	// Configuration provided at Raft initialization
+	conf Config
+
+	// FSM is the client state machine to apply commands to
+	fsm FSM
+
+	// fsmMutateCh is used to send state-changing updates to the FSM. This
+	// receives pointers to commitTuple structures when applying logs or
+	// pointers to restoreFuture structures when restoring a snapshot. We
+	// need control over the order of these operations when doing user
+	// restores so that we finish applying any old log applies before we
+	// take a user snapshot on the leader, otherwise we might restore the
+	// snapshot and apply old logs to it that were in the pipe.
+	fsmMutateCh chan interface{}
+
+	// fsmSnapshotCh is used to trigger a new snapshot being taken
+	fsmSnapshotCh chan *reqSnapshotFuture
+
+	// lastContact is the last time we had contact from the
+	// leader node. This can be used to gauge staleness.
+	lastContact     time.Time
+	lastContactLock sync.RWMutex
+
+	// Leader is the current cluster leader
+	leader     ServerAddress
+	leaderLock sync.RWMutex
+
+	// leaderCh is used to notify of leadership changes
+	leaderCh chan bool
+
+	// leaderState used only while state is leader
+	leaderState leaderState
+
+	// Stores our local server ID, used to avoid sending RPCs to ourself
+	localID ServerID
+
+	// Stores our local addr
+	localAddr ServerAddress
+
+	// Used for our logging
+	logger hclog.Logger
+
+	// LogStore provides durable storage for logs
+	logs LogStore
+
+	// Used to request the leader to make configuration changes.
+	configurationChangeCh chan *configurationChangeFuture
+
+	// Tracks the latest configuration and latest committed configuration from
+	// the log/snapshot.
+	configurations configurations
+
+	// RPC chan comes from the transport layer
+	rpcCh <-chan RPC
+
+	// Shutdown channel to exit, protected to prevent concurrent exits
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+
+	// snapshots is used to store and retrieve snapshots
+	snapshots SnapshotStore
+
+	// userSnapshotCh is used for user-triggered snapshots
+	userSnapshotCh chan *userSnapshotFuture
+
+	// userRestoreCh is used for user-triggered restores of external
+	// snapshots
+	userRestoreCh chan *userRestoreFuture
+
+	// stable is a StableStore implementation for durable state
+	// It provides stable storage for many fields in raftState
+	stable StableStore
+
+	// The transport layer we use
+	trans Transport
+
+	// verifyCh is used to async send verify futures to the main thread
+	// to verify we are still the leader
+	verifyCh chan *verifyFuture
+
+	// configurationsCh is used to get the configuration data safely from
+	// outside of the main thread.
+	configurationsCh chan *configurationsFuture
+
+	// bootstrapCh is used to attempt an initial bootstrap from outside of
+	// the main thread.
+	bootstrapCh chan *bootstrapFuture
+
+	// List of observers and the mutex that protects them. The observers list
+	// is indexed by an artificial ID which is used for deregistration.
+	observersLock sync.RWMutex
+	observers     map[uint64]*Observer
+}
+
+// BootstrapCluster initializes a server's storage with the given cluster
+// configuration. This should only be called at the beginning of time for the
+// cluster, and you absolutely must make sure that you call it with the same
+// configuration on all the Voter servers. There is no need to bootstrap
+// Nonvoter and Staging servers.
+//
+// One sane approach is to bootstrap a single server with a configuration
+// listing just itself as a Voter, then invoke AddVoter() on it to add other
+// servers to the cluster.
+func BootstrapCluster(conf *Config, logs LogStore, stable StableStore,
+	snaps SnapshotStore, trans Transport, configuration Configuration) error {
+	// Validate the Raft server config.
+	if err := ValidateConfig(conf); err != nil {
+		return err
+	}
+
+	// Sanity check the Raft peer configuration.
+	if err := checkConfiguration(configuration); err != nil {
+		return err
+	}
+
+	// Make sure the cluster is in a clean state.
+	hasState, err := HasExistingState(logs, stable, snaps)
+	if err != nil {
+		return fmt.Errorf("failed to check for existing state: %v", err)
+	}
+	if hasState {
+		return ErrCantBootstrap
+	}
+
+	// Set current term to 1.
+	if err := stable.SetUint64(keyCurrentTerm, 1); err != nil {
+		return fmt.Errorf("failed to save current term: %v", err)
+	}
+
+	// Append configuration entry to log.
+	entry := &Log{
+		Index: 1,
+		Term:  1,
+	}
+	if conf.ProtocolVersion < 3 {
+		entry.Type = LogRemovePeerDeprecated
+		entry.Data = encodePeers(configuration, trans)
+	} else {
+		entry.Type = LogConfiguration
+		entry.Data = encodeConfiguration(configuration)
+	}
+	if err := logs.StoreLog(entry); err != nil {
+		return fmt.Errorf("failed to append configuration entry to log: %v", err)
+	}
+
+	return nil
+}
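The "one sane approach" in the comment above amounts to seeding fresh storage with a configuration that lists only the local server as a Voter. A sketch (the bootstrapSingleVoter helper is hypothetical):

```go
package example

import "github.com/hashicorp/raft"

// bootstrapSingleVoter seeds brand-new storage with a one-server
// configuration. Additional servers are joined later via AddVoter()
// once this node has become leader.
func bootstrapSingleVoter(conf *raft.Config, logs raft.LogStore, stable raft.StableStore,
	snaps raft.SnapshotStore, trans raft.Transport) error {
	configuration := raft.Configuration{
		Servers: []raft.Server{{
			Suffrage: raft.Voter,
			ID:       conf.LocalID,
			Address:  trans.LocalAddr(),
		}},
	}
	return raft.BootstrapCluster(conf, logs, stable, snaps, trans, configuration)
}
```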
+
+// RecoverCluster is used to manually force a new configuration in order to
+// recover from a loss of quorum where the current configuration cannot be
+// restored, such as when several servers die at the same time. This works by
+// reading all the current state for this server, creating a snapshot with the
+// supplied configuration, and then truncating the Raft log. This is the only
+// safe way to force a given configuration without actually altering the log to
+// insert any new entries, which could cause conflicts with other servers with
+// different state.
+//
+// WARNING! This operation implicitly commits all entries in the Raft log, so
+// in general this is an extremely unsafe operation. If you've lost your other
+// servers and are performing a manual recovery, then you've also lost the
+// commit information, so this is likely the best you can do, but you should be
+// aware that calling this can cause Raft log entries that were in the process
+// of being replicated, but not yet committed, to be committed.
+//
+// Note the FSM passed here is used for the snapshot operations and will be
+// left in a state that should not be used by the application. Be sure to
+// discard this FSM and any associated state and provide a fresh one when
+// calling NewRaft later.
+//
+// A typical way to recover the cluster is to shut down all servers and then
+// run RecoverCluster on every server using an identical configuration. When
+// the cluster is then restarted, an election should occur, and then Raft will
+// resume normal operation. If it's desired to make a particular server the
+// leader, this can be used to inject a new configuration with that server as
+// the sole voter, and then join up other new clean-state peer servers using
+// the usual APIs in order to bring the cluster back into a known state.
+func RecoverCluster(conf *Config, fsm FSM, logs LogStore, stable StableStore,
+	snaps SnapshotStore, trans Transport, configuration Configuration) error {
+	// Validate the Raft server config.
+	if err := ValidateConfig(conf); err != nil {
+		return err
+	}
+
+	// Sanity check the Raft peer configuration.
+	if err := checkConfiguration(configuration); err != nil {
+		return err
+	}
+
+	// Refuse to recover if there's no existing state. This would be safe to
+	// do, but it is likely an indication of an operator error where they
+	// expect data to be there and it's not. By refusing, we force them
+	// to show intent to start a cluster fresh by explicitly doing a
+	// bootstrap, rather than quietly fire up a fresh cluster here.
+	hasState, err := HasExistingState(logs, stable, snaps)
+	if err != nil {
+		return fmt.Errorf("failed to check for existing state: %v", err)
+	}
+	if !hasState {
+		return fmt.Errorf("refused to recover cluster with no initial state, this is probably an operator error")
+	}
+
+	// Attempt to restore any snapshots we find, newest to oldest.
+	var snapshotIndex uint64
+	var snapshotTerm uint64
+	snapshots, err := snaps.List()
+	if err != nil {
+		return fmt.Errorf("failed to list snapshots: %v", err)
+	}
+	for _, snapshot := range snapshots {
+		_, source, err := snaps.Open(snapshot.ID)
+		if err != nil {
+			// Skip this one and try the next. We will detect if we
+			// couldn't open any snapshots.
+			continue
+		}
+		defer source.Close()
+
+		if err := fsm.Restore(source); err != nil {
+			// Same here, skip and try the next one.
+			continue
+		}
+
+		snapshotIndex = snapshot.Index
+		snapshotTerm = snapshot.Term
+		break
+	}
+	if len(snapshots) > 0 && (snapshotIndex == 0 || snapshotTerm == 0) {
+		return fmt.Errorf("failed to restore any of the available snapshots")
+	}
+
+	// The snapshot information is the best known end point for the data
+	// until we play back the Raft log entries.
+	lastIndex := snapshotIndex
+	lastTerm := snapshotTerm
+
+	// Apply any Raft log entries past the snapshot.
+	lastLogIndex, err := logs.LastIndex()
+	if err != nil {
+		return fmt.Errorf("failed to find last log: %v", err)
+	}
+	for index := snapshotIndex + 1; index <= lastLogIndex; index++ {
+		var entry Log
+		if err := logs.GetLog(index, &entry); err != nil {
+			return fmt.Errorf("failed to get log at index %d: %v", index, err)
+		}
+		if entry.Type == LogCommand {
+			_ = fsm.Apply(&entry)
+		}
+		lastIndex = entry.Index
+		lastTerm = entry.Term
+	}
+
+	// Create a new snapshot, placing the configuration in as if it was
+	// committed at index 1.
+	snapshot, err := fsm.Snapshot()
+	if err != nil {
+		return fmt.Errorf("failed to snapshot FSM: %v", err)
+	}
+	version := getSnapshotVersion(conf.ProtocolVersion)
+	sink, err := snaps.Create(version, lastIndex, lastTerm, configuration, 1, trans)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	if err := snapshot.Persist(sink); err != nil {
+		return fmt.Errorf("failed to persist snapshot: %v", err)
+	}
+	if err := sink.Close(); err != nil {
+		return fmt.Errorf("failed to finalize snapshot: %v", err)
+	}
+
+	// Compact the log so that we don't get bad interference from any
+	// configuration change log entries that might be there.
+	firstLogIndex, err := logs.FirstIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get first log index: %v", err)
+	}
+	if err := logs.DeleteRange(firstLogIndex, lastLogIndex); err != nil {
+		return fmt.Errorf("log compaction failed: %v", err)
+	}
+
+	return nil
+}
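Operationally, the recovery flow described above is: stop every server, run the same forced configuration through RecoverCluster on each, then restart them normally. A sketch (recoverNode is a hypothetical wrapper):

```go
package example

import "github.com/hashicorp/raft"

// recoverNode forces the supplied configuration onto this server's
// existing state. Run it with an identical configuration on every
// surviving server while the whole cluster is shut down.
func recoverNode(conf *raft.Config, fsm raft.FSM, logs raft.LogStore,
	stable raft.StableStore, snaps raft.SnapshotStore, trans raft.Transport,
	survivors raft.Configuration) error {
	// Per the warning above, fsm is left in an undefined state;
	// construct a fresh FSM before calling NewRaft afterwards.
	return raft.RecoverCluster(conf, fsm, logs, stable, snaps, trans, survivors)
}
```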
+
+// HasExistingState returns true if the server has any existing state (logs,
+// knowledge of a current term, or any snapshots).
+func HasExistingState(logs LogStore, stable StableStore, snaps SnapshotStore) (bool, error) {
+	// Make sure we don't have a current term.
+	currentTerm, err := stable.GetUint64(keyCurrentTerm)
+	if err == nil {
+		if currentTerm > 0 {
+			return true, nil
+		}
+	} else {
+		if err.Error() != "not found" {
+			return false, fmt.Errorf("failed to read current term: %v", err)
+		}
+	}
+
+	// Make sure we have an empty log.
+	lastIndex, err := logs.LastIndex()
+	if err != nil {
+		return false, fmt.Errorf("failed to get last log index: %v", err)
+	}
+	if lastIndex > 0 {
+		return true, nil
+	}
+
+	// Make sure we have no snapshots
+	snapshots, err := snaps.List()
+	if err != nil {
+		return false, fmt.Errorf("failed to list snapshots: %v", err)
+	}
+	if len(snapshots) > 0 {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// NewRaft is used to construct a new Raft node. It takes a configuration, as well
+// as implementations of various interfaces that are required. If we have any
+// old state, such as snapshots, logs, peers, etc, all those will be restored
+// when creating the Raft node.
+func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, trans Transport) (*Raft, error) {
+	// Validate the configuration.
+	if err := ValidateConfig(conf); err != nil {
+		return nil, err
+	}
+
+	// Ensure we have a LogOutput.
+	var logger hclog.Logger
+	if conf.Logger != nil {
+		logger = conf.Logger
+	} else {
+		if conf.LogOutput == nil {
+			conf.LogOutput = os.Stderr
+		}
+
+		logger = hclog.New(&hclog.LoggerOptions{
+			Name:   "raft",
+			Level:  hclog.LevelFromString(conf.LogLevel),
+			Output: conf.LogOutput,
+		})
+	}
+
+	// Try to restore the current term.
+	currentTerm, err := stable.GetUint64(keyCurrentTerm)
+	if err != nil && err.Error() != "not found" {
+		return nil, fmt.Errorf("failed to load current term: %v", err)
+	}
+
+	// Read the index of the last log entry.
+	lastIndex, err := logs.LastIndex()
+	if err != nil {
+		return nil, fmt.Errorf("failed to find last log: %v", err)
+	}
+
+	// Get the last log entry.
+	var lastLog Log
+	if lastIndex > 0 {
+		if err = logs.GetLog(lastIndex, &lastLog); err != nil {
+			return nil, fmt.Errorf("failed to get last log at index %d: %v", lastIndex, err)
+		}
+	}
+
+	// Make sure we have a valid server address and ID.
+	protocolVersion := conf.ProtocolVersion
+	localAddr := ServerAddress(trans.LocalAddr())
+	localID := conf.LocalID
+
+	// TODO (slackpad) - When we deprecate protocol version 2, remove this
+	// along with the AddPeer() and RemovePeer() APIs.
+	if protocolVersion < 3 && string(localID) != string(localAddr) {
+		return nil, fmt.Errorf("when running with ProtocolVersion < 3, LocalID must be set to the network address")
+	}
+
+	// Create Raft struct.
+	r := &Raft{
+		protocolVersion: protocolVersion,
+		applyCh:         make(chan *logFuture),
+		conf:            *conf,
+		fsm:             fsm,
+		fsmMutateCh:     make(chan interface{}, 128),
+		fsmSnapshotCh:   make(chan *reqSnapshotFuture),
+		leaderCh:        make(chan bool),
+		localID:         localID,
+		localAddr:       localAddr,
+		logger:          logger,
+		logs:            logs,
+		configurationChangeCh: make(chan *configurationChangeFuture),
+		configurations:        configurations{},
+		rpcCh:                 trans.Consumer(),
+		snapshots:             snaps,
+		userSnapshotCh:        make(chan *userSnapshotFuture),
+		userRestoreCh:         make(chan *userRestoreFuture),
+		shutdownCh:            make(chan struct{}),
+		stable:                stable,
+		trans:                 trans,
+		verifyCh:              make(chan *verifyFuture, 64),
+		configurationsCh:      make(chan *configurationsFuture, 8),
+		bootstrapCh:           make(chan *bootstrapFuture),
+		observers:             make(map[uint64]*Observer),
+	}
+
+	// Initialize as a follower.
+	r.setState(Follower)
+
+	// Start as leader if specified. This should only be used
+	// for testing purposes.
+	if conf.StartAsLeader {
+		r.setState(Leader)
+		r.setLeader(r.localAddr)
+	}
+
+	// Restore the current term and the last log.
+	r.setCurrentTerm(currentTerm)
+	r.setLastLog(lastLog.Index, lastLog.Term)
+
+	// Attempt to restore a snapshot if there are any.
+	if err := r.restoreSnapshot(); err != nil {
+		return nil, err
+	}
+
+	// Scan through the log for any configuration change entries.
+	snapshotIndex, _ := r.getLastSnapshot()
+	for index := snapshotIndex + 1; index <= lastLog.Index; index++ {
+		var entry Log
+		if err := r.logs.GetLog(index, &entry); err != nil {
+			r.logger.Error(fmt.Sprintf("Failed to get log at %d: %v", index, err))
+			panic(err)
+		}
+		r.processConfigurationLogEntry(&entry)
+	}
+	r.logger.Info(fmt.Sprintf("Initial configuration (index=%d): %+v",
+		r.configurations.latestIndex, r.configurations.latest.Servers))
+
+	// Setup a heartbeat fast-path to avoid head-of-line
+	// blocking where possible. It MUST be safe for this
+	// to be called concurrently with a blocking RPC.
+	trans.SetHeartbeatHandler(r.processHeartbeat)
+
+	// Start the background work.
+	r.goFunc(r.run)
+	r.goFunc(r.runFSM)
+	r.goFunc(r.runSnapshots)
+	return r, nil
+}
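A common startup pattern combines HasExistingState with the constructor: bootstrap only on first launch, then let NewRaft restore whatever state exists. A sketch (openRaft is a hypothetical helper; the bootstrap configuration is built as in the earlier example):

```go
package example

import "github.com/hashicorp/raft"

// openRaft bootstraps on first launch only, then constructs the node.
func openRaft(conf *raft.Config, fsm raft.FSM, logs raft.LogStore,
	stable raft.StableStore, snaps raft.SnapshotStore, trans raft.Transport,
	bootstrapConfig raft.Configuration) (*raft.Raft, error) {
	hasState, err := raft.HasExistingState(logs, stable, snaps)
	if err != nil {
		return nil, err
	}
	if !hasState {
		// First boot: seed the initial membership before starting.
		if err := raft.BootstrapCluster(conf, logs, stable, snaps, trans, bootstrapConfig); err != nil {
			return nil, err
		}
	}
	// Snapshots, logs and the current term are restored automatically.
	return raft.NewRaft(conf, fsm, logs, stable, snaps, trans)
}
```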
+
+// restoreSnapshot attempts to restore the latest snapshots, and fails if none
+// of them can be restored. This is called at initialization time, and is
+// completely unsafe to call at any other time.
+func (r *Raft) restoreSnapshot() error {
+	snapshots, err := r.snapshots.List()
+	if err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to list snapshots: %v", err))
+		return err
+	}
+
+	// Try to load in order of newest to oldest
+	for _, snapshot := range snapshots {
+		_, source, err := r.snapshots.Open(snapshot.ID)
+		if err != nil {
+			r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapshot.ID, err))
+			continue
+		}
+		defer source.Close()
+
+		if err := r.fsm.Restore(source); err != nil {
+			r.logger.Error(fmt.Sprintf("Failed to restore snapshot %v: %v", snapshot.ID, err))
+			continue
+		}
+
+		// Log success
+		r.logger.Info(fmt.Sprintf("Restored from snapshot %v", snapshot.ID))
+
+		// Update the lastApplied so we don't replay old logs
+		r.setLastApplied(snapshot.Index)
+
+		// Update the last stable snapshot info
+		r.setLastSnapshot(snapshot.Index, snapshot.Term)
+
+		// Update the configuration
+		if snapshot.Version > 0 {
+			r.configurations.committed = snapshot.Configuration
+			r.configurations.committedIndex = snapshot.ConfigurationIndex
+			r.configurations.latest = snapshot.Configuration
+			r.configurations.latestIndex = snapshot.ConfigurationIndex
+		} else {
+			configuration := decodePeers(snapshot.Peers, r.trans)
+			r.configurations.committed = configuration
+			r.configurations.committedIndex = snapshot.Index
+			r.configurations.latest = configuration
+			r.configurations.latestIndex = snapshot.Index
+		}
+
+		// Success!
+		return nil
+	}
+
+	// If we had snapshots and failed to load them, it's an error
+	if len(snapshots) > 0 {
+		return fmt.Errorf("failed to load any existing snapshots")
+	}
+	return nil
+}
+
+// BootstrapCluster is equivalent to non-member BootstrapCluster but can be
+// called on an un-bootstrapped Raft instance after it has been created. This
+// should only be called at the beginning of time for the cluster, and you
+// absolutely must make sure that you call it with the same configuration on all
+// the Voter servers. There is no need to bootstrap Nonvoter and Staging
+// servers.
+func (r *Raft) BootstrapCluster(configuration Configuration) Future {
+	bootstrapReq := &bootstrapFuture{}
+	bootstrapReq.init()
+	bootstrapReq.configuration = configuration
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.bootstrapCh <- bootstrapReq:
+		return bootstrapReq
+	}
+}
+
+// Leader is used to return the current leader of the cluster.
+// It may return an empty string if there is no current leader
+// or the leader is unknown.
+func (r *Raft) Leader() ServerAddress {
+	r.leaderLock.RLock()
+	leader := r.leader
+	r.leaderLock.RUnlock()
+	return leader
+}
+
+// Apply is used to apply a command to the FSM in a highly consistent
+// manner. This returns a future that can be used to wait on the application.
+// An optional timeout can be provided to limit the amount of time we wait
+// for the command to be started. This must be run on the leader or it
+// will fail.
+func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture {
+	metrics.IncrCounter([]string{"raft", "apply"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogCommand,
+			Data: cmd,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
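In practice the returned ApplyFuture is used both to block until the entry commits and to read whatever value the FSM's Apply returned. A sketch (applyCommand is hypothetical; command encoding is application-defined):

```go
package example

import (
	"time"

	"github.com/hashicorp/raft"
)

// applyCommand submits an opaque command and waits for the FSM result.
func applyCommand(r *raft.Raft, cmd []byte) (interface{}, error) {
	future := r.Apply(cmd, 5*time.Second)
	if err := future.Error(); err != nil {
		// raft.ErrNotLeader here means the caller should retry
		// against the current leader.
		return nil, err
	}
	return future.Response(), nil
}
```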
+
+// Barrier is used to issue a command that blocks until all preceding
+// operations have been applied to the FSM. It can be used to ensure the
+// FSM reflects all queued writes. An optional timeout can be provided to
+// limit the amount of time we wait for the command to be started. This
+// must be run on the leader or it will fail.
+func (r *Raft) Barrier(timeout time.Duration) Future {
+	metrics.IncrCounter([]string{"raft", "barrier"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type: LogBarrier,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// VerifyLeader is used to ensure the current node is still
+// the leader. This can be done to prevent stale reads when a
+// new leader has potentially been elected.
+func (r *Raft) VerifyLeader() Future {
+	metrics.IncrCounter([]string{"raft", "verify_leader"}, 1)
+	verifyFuture := &verifyFuture{}
+	verifyFuture.init()
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.verifyCh <- verifyFuture:
+		return verifyFuture
+	}
+}
+
+// GetConfiguration returns the latest configuration and its associated index
+// currently in use. This may not yet be committed. This must not be called on
+// the main thread (which can access the information directly).
+func (r *Raft) GetConfiguration() ConfigurationFuture {
+	configReq := &configurationsFuture{}
+	configReq.init()
+	select {
+	case <-r.shutdownCh:
+		configReq.respond(ErrRaftShutdown)
+		return configReq
+	case r.configurationsCh <- configReq:
+		return configReq
+	}
+}
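A typical consumer blocks on the future and then walks the server list, for example (printServers is a hypothetical helper):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/raft"
)

// printServers lists the latest (possibly uncommitted) membership.
func printServers(r *raft.Raft) error {
	future := r.GetConfiguration()
	if err := future.Error(); err != nil {
		return err
	}
	for _, srv := range future.Configuration().Servers {
		fmt.Printf("%s %s (%v)\n", srv.ID, srv.Address, srv.Suffrage)
	}
	return nil
}
```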
+
+// AddPeer (deprecated) is used to add a new peer into the cluster. This must be
+// run on the leader or it will fail. Use AddVoter/AddNonvoter instead.
+func (r *Raft) AddPeer(peer ServerAddress) Future {
+	if r.protocolVersion > 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:       AddStaging,
+		serverID:      ServerID(peer),
+		serverAddress: peer,
+		prevIndex:     0,
+	}, 0)
+}
+
+// RemovePeer (deprecated) is used to remove a peer from the cluster. If the
+// current leader is being removed, it will cause a new election
+// to occur. This must be run on the leader or it will fail.
+// Use RemoveServer instead.
+func (r *Raft) RemovePeer(peer ServerAddress) Future {
+	if r.protocolVersion > 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   RemoveServer,
+		serverID:  ServerID(peer),
+		prevIndex: 0,
+	}, 0)
+}
+
+// AddVoter will add the given server to the cluster as a staging server. If the
+// server is already in the cluster as a voter, this updates the server's address.
+// This must be run on the leader or it will fail. The leader will promote the
+// staging server to a voter once that server is ready. If nonzero, prevIndex is
+// the index of the only configuration upon which this change may be applied; if
+// another configuration entry has been added in the meantime, this request will
+// fail. If nonzero, timeout is how long this server should wait for the
+// configuration change log entry to be appended.
+func (r *Raft) AddVoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:       AddStaging,
+		serverID:      id,
+		serverAddress: address,
+		prevIndex:     prevIndex,
+	}, timeout)
+}
+
+// AddNonvoter will add the given server to the cluster but won't assign it a
+// vote. The server will receive log entries, but it won't participate in
+// elections or log entry commitment. If the server is already in the cluster,
+// this updates the server's address. This must be run on the leader or it will
+// fail. For prevIndex and timeout, see AddVoter.
+func (r *Raft) AddNonvoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:       AddNonvoter,
+		serverID:      id,
+		serverAddress: address,
+		prevIndex:     prevIndex,
+	}, timeout)
+}
+
+// RemoveServer will remove the given server from the cluster. If the current
+// leader is being removed, it will cause a new election to occur. This must be
+// run on the leader or it will fail. For prevIndex and timeout, see AddVoter.
+func (r *Raft) RemoveServer(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   RemoveServer,
+		serverID:  id,
+		prevIndex: prevIndex,
+	}, timeout)
+}
+
+// DemoteVoter will take away a server's vote, if it has one. If present, the
+// server will continue to receive log entries, but it won't participate in
+// elections or log entry commitment. If the server is not in the cluster, this
+// does nothing. This must be run on the leader or it will fail. For prevIndex
+// and timeout, see AddVoter.
+func (r *Raft) DemoteVoter(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   DemoteVoter,
+		serverID:  id,
+		prevIndex: prevIndex,
+	}, timeout)
+}
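Taken together, the version-3 membership API supports rolling replacement of servers from the leader. A sketch (replaceServer is hypothetical; a prevIndex of 0 skips the optimistic concurrency check):

```go
package example

import (
	"time"

	"github.com/hashicorp/raft"
)

// replaceServer adds a new voter and then removes an old server.
// Both calls must be made on the leader.
func replaceServer(r *raft.Raft, newID raft.ServerID, newAddr raft.ServerAddress,
	oldID raft.ServerID) error {
	// Stage the new server; the leader promotes it to voter once caught up.
	if err := r.AddVoter(newID, newAddr, 0, 10*time.Second).Error(); err != nil {
		return err
	}
	return r.RemoveServer(oldID, 0, 10*time.Second).Error()
}
```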
+
+// Shutdown is used to stop the Raft background routines.
+// This is not a graceful operation. Provides a future that
+// can be used to block until all background routines have exited.
+func (r *Raft) Shutdown() Future {
+	r.shutdownLock.Lock()
+	defer r.shutdownLock.Unlock()
+
+	if !r.shutdown {
+		close(r.shutdownCh)
+		r.shutdown = true
+		r.setState(Shutdown)
+		return &shutdownFuture{r}
+	}
+
+	// avoid closing transport twice
+	return &shutdownFuture{nil}
+}
+
+// Snapshot is used to manually force Raft to take a snapshot. Returns a future
+// that can be used to block until complete, and that contains a function that
+// can be used to open the snapshot.
+func (r *Raft) Snapshot() SnapshotFuture {
+	future := &userSnapshotFuture{}
+	future.init()
+	select {
+	case r.userSnapshotCh <- future:
+		return future
+	case <-r.shutdownCh:
+		future.respond(ErrRaftShutdown)
+		return future
+	}
+}
+
+// Restore is used to manually force Raft to consume an external snapshot, such
+// as if restoring from a backup. We will use the current Raft configuration,
+// not the one from the snapshot, so that we can restore into a new cluster. We
+// will also use the higher of the index of the snapshot, or the current index,
+// and then add 1 to that, so we force a new state with a hole in the Raft log,
+// so that the snapshot will be sent to followers and used for any new joiners.
+// This can only be run on the leader, and blocks until the restore is complete
+// or an error occurs.
+//
+// WARNING! This operation has the leader take on the state of the snapshot and
+// then sets itself up so that it replicates that to its followers through the
+// install snapshot process. This involves a potentially dangerous period where
+// the leader commits ahead of its followers, so should only be used for disaster
+// recovery into a fresh cluster, and should not be used in normal operations.
+func (r *Raft) Restore(meta *SnapshotMeta, reader io.Reader, timeout time.Duration) error {
+	metrics.IncrCounter([]string{"raft", "restore"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Perform the restore.
+	restore := &userRestoreFuture{
+		meta:   meta,
+		reader: reader,
+	}
+	restore.init()
+	select {
+	case <-timer:
+		return ErrEnqueueTimeout
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	case r.userRestoreCh <- restore:
+		// If the restore is ingested then wait for it to complete.
+		if err := restore.Error(); err != nil {
+			return err
+		}
+	}
+
+	// Apply a no-op log entry. Waiting for this lets us block until the
+	// followers have gotten the restore and replicated at least this new
+	// entry, which shows that they have also faulted and installed the
+	// snapshot with the contents of the restore.
+	noop := &logFuture{
+		log: Log{
+			Type: LogNoop,
+		},
+	}
+	noop.init()
+	select {
+	case <-timer:
+		return ErrEnqueueTimeout
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	case r.applyCh <- noop:
+		return noop.Error()
+	}
+}
+
+// State is used to return the current raft state.
+func (r *Raft) State() RaftState {
+	return r.getState()
+}
+
+// LeaderCh is used to get a channel which delivers signals on
+// acquiring or losing leadership. It sends true if we become
+// the leader, and false if we lose it. The channel is not buffered,
+// and does not block on writes.
+func (r *Raft) LeaderCh() <-chan bool {
+	return r.leaderCh
+}
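Because the channel is unbuffered and sends never block, notifications that arrive while nobody is receiving are dropped, so the usual consumer is a dedicated goroutine started before leadership can change. A sketch (watchLeadership is hypothetical):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/raft"
)

// watchLeadership logs leadership transitions. Start it early: since
// leaderCh is unbuffered and writes never block, signals sent while
// nobody is receiving are simply lost.
func watchLeadership(r *raft.Raft) {
	go func() {
		for isLeader := range r.LeaderCh() {
			if isLeader {
				fmt.Println("became leader")
			} else {
				fmt.Println("lost leadership")
			}
		}
	}()
}
```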
+
+// String returns a string representation of this Raft node.
+func (r *Raft) String() string {
+	return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState())
+}
+
+// LastContact returns the time of last contact by a leader.
+// This only makes sense if we are currently a follower.
+func (r *Raft) LastContact() time.Time {
+	r.lastContactLock.RLock()
+	last := r.lastContact
+	r.lastContactLock.RUnlock()
+	return last
+}
+
+// Stats is used to return a map of various internal stats. This
+// should only be used for informative purposes or debugging.
+//
+// Keys are: "state", "term", "last_log_index", "last_log_term",
+// "commit_index", "applied_index", "fsm_pending",
+// "last_snapshot_index", "last_snapshot_term",
+// "latest_configuration", "last_contact", and "num_peers".
+//
+// The value of "state" is a numerical value representing a
+// RaftState const.
+//
+// The value of "latest_configuration" is a string which contains
+// the id of each server, its suffrage status, and its address.
+//
+// The value of "last_contact" is either "never" if there
+// has been no contact with a leader, "0" if the node is in the
+// leader state, or the time since last contact with a leader
+// formatted as a string.
+//
+// The value of "num_peers" is the number of other voting servers in the
+// cluster, not including this node. If this node isn't part of the
+// configuration then this will be "0".
+//
+// All other values are uint64s, formatted as strings.
+func (r *Raft) Stats() map[string]string {
+	toString := func(v uint64) string {
+		return strconv.FormatUint(v, 10)
+	}
+	lastLogIndex, lastLogTerm := r.getLastLog()
+	lastSnapIndex, lastSnapTerm := r.getLastSnapshot()
+	s := map[string]string{
+		"state":                r.getState().String(),
+		"term":                 toString(r.getCurrentTerm()),
+		"last_log_index":       toString(lastLogIndex),
+		"last_log_term":        toString(lastLogTerm),
+		"commit_index":         toString(r.getCommitIndex()),
+		"applied_index":        toString(r.getLastApplied()),
+		"fsm_pending":          toString(uint64(len(r.fsmMutateCh))),
+		"last_snapshot_index":  toString(lastSnapIndex),
+		"last_snapshot_term":   toString(lastSnapTerm),
+		"protocol_version":     toString(uint64(r.protocolVersion)),
+		"protocol_version_min": toString(uint64(ProtocolVersionMin)),
+		"protocol_version_max": toString(uint64(ProtocolVersionMax)),
+		"snapshot_version_min": toString(uint64(SnapshotVersionMin)),
+		"snapshot_version_max": toString(uint64(SnapshotVersionMax)),
+	}
+
+	future := r.GetConfiguration()
+	if err := future.Error(); err != nil {
+		r.logger.Warn(fmt.Sprintf("could not get configuration for Stats: %v", err))
+	} else {
+		configuration := future.Configuration()
+		s["latest_configuration_index"] = toString(future.Index())
+		s["latest_configuration"] = fmt.Sprintf("%+v", configuration.Servers)
+
+		// This is a legacy metric that we've seen people use in the wild.
+		hasUs := false
+		numPeers := 0
+		for _, server := range configuration.Servers {
+			if server.Suffrage == Voter {
+				if server.ID == r.localID {
+					hasUs = true
+				} else {
+					numPeers++
+				}
+			}
+		}
+		if !hasUs {
+			numPeers = 0
+		}
+		s["num_peers"] = toString(uint64(numPeers))
+	}
+
+	last := r.LastContact()
+	if r.getState() == Leader {
+		s["last_contact"] = "0"
+	} else if last.IsZero() {
+		s["last_contact"] = "never"
+	} else {
+		s["last_contact"] = fmt.Sprintf("%v", time.Since(last))
+	}
+	return s
+}
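Since every value in the map is a string, dumping the stats is a single loop, for example (dumpStats is hypothetical):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/raft"
)

// dumpStats prints the debugging stats map; all values are strings.
func dumpStats(r *raft.Raft) {
	for k, v := range r.Stats() {
		fmt.Printf("%s = %s\n", k, v)
	}
}
```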
+
+// LastIndex returns the last index in stable storage,
+// either from the last log or from the last snapshot.
+func (r *Raft) LastIndex() uint64 {
+	return r.getLastIndex()
+}
+
+// AppliedIndex returns the last index applied to the FSM. This is generally
+// lagging behind the last index, especially for indexes that are persisted but
+// have not yet been considered committed by the leader. NOTE - this reflects
+// the last index that was sent to the application's FSM over the apply channel
+// but DOES NOT mean that the application's FSM has yet consumed it and applied
+// it to its internal state. Thus, the application's state may lag behind this
+// index.
+func (r *Raft) AppliedIndex() uint64 {
+	return r.getLastApplied()
+}
diff --git a/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/raft/commands.go
new file mode 100644
index 0000000000..5d89e7bcdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/commands.go
@@ -0,0 +1,151 @@
+package raft
+
+// RPCHeader is a common sub-structure used to pass along protocol version and
+// other information about the cluster. For older Raft implementations before
+// versioning was added this will default to a zero-valued structure when read
+// by newer Raft versions.
+type RPCHeader struct {
+	// ProtocolVersion is the version of the protocol the sender is
+	// speaking.
+	ProtocolVersion ProtocolVersion
+}
+
+// WithRPCHeader is an interface that exposes the RPC header.
+type WithRPCHeader interface {
+	GetRPCHeader() RPCHeader
+}
+
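+// Illustrative sketch (not from the vendored source): any message type that
+// embeds RPCHeader satisfies WithRPCHeader, so generic handlers can read the
+// sender's protocol version; senderVersion is a hypothetical helper:
+//
+//	func senderVersion(w WithRPCHeader) ProtocolVersion {
+//		return w.GetRPCHeader().ProtocolVersion
+//	}
+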
+// AppendEntriesRequest is the command used to append entries to the
+// replicated log.
+type AppendEntriesRequest struct {
+	RPCHeader
+
+	// Provide the current term and leader
+	Term   uint64
+	Leader []byte
+
+	// Provide the previous entries for integrity checking
+	PrevLogEntry uint64
+	PrevLogTerm  uint64
+
+	// New entries to commit
+	Entries []*Log
+
+	// Commit index on the leader
+	LeaderCommitIndex uint64
+}
+
+// See WithRPCHeader.
+func (r *AppendEntriesRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// AppendEntriesResponse is the response returned from an
+// AppendEntriesRequest.
+type AppendEntriesResponse struct {
+	RPCHeader
+
+	// Newer term if leader is out of date
+	Term uint64
+
+	// LastLog is a hint to help accelerate rebuilding slow nodes
+	LastLog uint64
+
+	// We may not succeed if we have a conflicting entry
+	Success bool
+
+	// There are scenarios where this request didn't succeed
+	// but there's no need to wait/back-off the next attempt.
+	NoRetryBackoff bool
+}
+
+// See WithRPCHeader.
+func (r *AppendEntriesResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// RequestVoteRequest is the command used by a candidate to ask a Raft peer
+// for a vote in an election.
+type RequestVoteRequest struct {
+	RPCHeader
+
+	// Provide the term and our id
+	Term      uint64
+	Candidate []byte
+
+	// Used to ensure safety
+	LastLogIndex uint64
+	LastLogTerm  uint64
+}
+
+// See WithRPCHeader.
+func (r *RequestVoteRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// RequestVoteResponse is the response returned from a RequestVoteRequest.
+type RequestVoteResponse struct {
+	RPCHeader
+
+	// Newer term if leader is out of date.
+	Term uint64
+
+	// Peers is deprecated, but required by servers that only understand
+	// protocol version 0. This is not populated in protocol version 2
+	// and later.
+	Peers []byte
+
+	// Is the vote granted.
+	Granted bool
+}
+
+// See WithRPCHeader.
+func (r *RequestVoteResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its
+// log (and state machine) from a snapshot on another peer.
+type InstallSnapshotRequest struct {
+	RPCHeader
+	SnapshotVersion SnapshotVersion
+
+	Term   uint64
+	Leader []byte
+
+	// These are the last index/term included in the snapshot
+	LastLogIndex uint64
+	LastLogTerm  uint64
+
+	// Peer Set in the snapshot. This is deprecated in favor of Configuration
+	// but remains here in case we receive an InstallSnapshot from a leader
+	// that's running old code.
+	Peers []byte
+
+	// Cluster membership.
+	Configuration []byte
+	// Log index where 'Configuration' entry was originally written.
+	ConfigurationIndex uint64
+
+	// Size of the snapshot
+	Size int64
+}
+
+// See WithRPCHeader.
+func (r *InstallSnapshotRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
+// InstallSnapshotResponse is the response returned from an
+// InstallSnapshotRequest.
+type InstallSnapshotResponse struct {
+	RPCHeader
+
+	Term    uint64
+	Success bool
+}
+
+// See WithRPCHeader.
+func (r *InstallSnapshotResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
diff --git a/vendor/github.com/hashicorp/raft/commitment.go b/vendor/github.com/hashicorp/raft/commitment.go
new file mode 100644
index 0000000000..7aa36464ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/commitment.go
@@ -0,0 +1,101 @@
+package raft
+
+import (
+	"sort"
+	"sync"
+)
+
+// commitment is used to advance the leader's commit index. The leader and
+// replication goroutines report in newly written entries with Match(), and
+// this notifies on commitCh when the commit index has advanced.
+type commitment struct {
+	// protects matchIndexes and commitIndex
+	sync.Mutex
+	// notified when commitIndex increases
+	commitCh chan struct{}
+	// voter ID to log index: the server stores up through this log entry
+	matchIndexes map[ServerID]uint64
+	// a quorum stores up through this log entry. monotonically increases.
+	commitIndex uint64
+	// the first index of this leader's term: this needs to be replicated to a
+	// majority of the cluster before this leader may mark anything committed
+	// (per Raft's commitment rule)
+	startIndex uint64
+}
+
+// newCommitment returns a commitment struct that notifies the provided
+// channel when log entries have been committed. A new commitment struct is
+// created each time this server becomes leader for a particular term.
+// 'configuration' is the servers in the cluster.
+// 'startIndex' is the first index created in this term (see
+// its description above).
+func newCommitment(commitCh chan struct{}, configuration Configuration, startIndex uint64) *commitment {
+	matchIndexes := make(map[ServerID]uint64)
+	for _, server := range configuration.Servers {
+		if server.Suffrage == Voter {
+			matchIndexes[server.ID] = 0
+		}
+	}
+	return &commitment{
+		commitCh:     commitCh,
+		matchIndexes: matchIndexes,
+		commitIndex:  0,
+		startIndex:   startIndex,
+	}
+}
+
+// Called when a new cluster membership configuration is created: it will be
+// used to determine commitment from now on. 'configuration' is the servers in
+// the cluster.
+func (c *commitment) setConfiguration(configuration Configuration) {
+	c.Lock()
+	defer c.Unlock()
+	oldMatchIndexes := c.matchIndexes
+	c.matchIndexes = make(map[ServerID]uint64)
+	for _, server := range configuration.Servers {
+		if server.Suffrage == Voter {
+			c.matchIndexes[server.ID] = oldMatchIndexes[server.ID] // defaults to 0
+		}
+	}
+	c.recalculate()
+}
+
+// Called by the leader after commitCh is notified.
+func (c *commitment) getCommitIndex() uint64 {
+	c.Lock()
+	defer c.Unlock()
+	return c.commitIndex
+}
+
+// match is called once a server completes writing entries to disk: either the
+// leader has written the new entry or a follower has replied to an
+// AppendEntries RPC. The given server's disk agrees with this server's log up
+// through the given index.
+func (c *commitment) match(server ServerID, matchIndex uint64) {
+	c.Lock()
+	defer c.Unlock()
+	if prev, hasVote := c.matchIndexes[server]; hasVote && matchIndex > prev {
+		c.matchIndexes[server] = matchIndex
+		c.recalculate()
+	}
+}
+
+// Internal helper to calculate new commitIndex from matchIndexes.
+// Must be called with lock held.
+func (c *commitment) recalculate() {
+	if len(c.matchIndexes) == 0 {
+		return
+	}
+
+	matched := make([]uint64, 0, len(c.matchIndexes))
+	for _, idx := range c.matchIndexes {
+		matched = append(matched, idx)
+	}
+	sort.Sort(uint64Slice(matched))
+	quorumMatchIndex := matched[(len(matched)-1)/2]
+
+	if quorumMatchIndex > c.commitIndex && quorumMatchIndex >= c.startIndex {
+		c.commitIndex = quorumMatchIndex
+		asyncNotifyCh(c.commitCh)
+	}
+}
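+
+// Worked example (illustrative): with matchIndexes {A: 3, B: 5, C: 5},
+// matched sorts to [3 5 5] and quorumMatchIndex = matched[(3-1)/2] =
+// matched[1] = 5, the highest index stored on a majority of the voters.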
diff --git a/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/raft/config.go
new file mode 100644
index 0000000000..66d4d0fa08
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/config.go
@@ -0,0 +1,265 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+)
+
+// These are the versions of the protocol (which includes RPC messages as
+// well as Raft-specific log entries) that this server can _understand_. Use
+// the ProtocolVersion member of the Config object to control the version of
+// the protocol to use when _speaking_ to other servers. Note that depending on
+// the protocol version being spoken, some otherwise understood RPC messages
+// may be refused. See dispositionRPC for details of this logic.
+//
+// There are notes about the upgrade path in the description of the versions
+// below. If you are starting a fresh cluster then there's no reason not to
+// jump right to the latest protocol version. If you need to interoperate with
+// older, version 0 Raft servers you'll need to drive the cluster through the
+// different versions in order.
+//
+// The version details are complicated, but here's a summary of what's required
+// to get from a version 0 cluster to version 3:
+//
+// 1. In version N of your app that starts using the new Raft library with
+//    versioning, set ProtocolVersion to 1.
+// 2. Make version N+1 of your app require version N as a prerequisite (all
+//    servers must be upgraded). For version N+1 of your app set ProtocolVersion
+//    to 2.
+// 3. Similarly, make version N+2 of your app require version N+1 as a
+//    prerequisite. For version N+2 of your app, set ProtocolVersion to 3.
+//
+// During this upgrade, older cluster members will still have Server IDs equal
+// to their network addresses. To upgrade an older member and give it an ID, it
+// needs to leave the cluster and re-enter:
+//
+// 1. Remove the server from the cluster with RemoveServer, using its network
+//    address as its ServerID.
+// 2. Update the server's config to use a UUID or something else that is
+//    not tied to the machine as the ServerID (restarting the server).
+// 3. Add the server back to the cluster with AddVoter, using its new ID.
+//
+// You can do this during the rolling upgrade from N+1 to N+2 of your app, or
+// as a rolling change at any time after the upgrade.
+//
+// Version History
+//
+// 0: Original Raft library before versioning was added. Servers running this
+//    version of the Raft library use AddPeerDeprecated/RemovePeerDeprecated
+//    for all configuration changes, and have no support for LogConfiguration.
+// 1: First versioned protocol, used to interoperate with old servers, and begin
+//    the migration path to newer versions of the protocol. Under this version
+//    all configuration changes are propagated using the now-deprecated
+//    RemovePeerDeprecated Raft log entry. This means that server IDs are always
+//    set to be the same as the server addresses (since the old log entry type
+//    cannot transmit an ID), and only AddPeer/RemovePeer APIs are supported.
+//    Servers running this version of the protocol can understand the new
+//    LogConfiguration Raft log entry but will never generate one so they can
+//    remain compatible with version 0 Raft servers in the cluster.
+// 2: Transitional protocol used when migrating an existing cluster to the new
+//    server ID system. Server IDs are still set to be the same as server
+//    addresses, but all configuration changes are propagated using the new
+//    LogConfiguration Raft log entry type, which can carry full ID information.
+//    This version supports the old AddPeer/RemovePeer APIs as well as the new
+//    ID-based AddVoter/RemoveServer APIs which should be used when adding
+//    version 3 servers to the cluster later. This version sheds all
+//    interoperability with version 0 servers, but can interoperate with newer
+//    Raft servers running with protocol version 1 since they can understand the
+//    new LogConfiguration Raft log entry, and this version can still understand
+//    their RemovePeerDeprecated Raft log entries. We need this protocol version
+//    as an intermediate step between 1 and 3 so that servers will propagate the
+//    ID information that will come from newly-added (or -rolled) servers using
+//    protocol version 3, but since they are still using their address-based IDs
+//    from the previous step they will still be able to track commitments and
+//    their own voting status properly. If we skipped this step, servers would
+//    be started with their new IDs, but they wouldn't see themselves in the old
+//    address-based configuration, so none of the servers would think they had a
+//    vote.
+// 3: Protocol adding full support for server IDs and new ID-based server APIs
+//    (AddVoter, AddNonvoter, etc.), old AddPeer/RemovePeer APIs are no longer
+//    supported. Version 2 servers should be swapped out by removing them from
+//    the cluster one-by-one and re-adding them with updated configuration for
+//    this protocol version, along with their server ID. The remove/add cycle
+//    is required to populate their server ID. Note that removing must be done
+//    by ID, which will be the old server's address.
+type ProtocolVersion int
+
+const (
+	ProtocolVersionMin ProtocolVersion = 0
+	ProtocolVersionMax                 = 3
+)
+
+// These are versions of snapshots that this server can _understand_. Currently,
+// it is always assumed that this server generates the latest version, though
+// this may be changed in the future to include a configurable version.
+//
+// Version History
+//
+// 0: Original Raft library before versioning was added. The peers portion of
+//    these snapshots is encoded in the legacy format which requires decodePeers
+//    to parse. This version of snapshots should only be produced by the
+//    unversioned Raft library.
+// 1: New format which adds support for a full configuration structure and its
+//    associated log index, with support for server IDs and non-voting server
+//    modes. To ease upgrades, this also includes the legacy peers structure but
+//    that will never be used by servers that understand version 1 snapshots.
+//    Since the original Raft library didn't enforce any versioning, we must
+//    include the legacy peers structure for this version, but we can deprecate
+//    it in the next snapshot version.
+type SnapshotVersion int
+
+const (
+	SnapshotVersionMin SnapshotVersion = 0
+	SnapshotVersionMax                 = 1
+)
+
+// Config provides any necessary configuration for the Raft server.
+type Config struct {
+	// ProtocolVersion allows a Raft server to inter-operate with older
+	// Raft servers running an older version of the code. This is used to
+	// version the wire protocol as well as Raft-specific log entries that
+	// the server uses when _speaking_ to other servers. There is currently
+	// no auto-negotiation of versions so all servers must be manually
+	// configured with compatible versions. See ProtocolVersionMin and
+	// ProtocolVersionMax for the versions of the protocol that this server
+	// can _understand_.
+	ProtocolVersion ProtocolVersion
+
+	// HeartbeatTimeout specifies the time in follower state without
+	// a leader before we attempt an election.
+	HeartbeatTimeout time.Duration
+
+	// ElectionTimeout specifies the time in candidate state without
+	// a leader before we attempt an election.
+	ElectionTimeout time.Duration
+
+	// CommitTimeout controls the time without an Apply() operation
+	// before we heartbeat to ensure a timely commit. Due to random
+	// staggering, may be delayed as much as 2x this value.
+	CommitTimeout time.Duration
+
+	// MaxAppendEntries controls the maximum number of append entries
+	// to send at once. We want to strike a balance between efficiency
+	// and avoiding waste if the follower is going to reject because of
+	// an inconsistent log.
+	MaxAppendEntries int
+
+	// If we are a member of a cluster, and RemovePeer is invoked for the
+	// local node, then we forget all peers and transition into the follower state.
+	// If ShutdownOnRemove is set, we additionally shut down Raft. Otherwise,
+	// we can become a leader of a cluster containing only this node.
+	ShutdownOnRemove bool
+
+	// TrailingLogs controls how many logs we leave after a snapshot. This is
+	// used so that we can quickly replay logs on a follower instead of being
+	// forced to send an entire snapshot.
+	TrailingLogs uint64
+
+	// SnapshotInterval controls how often we check if we should perform a snapshot.
+	// We randomly stagger between this value and 2x this value to prevent the
+	// entire cluster from performing a snapshot at once.
+	SnapshotInterval time.Duration
+
+	// SnapshotThreshold controls how many outstanding logs there must be before
+	// we perform a snapshot. This is to prevent excessive snapshots when we can
+	// just replay a small set of logs.
+	SnapshotThreshold uint64
+
+	// LeaderLeaseTimeout is used to control how long the "lease" lasts
+	// for being the leader without being able to contact a quorum
+	// of nodes. If we reach this interval without contact, we will
+	// step down as leader.
+	LeaderLeaseTimeout time.Duration
+
+	// StartAsLeader forces Raft to start in the leader state. This should
+	// never be used except for testing purposes, as it can cause a split-brain.
+	StartAsLeader bool
+
+	// The unique ID for this server across all time. When running with
+	// ProtocolVersion < 3, you must set this to be the same as the network
+	// address of your transport.
+	LocalID ServerID
+
+	// NotifyCh is used to provide a channel that will be notified of leadership
+	// changes. Raft will block writing to this channel, so it should either be
+	// buffered or aggressively consumed.
+	NotifyCh chan<- bool
+
+	// LogOutput is used as a sink for logs, unless Logger is specified.
+	// Defaults to os.Stderr.
+	LogOutput io.Writer
+
+	// LogLevel represents a log level. If no matching string is specified,
+	// hclog.NoLevel is assumed.
+	LogLevel string
+
+	// Logger is a user-provided hc-log logger. If nil, a logger writing to
+	// LogOutput with LogLevel is used.
+	Logger hclog.Logger
+}
+
+// DefaultConfig returns a Config with usable defaults.
+func DefaultConfig() *Config {
+	return &Config{
+		ProtocolVersion:    ProtocolVersionMax,
+		HeartbeatTimeout:   1000 * time.Millisecond,
+		ElectionTimeout:    1000 * time.Millisecond,
+		CommitTimeout:      50 * time.Millisecond,
+		MaxAppendEntries:   64,
+		ShutdownOnRemove:   true,
+		TrailingLogs:       10240,
+		SnapshotInterval:   120 * time.Second,
+		SnapshotThreshold:  8192,
+		LeaderLeaseTimeout: 500 * time.Millisecond,
+		LogLevel:           "DEBUG",
+	}
+}
+
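+// Usage sketch (illustrative; the ID and threshold are arbitrary): start
+// from DefaultConfig and override fields before validating:
+//
+//	config := DefaultConfig()
+//	config.LocalID = ServerID("node-1")
+//	config.SnapshotThreshold = 1024
+//	if err := ValidateConfig(config); err != nil {
+//		panic(err)
+//	}
+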
+// ValidateConfig is used to validate a sane configuration
+func ValidateConfig(config *Config) error {
+	// We don't actually support running as 0 in the library any more, but
+	// we do understand it.
+	protocolMin := ProtocolVersionMin
+	if protocolMin == 0 {
+		protocolMin = 1
+	}
+	if config.ProtocolVersion < protocolMin ||
+		config.ProtocolVersion > ProtocolVersionMax {
+		return fmt.Errorf("Protocol version %d must be >= %d and <= %d",
+			config.ProtocolVersion, protocolMin, ProtocolVersionMax)
+	}
+	if len(config.LocalID) == 0 {
+		return fmt.Errorf("LocalID cannot be empty")
+	}
+	if config.HeartbeatTimeout < 5*time.Millisecond {
+		return fmt.Errorf("Heartbeat timeout is too low")
+	}
+	if config.ElectionTimeout < 5*time.Millisecond {
+		return fmt.Errorf("Election timeout is too low")
+	}
+	if config.CommitTimeout < time.Millisecond {
+		return fmt.Errorf("Commit timeout is too low")
+	}
+	if config.MaxAppendEntries <= 0 {
+		return fmt.Errorf("MaxAppendEntries must be positive")
+	}
+	if config.MaxAppendEntries > 1024 {
+		return fmt.Errorf("MaxAppendEntries is too large")
+	}
+	if config.SnapshotInterval < 5*time.Millisecond {
+		return fmt.Errorf("Snapshot interval is too low")
+	}
+	if config.LeaderLeaseTimeout < 5*time.Millisecond {
+		return fmt.Errorf("Leader lease timeout is too low")
+	}
+	if config.LeaderLeaseTimeout > config.HeartbeatTimeout {
+		return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout")
+	}
+	if config.ElectionTimeout < config.HeartbeatTimeout {
+		return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout")
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/configuration.go b/vendor/github.com/hashicorp/raft/configuration.go
new file mode 100644
index 0000000000..4bb784d0bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/configuration.go
@@ -0,0 +1,343 @@
+package raft
+
+import "fmt"
+
+// ServerSuffrage determines whether a Server in a Configuration gets a vote.
+type ServerSuffrage int
+
+// Note: Don't renumber these, since the numbers are written into the log.
+const (
+	// Voter is a server whose vote is counted in elections and whose match index
+	// is used in advancing the leader's commit index.
+	Voter ServerSuffrage = iota
+	// Nonvoter is a server that receives log entries but is not considered for
+	// elections or commitment purposes.
+	Nonvoter
+	// Staging is a server that acts like a nonvoter with one exception: once a
+	// staging server receives enough log entries to be sufficiently caught up to
+	// the leader's log, the leader will invoke a membership change to change
+	// the Staging server to a Voter.
+	Staging
+)
+
+func (s ServerSuffrage) String() string {
+	switch s {
+	case Voter:
+		return "Voter"
+	case Nonvoter:
+		return "Nonvoter"
+	case Staging:
+		return "Staging"
+	}
+	return "ServerSuffrage"
+}
+
+// ServerID is a unique string identifying a server for all time.
+type ServerID string
+
+// ServerAddress is a network address for a server that a transport can contact.
+type ServerAddress string
+
+// Server tracks the information about a single server in a configuration.
+type Server struct {
+	// Suffrage determines whether the server gets a vote.
+	Suffrage ServerSuffrage
+	// ID is a unique string identifying this server for all time.
+	ID ServerID
+	// Address is its network address that a transport can contact.
+	Address ServerAddress
+}
+
+// Configuration tracks which servers are in the cluster, and whether they have
+// votes. This should include the local server, if it's a member of the cluster.
+// The servers are listed in no particular order, but each should only appear once.
+// These entries are appended to the log during membership changes.
+type Configuration struct {
+	Servers []Server
+}
+
+// Clone makes a deep copy of a Configuration.
+func (c *Configuration) Clone() (copy Configuration) {
+	copy.Servers = append(copy.Servers, c.Servers...)
+	return
+}
+
+// ConfigurationChangeCommand is the different ways to change the cluster
+// configuration.
+type ConfigurationChangeCommand uint8
+
+const (
+	// AddStaging makes a server Staging unless it's a Voter.
+	AddStaging ConfigurationChangeCommand = iota
+	// AddNonvoter makes a server Nonvoter unless it's Staging or a Voter.
+	AddNonvoter
+	// DemoteVoter makes a server Nonvoter unless it's absent.
+	DemoteVoter
+	// RemoveServer removes a server entirely from the cluster membership.
+	RemoveServer
+	// Promote is created automatically by a leader; it turns a Staging server
+	// into a Voter.
+	Promote
+)
+
+func (c ConfigurationChangeCommand) String() string {
+	switch c {
+	case AddStaging:
+		return "AddStaging"
+	case AddNonvoter:
+		return "AddNonvoter"
+	case DemoteVoter:
+		return "DemoteVoter"
+	case RemoveServer:
+		return "RemoveServer"
+	case Promote:
+		return "Promote"
+	}
+	return "ConfigurationChangeCommand"
+}
+
+// configurationChangeRequest describes a change that a leader would like to
+// make to its current configuration. It's used only within a single server
+// (never serialized into the log), as part of `configurationChangeFuture`.
+type configurationChangeRequest struct {
+	command       ConfigurationChangeCommand
+	serverID      ServerID
+	serverAddress ServerAddress // only present for AddStaging, AddNonvoter
+	// prevIndex, if nonzero, is the index of the only configuration upon which
+	// this change may be applied; if another configuration entry has been
+	// added in the meantime, this request will fail.
+	prevIndex uint64
+}
+
+// configurations is state tracked on every server about its Configurations.
+// Note that, per Diego's dissertation, there can be at most one uncommitted
+// configuration at a time (the next configuration may not be created until the
+// prior one has been committed).
+//
+// One downside to storing just two configurations is that if a snapshot is
+// taken when the state machine hasn't yet applied the committedIndex, we
+// have no record of the configuration that would logically fit into that
+// snapshot. We disallow snapshots in that case now. An alternative approach,
+// which LogCabin uses, is to track every configuration change in the
+// log.
+type configurations struct {
+	// committed is the latest configuration in the log/snapshot that has been
+	// committed (the one with the largest index).
+	committed Configuration
+	// committedIndex is the log index where 'committed' was written.
+	committedIndex uint64
+	// latest is the latest configuration in the log/snapshot (may be committed
+	// or uncommitted)
+	latest Configuration
+	// latestIndex is the log index where 'latest' was written.
+	latestIndex uint64
+}
+
+// Clone makes a deep copy of a configurations object.
+func (c *configurations) Clone() (copy configurations) {
+	copy.committed = c.committed.Clone()
+	copy.committedIndex = c.committedIndex
+	copy.latest = c.latest.Clone()
+	copy.latestIndex = c.latestIndex
+	return
+}
+
+// hasVote returns true if the server identified by 'id' is a Voter in the
+// provided Configuration.
+func hasVote(configuration Configuration, id ServerID) bool {
+	for _, server := range configuration.Servers {
+		if server.ID == id {
+			return server.Suffrage == Voter
+		}
+	}
+	return false
+}
+
+// checkConfiguration tests a cluster membership configuration for common
+// errors.
+func checkConfiguration(configuration Configuration) error {
+	idSet := make(map[ServerID]bool)
+	addressSet := make(map[ServerAddress]bool)
+	var voters int
+	for _, server := range configuration.Servers {
+		if server.ID == "" {
+			return fmt.Errorf("Empty ID in configuration: %v", configuration)
+		}
+		if server.Address == "" {
+			return fmt.Errorf("Empty address in configuration: %v", server)
+		}
+		if idSet[server.ID] {
+			return fmt.Errorf("Found duplicate ID in configuration: %v", server.ID)
+		}
+		idSet[server.ID] = true
+		if addressSet[server.Address] {
+			return fmt.Errorf("Found duplicate address in configuration: %v", server.Address)
+		}
+		addressSet[server.Address] = true
+		if server.Suffrage == Voter {
+			voters++
+		}
+	}
+	if voters == 0 {
+		return fmt.Errorf("Need at least one voter in configuration: %v", configuration)
+	}
+	return nil
+}
+
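+// Usage sketch (illustrative; IDs and addresses are made up): a valid
+// configuration needs at least one voter and unique IDs and addresses:
+//
+//	cfg := Configuration{Servers: []Server{
+//		{Suffrage: Voter, ID: "node-1", Address: "10.0.0.1:8300"},
+//		{Suffrage: Nonvoter, ID: "node-2", Address: "10.0.0.2:8300"},
+//	}}
+//	err := checkConfiguration(cfg) // nil: valid configuration
+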
+// nextConfiguration generates a new Configuration from the current one and a
+// configuration change request. It's split from appendConfigurationEntry so
+// that it can be unit tested easily.
+func nextConfiguration(current Configuration, currentIndex uint64, change configurationChangeRequest) (Configuration, error) {
+	if change.prevIndex > 0 && change.prevIndex != currentIndex {
+		return Configuration{}, fmt.Errorf("Configuration changed since %v (latest is %v)", change.prevIndex, currentIndex)
+	}
+
+	configuration := current.Clone()
+	switch change.command {
+	case AddStaging:
+		// TODO: barf on new address?
+		newServer := Server{
+			// TODO: This should add the server as Staging, to be automatically
+			// promoted to Voter later. However, the promotion to Voter is not yet
+			// implemented, and doing so is not trivial with the way the leader loop
+			// coordinates with the replication goroutines today. So, for now, the
+			// server will have a vote right away, and the Promote case below is
+			// unused.
+			Suffrage: Voter,
+			ID:       change.serverID,
+			Address:  change.serverAddress,
+		}
+		found := false
+		for i, server := range configuration.Servers {
+			if server.ID == change.serverID {
+				if server.Suffrage == Voter {
+					configuration.Servers[i].Address = change.serverAddress
+				} else {
+					configuration.Servers[i] = newServer
+				}
+				found = true
+				break
+			}
+		}
+		if !found {
+			configuration.Servers = append(configuration.Servers, newServer)
+		}
+	case AddNonvoter:
+		newServer := Server{
+			Suffrage: Nonvoter,
+			ID:       change.serverID,
+			Address:  change.serverAddress,
+		}
+		found := false
+		for i, server := range configuration.Servers {
+			if server.ID == change.serverID {
+				if server.Suffrage != Nonvoter {
+					configuration.Servers[i].Address = change.serverAddress
+				} else {
+					configuration.Servers[i] = newServer
+				}
+				found = true
+				break
+			}
+		}
+		if !found {
+			configuration.Servers = append(configuration.Servers, newServer)
+		}
+	case DemoteVoter:
+		for i, server := range configuration.Servers {
+			if server.ID == change.serverID {
+				configuration.Servers[i].Suffrage = Nonvoter
+				break
+			}
+		}
+	case RemoveServer:
+		for i, server := range configuration.Servers {
+			if server.ID == change.serverID {
+				configuration.Servers = append(configuration.Servers[:i], configuration.Servers[i+1:]...)
+				break
+			}
+		}
+	case Promote:
+		for i, server := range configuration.Servers {
+			if server.ID == change.serverID && server.Suffrage == Staging {
+				configuration.Servers[i].Suffrage = Voter
+				break
+			}
+		}
+	}
+
+	// Make sure we didn't do something bad like remove the last voter
+	if err := checkConfiguration(configuration); err != nil {
+		return Configuration{}, err
+	}
+
+	return configuration, nil
+}
+
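+// Usage sketch (illustrative; IDs, addresses, and indexes are made up):
+// demote one of two voters in the configuration written at index 7; the
+// request fails if a newer configuration entry has been appended since:
+//
+//	cfg := Configuration{Servers: []Server{
+//		{Suffrage: Voter, ID: "node-1", Address: "10.0.0.1:8300"},
+//		{Suffrage: Voter, ID: "node-2", Address: "10.0.0.2:8300"},
+//	}}
+//	next, err := nextConfiguration(cfg, 7, configurationChangeRequest{
+//		command:   DemoteVoter,
+//		serverID:  "node-2",
+//		prevIndex: 7,
+//	})
+//	// next lists node-2 as a Nonvoter; err is nil.
+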
+// encodePeers is used to serialize a Configuration into the old peers format.
+// This is here for backwards compatibility when operating with a mix of old
+// servers and should be removed once we deprecate support for protocol version 1.
+func encodePeers(configuration Configuration, trans Transport) []byte {
+	// Gather up all the voters, other suffrage types are not supported by
+	// this data format.
+	var encPeers [][]byte
+	for _, server := range configuration.Servers {
+		if server.Suffrage == Voter {
+			encPeers = append(encPeers, trans.EncodePeer(server.ID, server.Address))
+		}
+	}
+
+	// Encode the entire array.
+	buf, err := encodeMsgPack(encPeers)
+	if err != nil {
+		panic(fmt.Errorf("failed to encode peers: %v", err))
+	}
+
+	return buf.Bytes()
+}
+
+// decodePeers is used to deserialize an old list of peers into a Configuration.
+// This is here for backwards compatibility with old log entries and snapshots;
+// it should be removed eventually.
+func decodePeers(buf []byte, trans Transport) Configuration {
+	// Decode the buffer first.
+	var encPeers [][]byte
+	if err := decodeMsgPack(buf, &encPeers); err != nil {
+		panic(fmt.Errorf("failed to decode peers: %v", err))
+	}
+
+	// Deserialize each peer.
+	var servers []Server
+	for _, enc := range encPeers {
+		p := trans.DecodePeer(enc)
+		servers = append(servers, Server{
+			Suffrage: Voter,
+			ID:       ServerID(p),
+			Address:  ServerAddress(p),
+		})
+	}
+
+	return Configuration{
+		Servers: servers,
+	}
+}
+
+// encodeConfiguration serializes a Configuration using MsgPack, or panics on
+// errors.
+func encodeConfiguration(configuration Configuration) []byte {
+	buf, err := encodeMsgPack(configuration)
+	if err != nil {
+		panic(fmt.Errorf("failed to encode configuration: %v", err))
+	}
+	return buf.Bytes()
+}
+
+// decodeConfiguration deserializes a Configuration using MsgPack, or panics on
+// errors.
+func decodeConfiguration(buf []byte) Configuration {
+	var configuration Configuration
+	if err := decodeMsgPack(buf, &configuration); err != nil {
+		panic(fmt.Errorf("failed to decode configuration: %v", err))
+	}
+	return configuration
+}
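+
+// Round-trip sketch (illustrative): encodeConfiguration and
+// decodeConfiguration are symmetric, so for any valid cfg:
+//
+//	restored := decodeConfiguration(encodeConfiguration(cfg))
+//	// restored is deep-equal to cfg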
diff --git a/vendor/github.com/hashicorp/raft/discard_snapshot.go b/vendor/github.com/hashicorp/raft/discard_snapshot.go
new file mode 100644
index 0000000000..5e93a9fe01
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/discard_snapshot.go
@@ -0,0 +1,49 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+)
+
+// DiscardSnapshotStore is used to successfully snapshot while
+// always discarding the snapshot. This is useful when the
+// log should be truncated but no snapshot should be retained.
+// It should never be used in production and is only
+// suitable for testing.
+type DiscardSnapshotStore struct{}
+
+// DiscardSnapshotSink implements SnapshotSink and discards all data
+// written to it.
+type DiscardSnapshotSink struct{}
+
+// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore.
+func NewDiscardSnapshotStore() *DiscardSnapshotStore {
+	return &DiscardSnapshotStore{}
+}
+
+func (d *DiscardSnapshotStore) Create(version SnapshotVersion, index, term uint64,
+	configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) {
+	return &DiscardSnapshotSink{}, nil
+}
+
+func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) {
+	return nil, nil
+}
+
+func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) {
+	return nil, nil, fmt.Errorf("open is not supported")
+}
+
+func (d *DiscardSnapshotSink) Write(b []byte) (int, error) {
+	return len(b), nil
+}
+
+func (d *DiscardSnapshotSink) Close() error {
+	return nil
+}
+
+func (d *DiscardSnapshotSink) ID() string {
+	return "discard"
+}
+
+func (d *DiscardSnapshotSink) Cancel() error {
+	return nil
+}
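+
+// Usage sketch (illustrative; trans is an assumed Transport): writes are
+// accepted and dropped, so the log can be truncated without retaining data:
+//
+//	store := NewDiscardSnapshotStore()
+//	sink, _ := store.Create(SnapshotVersionMax, 10, 2, Configuration{}, 1, trans)
+//	sink.Write([]byte("ignored"))
+//	sink.Close()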
diff --git a/vendor/github.com/hashicorp/raft/file_snapshot.go b/vendor/github.com/hashicorp/raft/file_snapshot.go
new file mode 100644
index 0000000000..ffc9414542
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/file_snapshot.go
@@ -0,0 +1,528 @@
+package raft
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"hash/crc64"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	testPath      = "permTest"
+	snapPath      = "snapshots"
+	metaFilePath  = "meta.json"
+	stateFilePath = "state.bin"
+	tmpSuffix     = ".tmp"
+)
+
+// FileSnapshotStore implements the SnapshotStore interface and allows
+// snapshots to be made on the local disk.
+type FileSnapshotStore struct {
+	path   string
+	retain int
+	logger *log.Logger
+}
+
+type snapMetaSlice []*fileSnapshotMeta
+
+// FileSnapshotSink implements SnapshotSink with a file.
+type FileSnapshotSink struct {
+	store     *FileSnapshotStore
+	logger    *log.Logger
+	dir       string
+	parentDir string
+	meta      fileSnapshotMeta
+
+	stateFile *os.File
+	stateHash hash.Hash64
+	buffered  *bufio.Writer
+
+	closed bool
+}
+
+// fileSnapshotMeta is stored on disk. We also put a CRC
+// on disk so that we can verify the snapshot.
+type fileSnapshotMeta struct {
+	SnapshotMeta
+	CRC []byte
+}
+
+// bufferedFile is returned when we open a snapshot. This way
+// reads are buffered and the file still gets closed.
+type bufferedFile struct {
+	bh *bufio.Reader
+	fh *os.File
+}
+
+func (b *bufferedFile) Read(p []byte) (n int, err error) {
+	return b.bh.Read(p)
+}
+
+func (b *bufferedFile) Close() error {
+	return b.fh.Close()
+}
+
+// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based
+// on a base directory. The `retain` parameter controls how many
+// snapshots are retained. Must be at least 1.
+func NewFileSnapshotStoreWithLogger(base string, retain int, logger *log.Logger) (*FileSnapshotStore, error) {
+	if retain < 1 {
+		return nil, fmt.Errorf("must retain at least one snapshot")
+	}
+	if logger == nil {
+		logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	// Ensure our path exists
+	path := filepath.Join(base, snapPath)
+	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, fmt.Errorf("snapshot path not accessible: %v", err)
+	}
+
+	// Setup the store
+	store := &FileSnapshotStore{
+		path:   path,
+		retain: retain,
+		logger: logger,
+	}
+
+	// Do a permissions test
+	if err := store.testPermissions(); err != nil {
+		return nil, fmt.Errorf("permissions test failed: %v", err)
+	}
+	return store, nil
+}
+
+// NewFileSnapshotStore creates a new FileSnapshotStore based
+// on a base directory. The `retain` parameter controls how many
+// snapshots are retained. Must be at least 1.
+func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) {
+	if logOutput == nil {
+		logOutput = os.Stderr
+	}
+	return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags))
+}
+
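+// Usage sketch (illustrative; the path is made up): keep the two most
+// recent snapshots under /var/lib/myapp/raft, logging to stderr:
+//
+//	snaps, err := NewFileSnapshotStore("/var/lib/myapp/raft", 2, os.Stderr)
+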
+// testPermissions tries to touch a file in our path to see if it works.
+func (f *FileSnapshotStore) testPermissions() error {
+	path := filepath.Join(f.path, testPath)
+	fh, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+
+	if err = fh.Close(); err != nil {
+		return err
+	}
+
+	if err = os.Remove(path); err != nil {
+		return err
+	}
+	return nil
+}
+
+// snapshotName generates a name for the snapshot.
+func snapshotName(term, index uint64) string {
+	now := time.Now()
+	msec := now.UnixNano() / int64(time.Millisecond)
+	return fmt.Sprintf("%d-%d-%d", term, index, msec)
+}
+
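+// For example (illustrative timestamp), snapshotName(2, 12) yields something
+// like "2-12-1558467868913": term, index, then milliseconds since the epoch.
+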
+// Create is used to start a new snapshot
+func (f *FileSnapshotStore) Create(version SnapshotVersion, index, term uint64,
+	configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) {
+	// We only support version 1 snapshots at this time.
+	if version != 1 {
+		return nil, fmt.Errorf("unsupported snapshot version %d", version)
+	}
+
+	// Create a new path
+	name := snapshotName(term, index)
+	path := filepath.Join(f.path, name+tmpSuffix)
+	f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path)
+
+	// Make the directory
+	if err := os.MkdirAll(path, 0755); err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err)
+		return nil, err
+	}
+
+	// Create the sink
+	sink := &FileSnapshotSink{
+		store:     f,
+		logger:    f.logger,
+		dir:       path,
+		parentDir: f.path,
+		meta: fileSnapshotMeta{
+			SnapshotMeta: SnapshotMeta{
+				Version:            version,
+				ID:                 name,
+				Index:              index,
+				Term:               term,
+				Peers:              encodePeers(configuration, trans),
+				Configuration:      configuration,
+				ConfigurationIndex: configurationIndex,
+			},
+			CRC: nil,
+		},
+	}
+
+	// Write out the meta data
+	if err := sink.writeMeta(); err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err)
+		return nil, err
+	}
+
+	// Open the state file
+	statePath := filepath.Join(path, stateFilePath)
+	fh, err := os.Create(statePath)
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err)
+		return nil, err
+	}
+	sink.stateFile = fh
+
+	// Create a CRC64 hash
+	sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	// Wrap both the hash and file in a MultiWriter with buffering
+	multi := io.MultiWriter(sink.stateFile, sink.stateHash)
+	sink.buffered = bufio.NewWriter(multi)
+
+	// Done
+	return sink, nil
+}
+
+// List returns available snapshots in the store.
+func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) {
+	// Get the eligible snapshots
+	snapshots, err := f.getSnapshots()
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err)
+		return nil, err
+	}
+
+	var snapMeta []*SnapshotMeta
+	for _, meta := range snapshots {
+		snapMeta = append(snapMeta, &meta.SnapshotMeta)
+		if len(snapMeta) == f.retain {
+			break
+		}
+	}
+	return snapMeta, nil
+}
+
+// getSnapshots returns all the known snapshots.
+func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) {
+	// Get the eligible snapshots
+	snapshots, err := ioutil.ReadDir(f.path)
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err)
+		return nil, err
+	}
+
+	// Populate the metadata
+	var snapMeta []*fileSnapshotMeta
+	for _, snap := range snapshots {
+		// Ignore any files
+		if !snap.IsDir() {
+			continue
+		}
+
+		// Ignore any temporary snapshots
+		dirName := snap.Name()
+		if strings.HasSuffix(dirName, tmpSuffix) {
+			f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName)
+			continue
+		}
+
+		// Try to read the meta data
+		meta, err := f.readMeta(dirName)
+		if err != nil {
+			f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err)
+			continue
+		}
+
+		// Make sure we can understand this version.
+		if meta.Version < SnapshotVersionMin || meta.Version > SnapshotVersionMax {
+			f.logger.Printf("[WARN] snapshot: Snapshot version for %v not supported: %d", dirName, meta.Version)
+			continue
+		}
+
+		// Append, but only return up to the retain count
+		snapMeta = append(snapMeta, meta)
+	}
+
+	// Sort the snapshot, reverse so we get new -> old
+	sort.Sort(sort.Reverse(snapMetaSlice(snapMeta)))
+
+	return snapMeta, nil
+}
+
+// readMeta is used to read the metadata for a given named backup.
+func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) {
+	// Open the meta file
+	metaPath := filepath.Join(f.path, name, metaFilePath)
+	fh, err := os.Open(metaPath)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	// Buffer the file IO
+	buffered := bufio.NewReader(fh)
+
+	// Read in the JSON
+	meta := &fileSnapshotMeta{}
+	dec := json.NewDecoder(buffered)
+	if err := dec.Decode(meta); err != nil {
+		return nil, err
+	}
+	return meta, nil
+}
+
+// Open takes a snapshot ID and returns a ReadCloser for that snapshot.
+func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) {
+	// Get the metadata
+	meta, err := f.readMeta(id)
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err)
+		return nil, nil, err
+	}
+
+	// Open the state file
+	statePath := filepath.Join(f.path, id, stateFilePath)
+	fh, err := os.Open(statePath)
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err)
+		return nil, nil, err
+	}
+
+	// Create a CRC64 hash
+	stateHash := crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	// Compute the hash
+	_, err = io.Copy(stateHash, fh)
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err)
+		fh.Close()
+		return nil, nil, err
+	}
+
+	// Verify the hash
+	computed := stateHash.Sum(nil)
+	if !bytes.Equal(meta.CRC, computed) {
+		f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)",
+			meta.CRC, computed)
+		fh.Close()
+		return nil, nil, fmt.Errorf("CRC mismatch")
+	}
+
+	// Seek to the start
+	if _, err := fh.Seek(0, 0); err != nil {
+		f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err)
+		fh.Close()
+		return nil, nil, err
+	}
+
+	// Return a buffered file
+	buffered := &bufferedFile{
+		bh: bufio.NewReader(fh),
+		fh: fh,
+	}
+
+	return &meta.SnapshotMeta, buffered, nil
+}
+
+// ReapSnapshots reaps any snapshots beyond the retain count.
+func (f *FileSnapshotStore) ReapSnapshots() error {
+	snapshots, err := f.getSnapshots()
+	if err != nil {
+		f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err)
+		return err
+	}
+
+	for i := f.retain; i < len(snapshots); i++ {
+		path := filepath.Join(f.path, snapshots[i].ID)
+		f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path)
+		if err := os.RemoveAll(path); err != nil {
+			f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// ID returns the ID of the snapshot; it can be used with Open()
+// after the snapshot is finalized.
+func (s *FileSnapshotSink) ID() string {
+	return s.meta.ID
+}
+
+// Write is used to append to the state file. We write to the
+// buffered IO object to reduce the number of context switches.
+func (s *FileSnapshotSink) Write(b []byte) (int, error) {
+	return s.buffered.Write(b)
+}
+
+// Close is used to indicate a successful end.
+func (s *FileSnapshotSink) Close() error {
+	// Make sure close is idempotent
+	if s.closed {
+		return nil
+	}
+	s.closed = true
+
+	// Close the open handles
+	if err := s.finalize(); err != nil {
+		s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err)
+		if delErr := os.RemoveAll(s.dir); delErr != nil {
+			s.logger.Printf("[ERR] snapshot: Failed to delete temporary snapshot directory at path %v: %v", s.dir, delErr)
+			return delErr
+		}
+		return err
+	}
+
+	// Write out the meta data
+	if err := s.writeMeta(); err != nil {
+		s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err)
+		return err
+	}
+
+	// Move the directory into place
+	newPath := strings.TrimSuffix(s.dir, tmpSuffix)
+	if err := os.Rename(s.dir, newPath); err != nil {
+		s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err)
+		return err
+	}
+
+	if runtime.GOOS != "windows" { //skipping fsync for directory entry edits on Windows, only needed for *nix style file systems
+		parentFH, err := os.Open(s.parentDir)
+		defer parentFH.Close()
+		if err != nil {
+			s.logger.Printf("[ERR] snapshot: Failed to open snapshot parent directory %v, error: %v", s.parentDir, err)
+			return err
+		}
+
+		if err = parentFH.Sync(); err != nil {
+			s.logger.Printf("[ERR] snapshot: Failed syncing parent directory %v, error: %v", s.parentDir, err)
+			return err
+		}
+	}
+
+	// Reap any old snapshots
+	if err := s.store.ReapSnapshots(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Cancel is used to indicate an unsuccessful end.
+func (s *FileSnapshotSink) Cancel() error {
+	// Make sure close is idempotent
+	if s.closed {
+		return nil
+	}
+	s.closed = true
+
+	// Close the open handles
+	if err := s.finalize(); err != nil {
+		s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err)
+		return err
+	}
+
+	// Attempt to remove all artifacts
+	return os.RemoveAll(s.dir)
+}
+
+// finalize is used to close all of our resources.
+func (s *FileSnapshotSink) finalize() error {
+	// Flush any remaining data
+	if err := s.buffered.Flush(); err != nil {
+		return err
+	}
+
+	// Sync to force fsync to disk
+	if err := s.stateFile.Sync(); err != nil {
+		return err
+	}
+
+	// Get the file size
+	stat, statErr := s.stateFile.Stat()
+
+	// Close the file
+	if err := s.stateFile.Close(); err != nil {
+		return err
+	}
+
+	// Set the file size, check after we close
+	if statErr != nil {
+		return statErr
+	}
+	s.meta.Size = stat.Size()
+
+	// Set the CRC
+	s.meta.CRC = s.stateHash.Sum(nil)
+	return nil
+}
+
+// writeMeta is used to write out the metadata we have.
+func (s *FileSnapshotSink) writeMeta() error {
+	// Open the meta file
+	metaPath := filepath.Join(s.dir, metaFilePath)
+	fh, err := os.Create(metaPath)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+
+	// Buffer the file IO
+	buffered := bufio.NewWriter(fh)
+
+	// Write out as JSON
+	enc := json.NewEncoder(buffered)
+	if err := enc.Encode(&s.meta); err != nil {
+		return err
+	}
+
+	if err = buffered.Flush(); err != nil {
+		return err
+	}
+
+	if err = fh.Sync(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Implement the sort interface for []*fileSnapshotMeta.
+func (s snapMetaSlice) Len() int {
+	return len(s)
+}
+
+func (s snapMetaSlice) Less(i, j int) bool {
+	if s[i].Term != s[j].Term {
+		return s[i].Term < s[j].Term
+	}
+	if s[i].Index != s[j].Index {
+		return s[i].Index < s[j].Index
+	}
+	return s[i].ID < s[j].ID
+}
+
+func (s snapMetaSlice) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
diff --git a/vendor/github.com/hashicorp/raft/fsm.go b/vendor/github.com/hashicorp/raft/fsm.go
new file mode 100644
index 0000000000..c89986c0fa
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/fsm.go
@@ -0,0 +1,136 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+// FSM provides an interface that can be implemented by
+// clients to make use of the replicated log.
+type FSM interface {
+	// Apply log is invoked once a log entry is committed.
+	// It returns a value which will be made available in the
+	// ApplyFuture returned by Raft.Apply method if that
+	// method was called on the same Raft node as the FSM.
+	Apply(*Log) interface{}
+
+	// Snapshot is used to support log compaction. This call should
+	// return an FSMSnapshot which can be used to save a point-in-time
+	// snapshot of the FSM. Apply and Snapshot are not called in multiple
+	// threads, but Apply will be called concurrently with Persist. This means
+	// the FSM should be implemented in a fashion that allows for concurrent
+	// updates while a snapshot is happening.
+	Snapshot() (FSMSnapshot, error)
+
+	// Restore is used to restore an FSM from a snapshot. It is not called
+	// concurrently with any other command. The FSM must discard all previous
+	// state.
+	Restore(io.ReadCloser) error
+}
+
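+// Illustrative sketch (not part of this package): a minimal FSM that counts
+// applied entries; it persists the counter as 8 big-endian bytes and assumes
+// the io and encoding/binary imports. All names are hypothetical:
+//
+//	type counterFSM struct{ n uint64 }
+//
+//	func (c *counterFSM) Apply(l *Log) interface{} { c.n++; return c.n }
+//
+//	func (c *counterFSM) Snapshot() (FSMSnapshot, error) { return counterSnap(c.n), nil }
+//
+//	func (c *counterFSM) Restore(rc io.ReadCloser) error {
+//		defer rc.Close()
+//		var buf [8]byte
+//		if _, err := io.ReadFull(rc, buf[:]); err != nil {
+//			return err
+//		}
+//		c.n = binary.BigEndian.Uint64(buf[:])
+//		return nil
+//	}
+//
+//	type counterSnap uint64
+//
+//	func (s counterSnap) Persist(sink SnapshotSink) error {
+//		var buf [8]byte
+//		binary.BigEndian.PutUint64(buf[:], uint64(s))
+//		if _, err := sink.Write(buf[:]); err != nil {
+//			sink.Cancel()
+//			return err
+//		}
+//		return sink.Close()
+//	}
+//
+//	func (s counterSnap) Release() {}
+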
+// FSMSnapshot is returned by an FSM in response to a Snapshot call.
+// It must be safe to invoke FSMSnapshot methods with concurrent
+// calls to Apply.
+type FSMSnapshot interface {
+	// Persist should dump all necessary state to the WriteCloser 'sink',
+	// and call sink.Close() when finished or call sink.Cancel() on error.
+	Persist(sink SnapshotSink) error
+
+	// Release is invoked when we are finished with the snapshot.
+	Release()
+}
+
+// runFSM is a long running goroutine responsible for applying logs
+// to the FSM. This is done async of other logs since we don't want
+// the FSM to block our internal operations.
+func (r *Raft) runFSM() {
+	var lastIndex, lastTerm uint64
+
+	commit := func(req *commitTuple) {
+		// Apply the log if a command
+		var resp interface{}
+		if req.log.Type == LogCommand {
+			start := time.Now()
+			resp = r.fsm.Apply(req.log)
+			metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start)
+		}
+
+		// Update the indexes
+		lastIndex = req.log.Index
+		lastTerm = req.log.Term
+
+		// Invoke the future if given
+		if req.future != nil {
+			req.future.response = resp
+			req.future.respond(nil)
+		}
+	}
+
+	restore := func(req *restoreFuture) {
+		// Open the snapshot
+		meta, source, err := r.snapshots.Open(req.ID)
+		if err != nil {
+			req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err))
+			return
+		}
+
+		// Attempt to restore
+		start := time.Now()
+		if err := r.fsm.Restore(source); err != nil {
+			req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err))
+			source.Close()
+			return
+		}
+		source.Close()
+		metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start)
+
+		// Update the last index and term
+		lastIndex = meta.Index
+		lastTerm = meta.Term
+		req.respond(nil)
+	}
+
+	snapshot := func(req *reqSnapshotFuture) {
+		// Is there something to snapshot?
+		if lastIndex == 0 {
+			req.respond(ErrNothingNewToSnapshot)
+			return
+		}
+
+		// Start a snapshot
+		start := time.Now()
+		snap, err := r.fsm.Snapshot()
+		metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start)
+
+		// Respond to the request
+		req.index = lastIndex
+		req.term = lastTerm
+		req.snapshot = snap
+		req.respond(err)
+	}
+
+	for {
+		select {
+		case ptr := <-r.fsmMutateCh:
+			switch req := ptr.(type) {
+			case *commitTuple:
+				commit(req)
+
+			case *restoreFuture:
+				restore(req)
+
+			default:
+				panic(fmt.Errorf("bad type passed to fsmMutateCh: %#v", ptr))
+			}
+
+		case req := <-r.fsmSnapshotCh:
+			snapshot(req)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/raft/future.go b/vendor/github.com/hashicorp/raft/future.go
new file mode 100644
index 0000000000..fac59a5cc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/future.go
@@ -0,0 +1,289 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+)
+
+// Future is used to represent an action that may occur in the future.
+type Future interface {
+	// Error blocks until the future arrives and then
+	// returns the error status of the future.
+	// This may be called any number of times - all
+	// calls will return the same value.
+	// Note that it is not OK to call this method
+	// twice concurrently on the same Future instance.
+	Error() error
+}
+
+// IndexFuture is used for future actions that can result in a raft log entry
+// being created.
+type IndexFuture interface {
+	Future
+
+	// Index holds the index of the newly applied log entry.
+	// This must not be called until after the Error method has returned.
+	Index() uint64
+}
+
+// ApplyFuture is used for Apply and can return the FSM response.
+type ApplyFuture interface {
+	IndexFuture
+
+	// Response returns the FSM response as returned
+	// by the FSM.Apply method. This must not be called
+	// until after the Error method has returned.
+	Response() interface{}
+}
+
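+// Usage sketch (illustrative; r is an assumed *Raft node and cmd an opaque
+// command understood by the FSM):
+//
+//	f := r.Apply(cmd, 5*time.Second)
+//	if err := f.Error(); err != nil {
+//		return err // not committed, e.g. lost leadership
+//	}
+//	resp := f.Response() // value returned by FSM.Apply
+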
+// ConfigurationFuture is used for GetConfiguration and can return the
+// latest configuration in use by Raft.
+type ConfigurationFuture interface {
+	IndexFuture
+
+	// Configuration contains the latest configuration. This must
+	// not be called until after the Error method has returned.
+	Configuration() Configuration
+}
+
+// SnapshotFuture is used for waiting on a user-triggered snapshot to complete.
+type SnapshotFuture interface {
+	Future
+
+	// Open is a function you can call to access the underlying snapshot and
+	// its metadata. This must not be called until after the Error method
+	// has returned.
+	Open() (*SnapshotMeta, io.ReadCloser, error)
+}
+
+// errorFuture is used to return a static error.
+type errorFuture struct {
+	err error
+}
+
+func (e errorFuture) Error() error {
+	return e.err
+}
+
+func (e errorFuture) Response() interface{} {
+	return nil
+}
+
+func (e errorFuture) Index() uint64 {
+	return 0
+}
+
+// deferError can be embedded to allow a future
+// to provide an error in the future.
+type deferError struct {
+	err       error
+	errCh     chan error
+	responded bool
+}
+
+func (d *deferError) init() {
+	d.errCh = make(chan error, 1)
+}
+
+func (d *deferError) Error() error {
+	if d.err != nil {
+		// Note that when we've received a nil error, this
+		// won't trigger, but the channel is closed after
+		// send so we'll still return nil below.
+		return d.err
+	}
+	if d.errCh == nil {
+		panic("waiting for response on nil channel")
+	}
+	d.err = <-d.errCh
+	return d.err
+}
+
+func (d *deferError) respond(err error) {
+	if d.errCh == nil {
+		return
+	}
+	if d.responded {
+		return
+	}
+	d.errCh <- err
+	close(d.errCh)
+	d.responded = true
+}
+
+// There are several types of requests that cause a configuration entry to
+// be appended to the log. These are encoded here for leaderLoop() to process.
+// This is internal to a single server.
+type configurationChangeFuture struct {
+	logFuture
+	req configurationChangeRequest
+}
+
+// bootstrapFuture is used to attempt a live bootstrap of the cluster. See the
+// Raft object's BootstrapCluster member function for more details.
+type bootstrapFuture struct {
+	deferError
+
+	// configuration is the proposed bootstrap configuration to apply.
+	configuration Configuration
+}
+
+// logFuture is used to apply a log entry and waits until
+// the log is considered committed.
+type logFuture struct {
+	deferError
+	log      Log
+	response interface{}
+	dispatch time.Time
+}
+
+func (l *logFuture) Response() interface{} {
+	return l.response
+}
+
+func (l *logFuture) Index() uint64 {
+	return l.log.Index
+}
+
+type shutdownFuture struct {
+	raft *Raft
+}
+
+func (s *shutdownFuture) Error() error {
+	if s.raft == nil {
+		return nil
+	}
+	s.raft.waitShutdown()
+	if closeable, ok := s.raft.trans.(WithClose); ok {
+		closeable.Close()
+	}
+	return nil
+}
+
+// userSnapshotFuture is used for waiting on a user-triggered snapshot to
+// complete.
+type userSnapshotFuture struct {
+	deferError
+
+	// opener is a function used to open the snapshot. This is filled in
+	// once the future returns with no error.
+	opener func() (*SnapshotMeta, io.ReadCloser, error)
+}
+
+// Open is a function you can call to access the underlying snapshot and its
+// metadata.
+func (u *userSnapshotFuture) Open() (*SnapshotMeta, io.ReadCloser, error) {
+	if u.opener == nil {
+		return nil, nil, fmt.Errorf("no snapshot available")
+	}
+	// Invalidate the opener so it can't get called multiple times,
+	// which isn't generally safe.
+	defer func() {
+		u.opener = nil
+	}()
+	return u.opener()
+}
+
+// userRestoreFuture is used for waiting on a user-triggered restore of an
+// external snapshot to complete.
+type userRestoreFuture struct {
+	deferError
+
+	// meta is the metadata that belongs with the snapshot.
+	meta *SnapshotMeta
+
+	// reader is the interface to read the snapshot contents from.
+	reader io.Reader
+}
+
+// reqSnapshotFuture is used for requesting a snapshot start.
+// It is only used internally.
+type reqSnapshotFuture struct {
+	deferError
+
+	// snapshot details provided by the FSM runner before responding
+	index    uint64
+	term     uint64
+	snapshot FSMSnapshot
+}
+
+// restoreFuture is used for requesting an FSM to perform a
+// snapshot restore. Used internally only.
+type restoreFuture struct {
+	deferError
+	ID string
+}
+
+// verifyFuture is used to verify the current node is still
+// the leader. This is to prevent a stale read.
+type verifyFuture struct {
+	deferError
+	notifyCh   chan *verifyFuture
+	quorumSize int
+	votes      int
+	voteLock   sync.Mutex
+}
+
+// configurationsFuture is used to retrieve the current configurations. This is
+// used to allow safe access to this information outside of the main thread.
+type configurationsFuture struct {
+	deferError
+	configurations configurations
+}
+
+// Configuration returns the latest configuration in use by Raft.
+func (c *configurationsFuture) Configuration() Configuration {
+	return c.configurations.latest
+}
+
+// Index returns the index of the latest configuration in use by Raft.
+func (c *configurationsFuture) Index() uint64 {
+	return c.configurations.latestIndex
+}
+
+// vote is used to respond to a verifyFuture.
+// This may block when responding on the notifyCh.
+func (v *verifyFuture) vote(leader bool) {
+	v.voteLock.Lock()
+	defer v.voteLock.Unlock()
+
+	// Guard against having notified already
+	if v.notifyCh == nil {
+		return
+	}
+
+	if leader {
+		v.votes++
+		if v.votes >= v.quorumSize {
+			v.notifyCh <- v
+			v.notifyCh = nil
+		}
+	} else {
+		v.notifyCh <- v
+		v.notifyCh = nil
+	}
+}
+
+// appendFuture is used for waiting on a pipelined append
+// entries RPC.
+type appendFuture struct {
+	deferError
+	start time.Time
+	args  *AppendEntriesRequest
+	resp  *AppendEntriesResponse
+}
+
+func (a *appendFuture) Start() time.Time {
+	return a.start
+}
+
+func (a *appendFuture) Request() *AppendEntriesRequest {
+	return a.args
+}
+
+func (a *appendFuture) Response() *AppendEntriesResponse {
+	return a.resp
+}
diff --git a/vendor/github.com/hashicorp/raft/go.mod b/vendor/github.com/hashicorp/raft/go.mod
new file mode 100644
index 0000000000..09803b688f
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/go.mod
@@ -0,0 +1,10 @@
+module github.com/hashicorp/raft
+
+go 1.12
+
+require (
+	github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
+	github.com/hashicorp/go-hclog v0.9.1
+	github.com/hashicorp/go-msgpack v0.5.5
+	github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/hashicorp/raft/go.sum b/vendor/github.com/hashicorp/raft/go.sum
new file mode 100644
index 0000000000..b06b6a7a4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/go.sum
@@ -0,0 +1,37 @@
+github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
+github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM=
+github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/vendor/github.com/hashicorp/raft/inmem_snapshot.go b/vendor/github.com/hashicorp/raft/inmem_snapshot.go
new file mode 100644
index 0000000000..ad52f93aef
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/inmem_snapshot.go
@@ -0,0 +1,109 @@
+package raft
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+)
+
+// InmemSnapshotStore implements the SnapshotStore interface and
+// retains only the most recent snapshot
+type InmemSnapshotStore struct {
+	latest      *InmemSnapshotSink
+	hasSnapshot bool
+	sync.RWMutex
+}
+
+// InmemSnapshotSink implements SnapshotSink in memory
+type InmemSnapshotSink struct {
+	meta     SnapshotMeta
+	contents *bytes.Buffer
+}
+
+// NewInmemSnapshotStore creates a blank new InmemSnapshotStore
+func NewInmemSnapshotStore() *InmemSnapshotStore {
+	return &InmemSnapshotStore{
+		latest: &InmemSnapshotSink{
+			contents: &bytes.Buffer{},
+		},
+	}
+}
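+
+// Illustrative test usage (a sketch; configuration and trans are assumed to
+// exist in the caller, and only version 1 snapshots are accepted):
+//
+//	store := NewInmemSnapshotStore()
+//	sink, err := store.Create(1, index, term, configuration, configIndex, trans)
+//	if err == nil {
+//		sink.Write(data)
+//		sink.Close()
+//	}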
+
+// Create replaces the stored snapshot with a new one using the given args
+func (m *InmemSnapshotStore) Create(version SnapshotVersion, index, term uint64,
+	configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) {
+	// We only support version 1 snapshots at this time.
+	if version != 1 {
+		return nil, fmt.Errorf("unsupported snapshot version %d", version)
+	}
+
+	name := snapshotName(term, index)
+
+	m.Lock()
+	defer m.Unlock()
+
+	sink := &InmemSnapshotSink{
+		meta: SnapshotMeta{
+			Version:            version,
+			ID:                 name,
+			Index:              index,
+			Term:               term,
+			Peers:              encodePeers(configuration, trans),
+			Configuration:      configuration,
+			ConfigurationIndex: configurationIndex,
+		},
+		contents: &bytes.Buffer{},
+	}
+	m.hasSnapshot = true
+	m.latest = sink
+
+	return sink, nil
+}
+
+// List returns the latest snapshot taken
+func (m *InmemSnapshotStore) List() ([]*SnapshotMeta, error) {
+	m.RLock()
+	defer m.RUnlock()
+
+	if !m.hasSnapshot {
+		return []*SnapshotMeta{}, nil
+	}
+	return []*SnapshotMeta{&m.latest.meta}, nil
+}
+
+// Open wraps an io.ReadCloser around the snapshot contents
+func (m *InmemSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) {
+	m.RLock()
+	defer m.RUnlock()
+
+	if m.latest.meta.ID != id {
+		return nil, nil, fmt.Errorf("[ERR] snapshot: failed to open snapshot id: %s", id)
+	}
+
+	// Make a copy of the contents, since a bytes.Buffer can only be read
+	// once.
+	contents := bytes.NewBuffer(m.latest.contents.Bytes())
+	return &m.latest.meta, ioutil.NopCloser(contents), nil
+}
+
+// Write appends the given bytes to the snapshot contents
+func (s *InmemSnapshotSink) Write(p []byte) (n int, err error) {
+	written, err := io.Copy(s.contents, bytes.NewReader(p))
+	s.meta.Size += written
+	return int(written), err
+}
+
+// Close is a no-op; the Size is updated incrementally by Write
+func (s *InmemSnapshotSink) Close() error {
+	return nil
+}
+
+func (s *InmemSnapshotSink) ID() string {
+	return s.meta.ID
+}
+
+func (s *InmemSnapshotSink) Cancel() error {
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/inmem_store.go b/vendor/github.com/hashicorp/raft/inmem_store.go
new file mode 100644
index 0000000000..6285610f9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/inmem_store.go
@@ -0,0 +1,130 @@
+package raft
+
+import (
+	"errors"
+	"sync"
+)
+
+// InmemStore implements the LogStore and StableStore interfaces.
+// It should NOT EVER be used for production. It is used only for
+// unit tests. Use the MDBStore implementation instead.
+type InmemStore struct {
+	l         sync.RWMutex
+	lowIndex  uint64
+	highIndex uint64
+	logs      map[uint64]*Log
+	kv        map[string][]byte
+	kvInt     map[string]uint64
+}
+
+// NewInmemStore returns a new in-memory backend. Do not ever
+// use for production. Only for testing.
+func NewInmemStore() *InmemStore {
+	i := &InmemStore{
+		logs:  make(map[uint64]*Log),
+		kv:    make(map[string][]byte),
+		kvInt: make(map[string]uint64),
+	}
+	return i
+}
+
+// FirstIndex implements the LogStore interface.
+func (i *InmemStore) FirstIndex() (uint64, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	return i.lowIndex, nil
+}
+
+// LastIndex implements the LogStore interface.
+func (i *InmemStore) LastIndex() (uint64, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	return i.highIndex, nil
+}
+
+// GetLog implements the LogStore interface.
+func (i *InmemStore) GetLog(index uint64, log *Log) error {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	l, ok := i.logs[index]
+	if !ok {
+		return ErrLogNotFound
+	}
+	*log = *l
+	return nil
+}
+
+// StoreLog implements the LogStore interface.
+func (i *InmemStore) StoreLog(log *Log) error {
+	return i.StoreLogs([]*Log{log})
+}
+
+// StoreLogs implements the LogStore interface.
+func (i *InmemStore) StoreLogs(logs []*Log) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	for _, l := range logs {
+		i.logs[l.Index] = l
+		if i.lowIndex == 0 {
+			i.lowIndex = l.Index
+		}
+		if l.Index > i.highIndex {
+			i.highIndex = l.Index
+		}
+	}
+	return nil
+}
+
+// DeleteRange implements the LogStore interface.
+func (i *InmemStore) DeleteRange(min, max uint64) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	for j := min; j <= max; j++ {
+		delete(i.logs, j)
+	}
+	if min <= i.lowIndex {
+		i.lowIndex = max + 1
+	}
+	if max >= i.highIndex {
+		i.highIndex = min - 1
+	}
+	if i.lowIndex > i.highIndex {
+		i.lowIndex = 0
+		i.highIndex = 0
+	}
+	return nil
+}
+
+// Set implements the StableStore interface.
+func (i *InmemStore) Set(key []byte, val []byte) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	i.kv[string(key)] = val
+	return nil
+}
+
+// Get implements the StableStore interface.
+func (i *InmemStore) Get(key []byte) ([]byte, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	val := i.kv[string(key)]
+	if val == nil {
+		return nil, errors.New("not found")
+	}
+	return val, nil
+}
+
+// SetUint64 implements the StableStore interface.
+func (i *InmemStore) SetUint64(key []byte, val uint64) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	i.kvInt[string(key)] = val
+	return nil
+}
+
+// GetUint64 implements the StableStore interface.
+func (i *InmemStore) GetUint64(key []byte) (uint64, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	return i.kvInt[string(key)], nil
+}
diff --git a/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/raft/inmem_transport.go
new file mode 100644
index 0000000000..bb42eeb68b
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/inmem_transport.go
@@ -0,0 +1,335 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+)
+
+// NewInmemAddr returns a new in-memory addr with
+// a randomly generated UUID as the ID.
+func NewInmemAddr() ServerAddress {
+	return ServerAddress(generateUUID())
+}
+
+// inmemPipeline is used to pipeline requests for the in-mem transport.
+type inmemPipeline struct {
+	trans    *InmemTransport
+	peer     *InmemTransport
+	peerAddr ServerAddress
+
+	doneCh       chan AppendFuture
+	inprogressCh chan *inmemPipelineInflight
+
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+}
+
+type inmemPipelineInflight struct {
+	future *appendFuture
+	respCh <-chan RPCResponse
+}
+
+// InmemTransport implements the Transport interface, to allow Raft to be
+// tested in-memory without going over a network.
+type InmemTransport struct {
+	sync.RWMutex
+	consumerCh chan RPC
+	localAddr  ServerAddress
+	peers      map[ServerAddress]*InmemTransport
+	pipelines  []*inmemPipeline
+	timeout    time.Duration
+}
+
+// NewInmemTransportWithTimeout is used to initialize a new transport and
+// generates a random local address if none is specified. The given timeout
+// will be used to decide how long to wait for a connected peer to process the
+// RPCs that we're sending it. See also Connect() and Consumer().
+func NewInmemTransportWithTimeout(addr ServerAddress, timeout time.Duration) (ServerAddress, *InmemTransport) {
+	if string(addr) == "" {
+		addr = NewInmemAddr()
+	}
+	trans := &InmemTransport{
+		consumerCh: make(chan RPC, 16),
+		localAddr:  addr,
+		peers:      make(map[ServerAddress]*InmemTransport),
+		timeout:    timeout,
+	}
+	return addr, trans
+}
+
+// NewInmemTransport is used to initialize a new transport
+// and generates a random local address if none is specified
+func NewInmemTransport(addr ServerAddress) (ServerAddress, *InmemTransport) {
+	return NewInmemTransportWithTimeout(addr, 50*time.Millisecond)
+}
+
+// SetHeartbeatHandler is used to set an optional fast path for
+// heartbeats; it is not supported for this transport.
+func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) {
+}
+
+// Consumer implements the Transport interface.
+func (i *InmemTransport) Consumer() <-chan RPC {
+	return i.consumerCh
+}
+
+// LocalAddr implements the Transport interface.
+func (i *InmemTransport) LocalAddr() ServerAddress {
+	return i.localAddr
+}
+
+// AppendEntriesPipeline returns an interface that can be used to pipeline
+// AppendEntries requests.
+func (i *InmemTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) {
+	i.Lock()
+	defer i.Unlock()
+
+	peer, ok := i.peers[target]
+	if !ok {
+		return nil, fmt.Errorf("failed to connect to peer: %v", target)
+	}
+	pipeline := newInmemPipeline(i, peer, target)
+	i.pipelines = append(i.pipelines, pipeline)
+	return pipeline, nil
+}
+
+// AppendEntries implements the Transport interface.
+func (i *InmemTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
+	rpcResp, err := i.makeRPC(target, args, nil, i.timeout)
+	if err != nil {
+		return err
+	}
+
+	// Copy the result back
+	out := rpcResp.Response.(*AppendEntriesResponse)
+	*resp = *out
+	return nil
+}
+
+// RequestVote implements the Transport interface.
+func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
+	rpcResp, err := i.makeRPC(target, args, nil, i.timeout)
+	if err != nil {
+		return err
+	}
+
+	// Copy the result back
+	out := rpcResp.Response.(*RequestVoteResponse)
+	*resp = *out
+	return nil
+}
+
+// InstallSnapshot implements the Transport interface.
+func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
+	rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout)
+	if err != nil {
+		return err
+	}
+
+	// Copy the result back
+	out := rpcResp.Response.(*InstallSnapshotResponse)
+	*resp = *out
+	return nil
+}
+
+func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) {
+	i.RLock()
+	peer, ok := i.peers[target]
+	i.RUnlock()
+
+	if !ok {
+		err = fmt.Errorf("failed to connect to peer: %v", target)
+		return
+	}
+
+	// Send the RPC over
+	respCh := make(chan RPCResponse)
+	req := RPC{
+		Command:  args,
+		Reader:   r,
+		RespChan: respCh,
+	}
+	select {
+	case peer.consumerCh <- req:
+	case <-time.After(timeout):
+		err = fmt.Errorf("send timed out")
+		return
+	}
+
+	// Wait for a response
+	select {
+	case rpcResp = <-respCh:
+		if rpcResp.Error != nil {
+			err = rpcResp.Error
+		}
+	case <-time.After(timeout):
+		err = fmt.Errorf("command timed out")
+	}
+	return
+}
+
+// EncodePeer implements the Transport interface.
+func (i *InmemTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
+	return []byte(p)
+}
+
+// DecodePeer implements the Transport interface.
+func (i *InmemTransport) DecodePeer(buf []byte) ServerAddress {
+	return ServerAddress(buf)
+}
+
+// Connect is used to connect this transport to another transport for
+// a given peer name. This allows for local routing.
+func (i *InmemTransport) Connect(peer ServerAddress, t Transport) {
+	trans := t.(*InmemTransport)
+	i.Lock()
+	defer i.Unlock()
+	i.peers[peer] = trans
+}
+
+// Disconnect is used to remove the ability to route to a given peer.
+func (i *InmemTransport) Disconnect(peer ServerAddress) {
+	i.Lock()
+	defer i.Unlock()
+	delete(i.peers, peer)
+
+	// Disconnect any pipelines
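+	// Closed pipelines are swapped to the tail and the slice is truncated
+	// once the scan completes; idx is decremented so the swapped-in
+	// element is examined as well.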
+	n := len(i.pipelines)
+	for idx := 0; idx < n; idx++ {
+		if i.pipelines[idx].peerAddr == peer {
+			i.pipelines[idx].Close()
+			i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil
+			idx--
+			n--
+		}
+	}
+	i.pipelines = i.pipelines[:n]
+}
+
+// DisconnectAll is used to remove all routes to peers.
+func (i *InmemTransport) DisconnectAll() {
+	i.Lock()
+	defer i.Unlock()
+	i.peers = make(map[ServerAddress]*InmemTransport)
+
+	// Handle pipelines
+	for _, pipeline := range i.pipelines {
+		pipeline.Close()
+	}
+	i.pipelines = nil
+}
+
+// Close is used to permanently disable the transport
+func (i *InmemTransport) Close() error {
+	i.DisconnectAll()
+	return nil
+}
+
+func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr ServerAddress) *inmemPipeline {
+	i := &inmemPipeline{
+		trans:        trans,
+		peer:         peer,
+		peerAddr:     addr,
+		doneCh:       make(chan AppendFuture, 16),
+		inprogressCh: make(chan *inmemPipelineInflight, 16),
+		shutdownCh:   make(chan struct{}),
+	}
+	go i.decodeResponses()
+	return i
+}
+
+func (i *inmemPipeline) decodeResponses() {
+	timeout := i.trans.timeout
+	for {
+		select {
+		case inp := <-i.inprogressCh:
+			var timeoutCh <-chan time.Time
+			if timeout > 0 {
+				timeoutCh = time.After(timeout)
+			}
+
+			select {
+			case rpcResp := <-inp.respCh:
+				// Copy the result back
+				*inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse)
+				inp.future.respond(rpcResp.Error)
+
+				select {
+				case i.doneCh <- inp.future:
+				case <-i.shutdownCh:
+					return
+				}
+
+			case <-timeoutCh:
+				inp.future.respond(fmt.Errorf("command timed out"))
+				select {
+				case i.doneCh <- inp.future:
+				case <-i.shutdownCh:
+					return
+				}
+
+			case <-i.shutdownCh:
+				return
+			}
+		case <-i.shutdownCh:
+			return
+		}
+	}
+}
+
+func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) {
+	// Create a new future
+	future := &appendFuture{
+		start: time.Now(),
+		args:  args,
+		resp:  resp,
+	}
+	future.init()
+
+	// Handle a timeout
+	var timeout <-chan time.Time
+	if i.trans.timeout > 0 {
+		timeout = time.After(i.trans.timeout)
+	}
+
+	// Send the RPC over
+	respCh := make(chan RPCResponse, 1)
+	rpc := RPC{
+		Command:  args,
+		RespChan: respCh,
+	}
+	select {
+	case i.peer.consumerCh <- rpc:
+	case <-timeout:
+		return nil, fmt.Errorf("command enqueue timeout")
+	case <-i.shutdownCh:
+		return nil, ErrPipelineShutdown
+	}
+
+	// Send to be decoded
+	select {
+	case i.inprogressCh <- &inmemPipelineInflight{future, respCh}:
+		return future, nil
+	case <-i.shutdownCh:
+		return nil, ErrPipelineShutdown
+	}
+}
+
+func (i *inmemPipeline) Consumer() <-chan AppendFuture {
+	return i.doneCh
+}
+
+func (i *inmemPipeline) Close() error {
+	i.shutdownLock.Lock()
+	defer i.shutdownLock.Unlock()
+	if i.shutdown {
+		return nil
+	}
+
+	i.shutdown = true
+	close(i.shutdownCh)
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/log.go b/vendor/github.com/hashicorp/raft/log.go
new file mode 100644
index 0000000000..4ade38ecc1
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/log.go
@@ -0,0 +1,72 @@
+package raft
+
+// LogType describes various types of log entries.
+type LogType uint8
+
+const (
+	// LogCommand is applied to a user FSM.
+	LogCommand LogType = iota
+
+	// LogNoop is used to assert leadership.
+	LogNoop
+
+	// LogAddPeerDeprecated is used to add a new peer. This should only be
+	// used with older protocol versions designed to be compatible with
+	// unversioned Raft servers. See comments in config.go for details.
+	LogAddPeerDeprecated
+
+	// LogRemovePeerDeprecated is used to remove an existing peer. This
+	// should only be used with older protocol versions designed to be
+	// compatible with unversioned Raft servers. See comments in config.go
+	// for details.
+	LogRemovePeerDeprecated
+
+	// LogBarrier is used to ensure all preceding operations have been
+	// applied to the FSM. It is similar to LogNoop, but instead of returning
+	// once committed, it only returns once the FSM manager acks it. Otherwise
+	// it is possible there are operations committed but not yet applied to
+	// the FSM.
+	LogBarrier
+
+	// LogConfiguration establishes a membership change configuration. It is
+	// created when a server is added, removed, promoted, etc. Only used
+	// when protocol version 1 or greater is in use.
+	LogConfiguration
+)
+
+// Log entries are replicated to all members of the Raft cluster
+// and form the heart of the replicated state machine.
+type Log struct {
+	// Index holds the index of the log entry.
+	Index uint64
+
+	// Term holds the election term of the log entry.
+	Term uint64
+
+	// Type holds the type of the log entry.
+	Type LogType
+
+	// Data holds the log entry's type-specific data.
+	Data []byte
+}
+
+// LogStore is used to provide an interface for storing
+// and retrieving logs in a durable fashion.
+type LogStore interface {
+	// FirstIndex returns the first index written. 0 for no entries.
+	FirstIndex() (uint64, error)
+
+	// LastIndex returns the last index written. 0 for no entries.
+	LastIndex() (uint64, error)
+
+	// GetLog gets a log entry at a given index.
+	GetLog(index uint64, log *Log) error
+
+	// StoreLog stores a log entry.
+	StoreLog(log *Log) error
+
+	// StoreLogs stores multiple log entries.
+	StoreLogs(logs []*Log) error
+
+	// DeleteRange deletes a range of log entries. The range is inclusive.
+	DeleteRange(min, max uint64) error
+}
diff --git a/vendor/github.com/hashicorp/raft/log_cache.go b/vendor/github.com/hashicorp/raft/log_cache.go
new file mode 100644
index 0000000000..952e98c228
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/log_cache.go
@@ -0,0 +1,79 @@
+package raft
+
+import (
+	"fmt"
+	"sync"
+)
+
+// LogCache wraps any LogStore implementation to provide an
+// in-memory ring buffer. This is used to cache access to
+// the recently written entries. For implementations that do not
+// cache themselves, this can provide a substantial boost by
+// avoiding disk I/O on recent entries.
+type LogCache struct {
+	store LogStore
+
+	cache []*Log
+	l     sync.RWMutex
+}
+
+// NewLogCache is used to create a new LogCache with the
+// given capacity and backend store.
+func NewLogCache(capacity int, store LogStore) (*LogCache, error) {
+	if capacity <= 0 {
+		return nil, fmt.Errorf("capacity must be positive")
+	}
+	c := &LogCache{
+		store: store,
+		cache: make([]*Log, capacity),
+	}
+	return c, nil
+}
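+
+// Illustrative usage (a sketch; the store variable is assumed), wrapping an
+// existing LogStore with a 512-entry ring buffer:
+//
+//	cached, err := NewLogCache(512, store)
+//	if err == nil {
+//		// use cached anywhere a LogStore is expected
+//	}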
+
+func (c *LogCache) GetLog(idx uint64, log *Log) error {
+	// Check the buffer for an entry
+	c.l.RLock()
+	cached := c.cache[idx%uint64(len(c.cache))]
+	c.l.RUnlock()
+
+	// Check if entry is valid
+	if cached != nil && cached.Index == idx {
+		*log = *cached
+		return nil
+	}
+
+	// Forward request on cache miss
+	return c.store.GetLog(idx, log)
+}
+
+func (c *LogCache) StoreLog(log *Log) error {
+	return c.StoreLogs([]*Log{log})
+}
+
+func (c *LogCache) StoreLogs(logs []*Log) error {
+	// Insert the logs into the ring buffer
+	c.l.Lock()
+	for _, l := range logs {
+		c.cache[l.Index%uint64(len(c.cache))] = l
+	}
+	c.l.Unlock()
+
+	return c.store.StoreLogs(logs)
+}
+
+func (c *LogCache) FirstIndex() (uint64, error) {
+	return c.store.FirstIndex()
+}
+
+func (c *LogCache) LastIndex() (uint64, error) {
+	return c.store.LastIndex()
+}
+
+func (c *LogCache) DeleteRange(min, max uint64) error {
+	// Invalidate the cache on deletes
+	c.l.Lock()
+	c.cache = make([]*Log, len(c.cache))
+	c.l.Unlock()
+
+	return c.store.DeleteRange(min, max)
+}
diff --git a/vendor/github.com/hashicorp/raft/membership.md b/vendor/github.com/hashicorp/raft/membership.md
new file mode 100644
index 0000000000..df1f83e27f
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/membership.md
@@ -0,0 +1,83 @@
+Simon (@superfell) and I (@ongardie) talked through reworking this library's cluster membership changes last Friday. We don't see a way to split this into independent patches, so we're taking the next best approach: submitting the plan here for review, then working on an enormous PR. Your feedback would be appreciated. (@superfell is out this week, however, so don't expect him to respond quickly.)
+
+These are the main goals:
+ - Bringing things in line with the description in my PhD dissertation;
+ - Catching up new servers prior to granting them a vote, as well as allowing permanent non-voting members; and
+ - Eliminating the `peers.json` file, to avoid issues of consistency between that and the log/snapshot.
+
+## Data-centric view
+
+We propose to re-define a *configuration* as a set of servers, where each server includes an address (as it does today) and a mode that is either:
+ - *Voter*: a server whose vote is counted in elections and whose match index is used in advancing the leader's commit index.
+ - *Nonvoter*: a server that receives log entries but is not considered for elections or commitment purposes.
+ - *Staging*: a server that acts like a nonvoter with one exception: once a staging server receives enough log entries to catch up sufficiently to the leader's log, the leader will invoke a membership change to promote the staging server to a voter.
+
+All changes to the configuration will be done by writing a new configuration to the log. The new configuration will be in effect as soon as it is appended to the log (not when it is committed like a normal state machine command). Note that, per my dissertation, there can be at most one uncommitted configuration at a time (the next configuration may not be created until the prior one has been committed). It's not strictly necessary to follow these same rules for the nonvoter/staging servers, but we think it's best to treat all changes uniformly.
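+
+As a concrete illustration (the indices are invented for the example): if the previous configuration C1 sits at index 5 and C2 is appended at index 6, then no C3 may be appended until C2 has been committed:
+
+```
+index:  5    6    7 ...
+entry:  C1   C2   (C3 must wait until C2 commits)
+```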
+
+Each server will track two configurations:
+ 1. its *committed configuration*: the latest configuration in the log/snapshot that has been committed, along with its index.
+ 2. its *latest configuration*: the latest configuration in the log/snapshot (may be committed or uncommitted), along with its index.
+
+When there's no membership change happening, these two will be the same. The latest configuration is almost always the one used, except:
+ - When followers truncate the suffix of their logs, they may need to fall back to the committed configuration.
+ - When snapshotting, the committed configuration is written, to correspond with the committed log prefix that is being snapshotted.
+
+
+## Application API
+
+We propose the following operations for clients to manipulate the cluster configuration (a short usage sketch follows the list):
+ - AddVoter: server becomes staging unless voter,
+ - AddNonvoter: server becomes nonvoter unless staging or voter,
+ - DemoteVoter: server becomes nonvoter unless absent,
+ - RemovePeer: server removed from configuration,
+ - GetConfiguration: waits for latest config to commit, returns committed config.
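+
+A minimal sketch of how an application might drive these operations, assuming a future-based API in the style of the rest of this library (the names and signatures here are part of the proposal, not final):
+
+```
+// Add a server: it enters staging and is promoted once its log catches up.
+future := r.AddVoter(ServerID("node3"), ServerAddress("10.0.0.3:8300"))
+if err := future.Error(); err != nil {
+	// the configuration change did not commit
+}
+```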
+
+This diagram, of which I'm quite proud, shows the possible transitions:
+```
++-----------------------------------------------------------------------------+
+|                                                                             |
+|                      Start ->  +--------+                                   |
+|            ,------<------------|        |                                   |
+|           /                    | absent |                                   |
+|          /       RemovePeer--> |        | <---RemovePeer                    |
+|         /            |         +--------+               \                   |
+|        /             |            |                      \                  |
+|   AddNonvoter        |         AddVoter                   \                 |
+|       |       ,->---' `--<-.      |                        \                |
+|       v      /              \     v                         \               |
+|  +----------+                +----------+                    +----------+   |
+|  |          | ---AddVoter--> |          | -log caught up --> |          |   |
+|  | nonvoter |                | staging  |                    |  voter   |   |
+|  |          | <-DemoteVoter- |          |                 ,- |          |   |
+|  +----------+         \      +----------+                /   +----------+   |
+|                        \                                /                   |
+|                         `--------------<---------------'                    |
+|                                                                             |
++-----------------------------------------------------------------------------+
+```
+
+While these operations aren't quite symmetric, we think they're a good set to capture
+the possible intent of the user. For example, if I want to make sure a server doesn't have a vote, but the server isn't part of the configuration at all, it probably shouldn't be added as a nonvoting server.
+
+Each of these application-level operations will be interpreted by the leader and, if it has an effect, will cause the leader to write a new configuration entry to its log. Which particular application-level operation caused the log entry to be written need not be part of the log entry.
+
+## Code implications
+
+This is a non-exhaustive list, but we came up with a few things:
+- Remove the PeerStore: the `peers.json` file introduces the possibility of getting out of sync with the log and snapshot, and it's hard to maintain this atomically as the log changes. It's not clear whether it's meant to track the committed or latest configuration, either.
+- Servers will have to search their snapshot and log to find the committed configuration and the latest configuration on startup.
+- Bootstrap will no longer use `peers.json` but should initialize the log or snapshot with an application-provided configuration entry.
+- Snapshots should store the index of their configuration along with the configuration itself. In my experience with LogCabin, the original log index of the configuration is very useful to include in debug log messages.
+- As noted in hashicorp/raft#84, configuration change requests should come in via a separate channel, and one may not proceed until the last has been committed.
+- As to deciding when a log is sufficiently caught up, implementing a sophisticated algorithm *is* something that can be done in a separate PR. An easy and decent placeholder is: once the staging server has reached 95% of the leader's commit index, promote it.
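+
+For that placeholder, the promotion check is roughly (all identifiers invented for the sketch):
+
+```
+if stagingMatchIndex >= (leaderCommitIndex*95)/100 {
+	promote(server) // append a configuration entry making it a voter
+}
+```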
+
+## Feedback
+
+Again, we're looking for feedback here before we start working on this. Here are some questions to think about:
+ - Does this seem like where we want things to go?
+ - Is there anything here that should be left out?
+ - Is there anything else we're forgetting about?
+ - Is there a good way to break this up?
+ - What do we need to worry about in terms of backwards compatibility?
+ - What implication will this have on current tests?
+ - What's the best way to test this code, in particular the small changes that will be sprinkled all over the library?
diff --git a/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/raft/net_transport.go
new file mode 100644
index 0000000000..4f1f101e00
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/net_transport.go
@@ -0,0 +1,757 @@
+package raft
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-msgpack/codec"
+)
+
+const (
+	rpcAppendEntries uint8 = iota
+	rpcRequestVote
+	rpcInstallSnapshot
+
+	// DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport.
+	DefaultTimeoutScale = 256 * 1024 // 256KB
+
+	// rpcMaxPipeline controls the maximum number of outstanding
+	// AppendEntries RPC calls.
+	rpcMaxPipeline = 128
+)
+
+var (
+	// ErrTransportShutdown is returned when operations on a transport are
+	// invoked after it's been terminated.
+	ErrTransportShutdown = errors.New("transport shutdown")
+
+	// ErrPipelineShutdown is returned when the pipeline is closed.
+	ErrPipelineShutdown = errors.New("append pipeline closed")
+)
+
+/*
+
+NetworkTransport provides a network based transport that can be
+used to communicate with Raft on remote machines. It requires
+an underlying stream layer to provide a stream abstraction, which can
+be simple TCP, TLS, etc.
+
+This transport is very simple and lightweight. Each RPC request is
+framed by sending a byte that indicates the message type, followed
+by the MsgPack encoded request.
+
+The response is an error string followed by the response object,
+both encoded using MsgPack.
+
+InstallSnapshot is special, in that after the RPC request we stream
+the entire state. That socket is not re-used as the connection state
+is not known if there is an error.
+
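+Concretely, a request on the wire is:
+
+	[1 byte] RPC type (rpcAppendEntries, rpcRequestVote or rpcInstallSnapshot)
+	[bytes]  MsgPack-encoded request struct
+
+and a response is:
+
+	[bytes]  MsgPack-encoded error string ("" on success)
+	[bytes]  MsgPack-encoded response struct
+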
+*/
+type NetworkTransport struct {
+	connPool     map[ServerAddress][]*netConn
+	connPoolLock sync.Mutex
+
+	consumeCh chan RPC
+
+	heartbeatFn     func(RPC)
+	heartbeatFnLock sync.Mutex
+
+	logger *log.Logger
+
+	maxPool int
+
+	serverAddressProvider ServerAddressProvider
+
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+
+	stream StreamLayer
+
+	// streamCtx is used to cancel existing connection handlers.
+	streamCtx     context.Context
+	streamCancel  context.CancelFunc
+	streamCtxLock sync.RWMutex
+
+	timeout      time.Duration
+	TimeoutScale int
+}
+
+// NetworkTransportConfig encapsulates configuration for the network transport layer.
+type NetworkTransportConfig struct {
+	// ServerAddressProvider is used to override the target address when establishing a connection to invoke an RPC
+	ServerAddressProvider ServerAddressProvider
+
+	Logger *log.Logger
+
+	// Stream is the low-level stream layer used to dial and accept connections
+	Stream StreamLayer
+
+	// MaxPool controls how many connections we will pool
+	MaxPool int
+
+	// Timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
+	// the timeout by (SnapshotSize / TimeoutScale).
+	Timeout time.Duration
+}
+
+// ServerAddressProvider resolves a ServerID to a ServerAddress, letting the
+// transport override the target address when establishing a connection.
+type ServerAddressProvider interface {
+	ServerAddr(id ServerID) (ServerAddress, error)
+}
+
+// StreamLayer is used with the NetworkTransport to provide
+// the low level stream abstraction.
+type StreamLayer interface {
+	net.Listener
+
+	// Dial is used to create a new outgoing connection
+	Dial(address ServerAddress, timeout time.Duration) (net.Conn, error)
+}
+
+type netConn struct {
+	target ServerAddress
+	conn   net.Conn
+	r      *bufio.Reader
+	w      *bufio.Writer
+	dec    *codec.Decoder
+	enc    *codec.Encoder
+}
+
+func (n *netConn) Release() error {
+	return n.conn.Close()
+}
+
+type netPipeline struct {
+	conn  *netConn
+	trans *NetworkTransport
+
+	doneCh       chan AppendFuture
+	inprogressCh chan *appendFuture
+
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+}
+
+// NewNetworkTransportWithConfig creates a new network transport with the given config struct
+func NewNetworkTransportWithConfig(
+	config *NetworkTransportConfig,
+) *NetworkTransport {
+	if config.Logger == nil {
+		config.Logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+	trans := &NetworkTransport{
+		connPool:              make(map[ServerAddress][]*netConn),
+		consumeCh:             make(chan RPC),
+		logger:                config.Logger,
+		maxPool:               config.MaxPool,
+		shutdownCh:            make(chan struct{}),
+		stream:                config.Stream,
+		timeout:               config.Timeout,
+		TimeoutScale:          DefaultTimeoutScale,
+		serverAddressProvider: config.ServerAddressProvider,
+	}
+
+	// Create the connection context and then start our listener.
+	trans.setupStreamContext()
+	go trans.listen()
+
+	return trans
+}
+
+// NewNetworkTransport creates a new network transport with the given dialer
+// and listener. The maxPool controls how many connections we will pool. The
+// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
+// the timeout by (SnapshotSize / TimeoutScale).
+func NewNetworkTransport(
+	stream StreamLayer,
+	maxPool int,
+	timeout time.Duration,
+	logOutput io.Writer,
+) *NetworkTransport {
+	if logOutput == nil {
+		logOutput = os.Stderr
+	}
+	logger := log.New(logOutput, "", log.LstdFlags)
+	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+	return NewNetworkTransportWithConfig(config)
+}
+
+// NewNetworkTransportWithLogger creates a new network transport with the given logger, dialer
+// and listener. The maxPool controls how many connections we will pool. The
+// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
+// the timeout by (SnapshotSize / TimeoutScale).
+func NewNetworkTransportWithLogger(
+	stream StreamLayer,
+	maxPool int,
+	timeout time.Duration,
+	logger *log.Logger,
+) *NetworkTransport {
+	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+	return NewNetworkTransportWithConfig(config)
+}
+
+// setupStreamContext is used to create a new stream context. This should be
+// called with the stream lock held.
+func (n *NetworkTransport) setupStreamContext() {
+	ctx, cancel := context.WithCancel(context.Background())
+	n.streamCtx = ctx
+	n.streamCancel = cancel
+}
+
+// getStreamContext is used to retrieve the current stream context.
+func (n *NetworkTransport) getStreamContext() context.Context {
+	n.streamCtxLock.RLock()
+	defer n.streamCtxLock.RUnlock()
+	return n.streamCtx
+}
+
+// SetHeartbeatHandler is used to set up a heartbeat handler
+// as a fast path. This is to avoid head-of-line blocking from
+// disk IO.
+func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) {
+	n.heartbeatFnLock.Lock()
+	defer n.heartbeatFnLock.Unlock()
+	n.heartbeatFn = cb
+}
+
+// CloseStreams closes the current streams.
+func (n *NetworkTransport) CloseStreams() {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	// Close all the connections in the connection pool and then remove their
+	// entry.
+	for k, e := range n.connPool {
+		for _, conn := range e {
+			conn.Release()
+		}
+
+		delete(n.connPool, k)
+	}
+
+	// Cancel the existing connections and create a new context. Both these
+	// operations must always be done with the lock held otherwise we can create
+	// connection handlers holding a context that can never be canceled.
+	n.streamCtxLock.Lock()
+	n.streamCancel()
+	n.setupStreamContext()
+	n.streamCtxLock.Unlock()
+}
+
+// Close is used to stop the network transport.
+func (n *NetworkTransport) Close() error {
+	n.shutdownLock.Lock()
+	defer n.shutdownLock.Unlock()
+
+	if !n.shutdown {
+		close(n.shutdownCh)
+		n.stream.Close()
+		n.shutdown = true
+	}
+	return nil
+}
+
+// Consumer implements the Transport interface.
+func (n *NetworkTransport) Consumer() <-chan RPC {
+	return n.consumeCh
+}
+
+// LocalAddr implements the Transport interface.
+func (n *NetworkTransport) LocalAddr() ServerAddress {
+	return ServerAddress(n.stream.Addr().String())
+}
+
+// IsShutdown is used to check if the transport is shutdown.
+func (n *NetworkTransport) IsShutdown() bool {
+	select {
+	case <-n.shutdownCh:
+		return true
+	default:
+		return false
+	}
+}
+
+// getPooledConn is used to grab a pooled connection.
+func (n *NetworkTransport) getPooledConn(target ServerAddress) *netConn {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	conns, ok := n.connPool[target]
+	if !ok || len(conns) == 0 {
+		return nil
+	}
+
+	var conn *netConn
+	num := len(conns)
+	conn, conns[num-1] = conns[num-1], nil
+	n.connPool[target] = conns[:num-1]
+	return conn
+}
+
+// getConnFromAddressProvider returns a connection from the server address provider if available, or defaults to a connection using the target server address
+func (n *NetworkTransport) getConnFromAddressProvider(id ServerID, target ServerAddress) (*netConn, error) {
+	address := n.getProviderAddressOrFallback(id, target)
+	return n.getConn(address)
+}
+
+func (n *NetworkTransport) getProviderAddressOrFallback(id ServerID, target ServerAddress) ServerAddress {
+	if n.serverAddressProvider != nil {
+		serverAddressOverride, err := n.serverAddressProvider.ServerAddr(id)
+		if err != nil {
+			n.logger.Printf("[WARN] raft: Unable to get address for server id %v, using fallback address %v: %v", id, target, err)
+		} else {
+			return serverAddressOverride
+		}
+	}
+	return target
+}
+
+// getConn is used to get a connection from the pool.
+func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) {
+	// Check for a pooled conn
+	if conn := n.getPooledConn(target); conn != nil {
+		return conn, nil
+	}
+
+	// Dial a new connection
+	conn, err := n.stream.Dial(target, n.timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	// Wrap the conn
+	netConn := &netConn{
+		target: target,
+		conn:   conn,
+		r:      bufio.NewReader(conn),
+		w:      bufio.NewWriter(conn),
+	}
+
+	// Setup encoder/decoders
+	netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{})
+	netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{})
+
+	// Done
+	return netConn, nil
+}
+
+// returnConn returns a connection back to the pool.
+func (n *NetworkTransport) returnConn(conn *netConn) {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	key := conn.target
+	conns := n.connPool[key]
+
+	if !n.IsShutdown() && len(conns) < n.maxPool {
+		n.connPool[key] = append(conns, conn)
+	} else {
+		conn.Release()
+	}
+}
+
+// AppendEntriesPipeline returns an interface that can be used to pipeline
+// AppendEntries requests.
+func (n *NetworkTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) {
+	// Get a connection
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the pipeline
+	return newNetPipeline(n, conn), nil
+}
+
+// AppendEntries implements the Transport interface.
+func (n *NetworkTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
+	return n.genericRPC(id, target, rpcAppendEntries, args, resp)
+}
+
+// RequestVote implements the Transport interface.
+func (n *NetworkTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
+	return n.genericRPC(id, target, rpcRequestVote, args, resp)
+}
+
+// genericRPC handles a simple request/response RPC.
+func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error {
+	// Get a conn
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return err
+	}
+
+	// Set a deadline
+	if n.timeout > 0 {
+		conn.conn.SetDeadline(time.Now().Add(n.timeout))
+	}
+
+	// Send the RPC
+	if err = sendRPC(conn, rpcType, args); err != nil {
+		return err
+	}
+
+	// Decode the response
+	canReturn, err := decodeResponse(conn, resp)
+	if canReturn {
+		n.returnConn(conn)
+	}
+	return err
+}
+
+// InstallSnapshot implements the Transport interface.
+func (n *NetworkTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
+	// Get a conn, always close for InstallSnapshot
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return err
+	}
+	defer conn.Release()
+
+	// Set a deadline, scaled by request size
+	if n.timeout > 0 {
+		timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale))
+		if timeout < n.timeout {
+			timeout = n.timeout
+		}
+		conn.conn.SetDeadline(time.Now().Add(timeout))
+	}
+
+	// Send the RPC
+	if err = sendRPC(conn, rpcInstallSnapshot, args); err != nil {
+		return err
+	}
+
+	// Stream the state
+	if _, err = io.Copy(conn.w, data); err != nil {
+		return err
+	}
+
+	// Flush
+	if err = conn.w.Flush(); err != nil {
+		return err
+	}
+
+	// Decode the response, do not return conn
+	_, err = decodeResponse(conn, resp)
+	return err
+}
+
+// EncodePeer implements the Transport interface.
+func (n *NetworkTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
+	address := n.getProviderAddressOrFallback(id, p)
+	return []byte(address)
+}
+
+// DecodePeer implements the Transport interface.
+func (n *NetworkTransport) DecodePeer(buf []byte) ServerAddress {
+	return ServerAddress(buf)
+}
+
+// listen is used to handle incoming connections.
+func (n *NetworkTransport) listen() {
+	const baseDelay = 5 * time.Millisecond
+	const maxDelay = 1 * time.Second
+
+	var loopDelay time.Duration
+	for {
+		// Accept incoming connections
+		conn, err := n.stream.Accept()
+		if err != nil {
+			if loopDelay == 0 {
+				loopDelay = baseDelay
+			} else {
+				loopDelay *= 2
+			}
+
+			if loopDelay > maxDelay {
+				loopDelay = maxDelay
+			}
+
+			if !n.IsShutdown() {
+				n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err)
+			}
+
+			select {
+			case <-n.shutdownCh:
+				return
+			case <-time.After(loopDelay):
+				continue
+			}
+		}
+		// No error, reset loop delay
+		loopDelay = 0
+
+		n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr())
+
+		// Handle the connection in a dedicated routine
+		go n.handleConn(n.getStreamContext(), conn)
+	}
+}
+
+// handleConn is used to handle an inbound connection for its lifespan. The
+// handler will exit when the passed context is cancelled or the connection is
+// closed.
+func (n *NetworkTransport) handleConn(connCtx context.Context, conn net.Conn) {
+	defer conn.Close()
+	r := bufio.NewReader(conn)
+	w := bufio.NewWriter(conn)
+	dec := codec.NewDecoder(r, &codec.MsgpackHandle{})
+	enc := codec.NewEncoder(w, &codec.MsgpackHandle{})
+
+	for {
+		select {
+		case <-connCtx.Done():
+			n.logger.Println("[DEBUG] raft-net: stream layer is closed")
+			return
+		default:
+		}
+
+		if err := n.handleCommand(r, dec, enc); err != nil {
+			if err != io.EOF {
+				n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err)
+			}
+			return
+		}
+		if err := w.Flush(); err != nil {
+			n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err)
+			return
+		}
+	}
+}
+
+// handleCommand is used to decode and dispatch a single command.
+func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error {
+	// Get the rpc type
+	rpcType, err := r.ReadByte()
+	if err != nil {
+		return err
+	}
+
+	// Create the RPC object
+	respCh := make(chan RPCResponse, 1)
+	rpc := RPC{
+		RespChan: respCh,
+	}
+
+	// Decode the command
+	isHeartbeat := false
+	switch rpcType {
+	case rpcAppendEntries:
+		var req AppendEntriesRequest
+		if err := dec.Decode(&req); err != nil {
+			return err
+		}
+		rpc.Command = &req
+
+		// Check if this is a heartbeat
+		if req.Term != 0 && req.Leader != nil &&
+			req.PrevLogEntry == 0 && req.PrevLogTerm == 0 &&
+			len(req.Entries) == 0 && req.LeaderCommitIndex == 0 {
+			isHeartbeat = true
+		}
+
+	case rpcRequestVote:
+		var req RequestVoteRequest
+		if err := dec.Decode(&req); err != nil {
+			return err
+		}
+		rpc.Command = &req
+
+	case rpcInstallSnapshot:
+		var req InstallSnapshotRequest
+		if err := dec.Decode(&req); err != nil {
+			return err
+		}
+		rpc.Command = &req
+		rpc.Reader = io.LimitReader(r, req.Size)
+
+	default:
+		return fmt.Errorf("unknown rpc type %d", rpcType)
+	}
+
+	// Check for heartbeat fast-path
+	if isHeartbeat {
+		n.heartbeatFnLock.Lock()
+		fn := n.heartbeatFn
+		n.heartbeatFnLock.Unlock()
+		if fn != nil {
+			fn(rpc)
+			goto RESP
+		}
+	}
+
+	// Dispatch the RPC
+	select {
+	case n.consumeCh <- rpc:
+	case <-n.shutdownCh:
+		return ErrTransportShutdown
+	}
+
+	// Wait for response
+RESP:
+	select {
+	case resp := <-respCh:
+		// Send the error first
+		respErr := ""
+		if resp.Error != nil {
+			respErr = resp.Error.Error()
+		}
+		if err := enc.Encode(respErr); err != nil {
+			return err
+		}
+
+		// Send the response
+		if err := enc.Encode(resp.Response); err != nil {
+			return err
+		}
+	case <-n.shutdownCh:
+		return ErrTransportShutdown
+	}
+	return nil
+}
+
+// decodeResponse is used to decode an RPC response and reports whether
+// the connection can be reused.
+func decodeResponse(conn *netConn, resp interface{}) (bool, error) {
+	// Decode the error if any
+	var rpcError string
+	if err := conn.dec.Decode(&rpcError); err != nil {
+		conn.Release()
+		return false, err
+	}
+
+	// Decode the response
+	if err := conn.dec.Decode(resp); err != nil {
+		conn.Release()
+		return false, err
+	}
+
+	// Surface the remote error string, if any; errors.New avoids
+	// interpreting '%' in the message as a format directive
+	if rpcError != "" {
+		return true, errors.New(rpcError)
+	}
+	return true, nil
+}
+
+// sendRPC is used to encode and send the RPC.
+func sendRPC(conn *netConn, rpcType uint8, args interface{}) error {
+	// Write the request type
+	if err := conn.w.WriteByte(rpcType); err != nil {
+		conn.Release()
+		return err
+	}
+
+	// Send the request
+	if err := conn.enc.Encode(args); err != nil {
+		conn.Release()
+		return err
+	}
+
+	// Flush
+	if err := conn.w.Flush(); err != nil {
+		conn.Release()
+		return err
+	}
+	return nil
+}
+
+// newNetPipeline is used to construct a netPipeline from a given
+// transport and connection.
+func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline {
+	n := &netPipeline{
+		conn:         conn,
+		trans:        trans,
+		doneCh:       make(chan AppendFuture, rpcMaxPipeline),
+		inprogressCh: make(chan *appendFuture, rpcMaxPipeline),
+		shutdownCh:   make(chan struct{}),
+	}
+	go n.decodeResponses()
+	return n
+}
+
+// decodeResponses is a long running routine that decodes the responses
+// sent on the connection.
+func (n *netPipeline) decodeResponses() {
+	timeout := n.trans.timeout
+	for {
+		select {
+		case future := <-n.inprogressCh:
+			if timeout > 0 {
+				n.conn.conn.SetReadDeadline(time.Now().Add(timeout))
+			}
+
+			_, err := decodeResponse(n.conn, future.resp)
+			future.respond(err)
+			select {
+			case n.doneCh <- future:
+			case <-n.shutdownCh:
+				return
+			}
+		case <-n.shutdownCh:
+			return
+		}
+	}
+}
+
+// AppendEntries is used to pipeline a new append entries request.
+func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) {
+	// Create a new future
+	future := &appendFuture{
+		start: time.Now(),
+		args:  args,
+		resp:  resp,
+	}
+	future.init()
+
+	// Add a send timeout
+	if timeout := n.trans.timeout; timeout > 0 {
+		n.conn.conn.SetWriteDeadline(time.Now().Add(timeout))
+	}
+
+	// Send the RPC
+	if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil {
+		return nil, err
+	}
+
+	// Hand-off for decoding, this can also cause back-pressure
+	// to prevent too many inflight requests
+	select {
+	case n.inprogressCh <- future:
+		return future, nil
+	case <-n.shutdownCh:
+		return nil, ErrPipelineShutdown
+	}
+}
+
+// Consumer returns a channel that can be used to consume complete futures.
+func (n *netPipeline) Consumer() <-chan AppendFuture {
+	return n.doneCh
+}
+
+// Close is used to shut down the pipeline connection.
+func (n *netPipeline) Close() error {
+	n.shutdownLock.Lock()
+	defer n.shutdownLock.Unlock()
+	if n.shutdown {
+		return nil
+	}
+
+	// Release the connection
+	n.conn.Release()
+
+	n.shutdown = true
+	close(n.shutdownCh)
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/observer.go b/vendor/github.com/hashicorp/raft/observer.go
new file mode 100644
index 0000000000..2d4f37db12
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/observer.go
@@ -0,0 +1,131 @@
+package raft
+
+import (
+	"sync/atomic"
+)
+
+// Observation is sent along the given channel to observers when an event occurs.
+type Observation struct {
+	// Raft holds the Raft instance generating the observation.
+	Raft *Raft
+	// Data holds observation-specific data. Possible types are
+	// *RequestVoteRequest
+	// RaftState
+	// PeerObservation
+	// LeaderObservation
+	Data interface{}
+}
+
+// LeaderObservation is used for the data when leadership changes.
+type LeaderObservation struct {
+	leader ServerAddress
+}
+
+// PeerObservation is sent to observers when peers change.
+type PeerObservation struct {
+	Removed bool
+	Peer    Server
+}
+
+// nextObserverID is used to provide a unique ID for each observer to aid in
+// deregistration.
+var nextObserverID uint64
+
+// FilterFn is a function that can be registered in order to filter observations.
+// The function reports whether the observation should be included - if
+// it returns false, the observation will be filtered out.
+type FilterFn func(o *Observation) bool
+
+// Observer describes what to do with a given observation.
+type Observer struct {
+	// numObserved and numDropped are performance counters for this observer.
+	// 64 bit types must be 64 bit aligned to use with atomic operations on
+	// 32 bit platforms, so keep them at the top of the struct.
+	numObserved uint64
+	numDropped  uint64
+
+	// channel receives observations.
+	channel chan Observation
+
+	// blocking, if true, will cause Raft to block when sending an observation
+	// to this observer. This should generally be set to false.
+	blocking bool
+
+	// filter will be called to determine if an observation should be sent to
+	// the channel.
+	filter FilterFn
+
+	// id is the ID of this observer in the Raft map.
+	id uint64
+}
+
+// NewObserver creates a new observer that can be registered
+// to make observations on a Raft instance. Observations
+// will be sent on the given channel if they satisfy the
+// given filter.
+//
+// If blocking is true, the observer will block when it can't
+// send on the channel, otherwise it may discard events.
+func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer {
+	return &Observer{
+		channel:  channel,
+		blocking: blocking,
+		filter:   filter,
+		id:       atomic.AddUint64(&nextObserverID, 1),
+	}
+}
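+
+// Illustrative usage (a sketch; buffer size and filter are the caller's
+// choice, and a nil filter observes everything):
+//
+//	ch := make(chan Observation, 64)
+//	obs := NewObserver(ch, false, nil)
+//	r.RegisterObserver(obs)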
+
+// GetNumObserved returns the number of observations.
+func (or *Observer) GetNumObserved() uint64 {
+	return atomic.LoadUint64(&or.numObserved)
+}
+
+// GetNumDropped returns the number of dropped observations due to blocking.
+func (or *Observer) GetNumDropped() uint64 {
+	return atomic.LoadUint64(&or.numDropped)
+}
+
+// RegisterObserver registers a new observer.
+func (r *Raft) RegisterObserver(or *Observer) {
+	r.observersLock.Lock()
+	defer r.observersLock.Unlock()
+	r.observers[or.id] = or
+}
+
+// DeregisterObserver deregisters an observer.
+func (r *Raft) DeregisterObserver(or *Observer) {
+	r.observersLock.Lock()
+	defer r.observersLock.Unlock()
+	delete(r.observers, or.id)
+}
+
+// observe sends an observation to every observer.
+func (r *Raft) observe(o interface{}) {
+	// In general observers should not block. But in any case this isn't
+	// disastrous as we only hold a read lock, which merely prevents
+	// registration / deregistration of observers.
+	r.observersLock.RLock()
+	defer r.observersLock.RUnlock()
+	for _, or := range r.observers {
+		// It's wasteful to do this in the loop, but for the common case
+		// where there are no observers we won't create any objects.
+		ob := Observation{Raft: r, Data: o}
+		if or.filter != nil && !or.filter(&ob) {
+			continue
+		}
+		if or.channel == nil {
+			continue
+		}
+		if or.blocking {
+			or.channel <- ob
+			atomic.AddUint64(&or.numObserved, 1)
+		} else {
+			select {
+			case or.channel <- ob:
+				atomic.AddUint64(&or.numObserved, 1)
+			default:
+				atomic.AddUint64(&or.numDropped, 1)
+			}
+		}
+	}
+}
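
For context, application code typically consumes this observer API as
in the sketch below; the filter and logging are illustrative, and r is
assumed to be an existing *raft.Raft with "log" imported:

    // Sketch: watch for leadership changes, dropping events if we lag.
    obsCh := make(chan raft.Observation, 16)
    filter := func(o *raft.Observation) bool {
        _, ok := o.Data.(raft.LeaderObservation)
        return ok // only leadership changes pass through
    }
    observer := raft.NewObserver(obsCh, false, filter) // non-blocking
    r.RegisterObserver(observer)
    defer r.DeregisterObserver(observer)

    go func() {
        for o := range obsCh {
            log.Printf("leadership changed: %#v", o.Data)
        }
    }()
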
diff --git a/vendor/github.com/hashicorp/raft/peersjson.go b/vendor/github.com/hashicorp/raft/peersjson.go
new file mode 100644
index 0000000000..38ca2a8b84
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/peersjson.go
@@ -0,0 +1,98 @@
+package raft
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+)
+
+// ReadPeersJSON consumes a legacy peers.json file in the format of the old JSON
+// peer store and creates a new-style configuration structure. This can be used
+// to migrate this data or perform manual recovery when running protocol versions
+// that can interoperate with older, unversioned Raft servers. This should not be
+// used once server IDs are in use, because the old peers.json format had no
+// support for server IDs or non-voter suffrage types.
+func ReadPeersJSON(path string) (Configuration, error) {
+	// Read in the file.
+	buf, err := ioutil.ReadFile(path)
+	if err != nil {
+		return Configuration{}, err
+	}
+
+	// Parse it as JSON.
+	var peers []string
+	dec := json.NewDecoder(bytes.NewReader(buf))
+	if err := dec.Decode(&peers); err != nil {
+		return Configuration{}, err
+	}
+
+	// Map it into the new-style configuration structure. We can only specify
+	// voter roles here, and the ID has to be the same as the address.
+	var configuration Configuration
+	for _, peer := range peers {
+		server := Server{
+			Suffrage: Voter,
+			ID:       ServerID(peer),
+			Address:  ServerAddress(peer),
+		}
+		configuration.Servers = append(configuration.Servers, server)
+	}
+
+	// We should only ingest valid configurations.
+	if err := checkConfiguration(configuration); err != nil {
+		return Configuration{}, err
+	}
+	return configuration, nil
+}
+
+// configEntry is used when decoding a new-style peers.json.
+type configEntry struct {
+	// ID is the ID of the server (a UUID, usually).
+	ID ServerID `json:"id"`
+
+	// Address is the host:port of the server.
+	Address ServerAddress `json:"address"`
+
+	// NonVoter controls the suffrage. We choose this sense so people
+	// can leave this out and get a Voter by default.
+	NonVoter bool `json:"non_voter"`
+}
+
+// ReadConfigJSON reads a new-style peers.json and returns a configuration
+// structure. This can be used to perform manual recovery when running protocol
+// versions that use server IDs.
+func ReadConfigJSON(path string) (Configuration, error) {
+	// Read in the file.
+	buf, err := ioutil.ReadFile(path)
+	if err != nil {
+		return Configuration{}, err
+	}
+
+	// Parse it as JSON.
+	var peers []configEntry
+	dec := json.NewDecoder(bytes.NewReader(buf))
+	if err := dec.Decode(&peers); err != nil {
+		return Configuration{}, err
+	}
+
+	// Map it into the new-style configuration structure.
+	var configuration Configuration
+	for _, peer := range peers {
+		suffrage := Voter
+		if peer.NonVoter {
+			suffrage = Nonvoter
+		}
+		server := Server{
+			Suffrage: suffrage,
+			ID:       peer.ID,
+			Address:  peer.Address,
+		}
+		configuration.Servers = append(configuration.Servers, server)
+	}
+
+	// We should only ingest valid configurations.
+	if err := checkConfiguration(configuration); err != nil {
+		return Configuration{}, err
+	}
+	return configuration, nil
+}
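
Tying the two halves above together, a hedged usage sketch; the file
path and cluster layout are invented for illustration:

    // A hypothetical new-style peers.json:
    //
    //   [
    //     { "id": "node1", "address": "10.0.0.1:8300" },
    //     { "id": "node2", "address": "10.0.0.2:8300" },
    //     { "id": "node3", "address": "10.0.0.3:8300", "non_voter": true }
    //   ]
    //
    // Omitting "non_voter" yields a Voter, per the configEntry defaults.
    configuration, err := raft.ReadConfigJSON("/var/lib/app/raft/peers.json")
    if err != nil {
        log.Fatalf("reading peers.json: %v", err)
    }
    // configuration.Servers now holds two voters and one non-voter.
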
diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go
new file mode 100644
index 0000000000..a759230bc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/raft.go
@@ -0,0 +1,1486 @@
+package raft
+
+import (
+	"bytes"
+	"container/list"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+const (
+	minCheckInterval = 10 * time.Millisecond
+)
+
+var (
+	keyCurrentTerm  = []byte("CurrentTerm")
+	keyLastVoteTerm = []byte("LastVoteTerm")
+	keyLastVoteCand = []byte("LastVoteCand")
+)
+
+// getRPCHeader returns an initialized RPCHeader struct for the given
+// Raft instance. This structure is sent along with RPC requests and
+// responses.
+func (r *Raft) getRPCHeader() RPCHeader {
+	return RPCHeader{
+		ProtocolVersion: r.conf.ProtocolVersion,
+	}
+}
+
+// checkRPCHeader houses logic about whether this instance of Raft can process
+// the given RPC message.
+func (r *Raft) checkRPCHeader(rpc RPC) error {
+	// Get the header off the RPC message.
+	wh, ok := rpc.Command.(WithRPCHeader)
+	if !ok {
+		return fmt.Errorf("RPC does not have a header")
+	}
+	header := wh.GetRPCHeader()
+
+	// First check is to just make sure the code can understand the
+	// protocol at all.
+	if header.ProtocolVersion < ProtocolVersionMin ||
+		header.ProtocolVersion > ProtocolVersionMax {
+		return ErrUnsupportedProtocol
+	}
+
+	// Second check is whether we should support this message, given the
+	// current protocol we are configured to run. This will drop support
+	// for protocol version 0 starting at protocol version 2, which is
+	// currently what we want, and in general support one version back. We
+	// may need to revisit this policy depending on how future protocol
+	// changes evolve.
+	if header.ProtocolVersion < r.conf.ProtocolVersion-1 {
+		return ErrUnsupportedProtocol
+	}
+
+	return nil
+}
+
+// getSnapshotVersion returns the snapshot version that should be used when
+// creating snapshots, given the protocol version in use.
+func getSnapshotVersion(protocolVersion ProtocolVersion) SnapshotVersion {
+	// Right now we only have two versions and they are backwards compatible
+	// so we don't need to look at the protocol version.
+	return 1
+}
+
+// commitTuple is used to send an index that was committed,
+// with an optional associated future that should be invoked.
+type commitTuple struct {
+	log    *Log
+	future *logFuture
+}
+
+// leaderState is state that is used while we are a leader.
+type leaderState struct {
+	commitCh   chan struct{}
+	commitment *commitment
+	inflight   *list.List // list of logFuture in log index order
+	replState  map[ServerID]*followerReplication
+	notify     map[*verifyFuture]struct{}
+	stepDown   chan struct{}
+}
+
+// setLeader is used to modify the current leader of the cluster
+func (r *Raft) setLeader(leader ServerAddress) {
+	r.leaderLock.Lock()
+	oldLeader := r.leader
+	r.leader = leader
+	r.leaderLock.Unlock()
+	if oldLeader != leader {
+		r.observe(LeaderObservation{leader: leader})
+	}
+}
+
+// requestConfigChange is a helper for the above functions that make
+// configuration change requests. 'req' describes the change. For timeout,
+// see AddVoter.
+func (r *Raft) requestConfigChange(req configurationChangeRequest, timeout time.Duration) IndexFuture {
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+	future := &configurationChangeFuture{
+		req: req,
+	}
+	future.init()
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case r.configurationChangeCh <- future:
+		return future
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	}
+}
+
+// run is a long running goroutine that runs the Raft FSM.
+func (r *Raft) run() {
+	for {
+		// Check if we are doing a shutdown
+		select {
+		case <-r.shutdownCh:
+			// Clear the leader to prevent forwarding
+			r.setLeader("")
+			return
+		default:
+		}
+
+		// Enter into a sub-FSM
+		switch r.getState() {
+		case Follower:
+			r.runFollower()
+		case Candidate:
+			r.runCandidate()
+		case Leader:
+			r.runLeader()
+		}
+	}
+}
+
+// runFollower runs the FSM for a follower.
+func (r *Raft) runFollower() {
+	didWarn := false
+	r.logger.Info(fmt.Sprintf("%v entering Follower state (Leader: %q)", r, r.Leader()))
+	metrics.IncrCounter([]string{"raft", "state", "follower"}, 1)
+	heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout)
+	for {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case c := <-r.configurationChangeCh:
+			// Reject any operations since we are not the leader
+			c.respond(ErrNotLeader)
+
+		case a := <-r.applyCh:
+			// Reject any operations since we are not the leader
+			a.respond(ErrNotLeader)
+
+		case v := <-r.verifyCh:
+			// Reject any operations since we are not the leader
+			v.respond(ErrNotLeader)
+
+		case r := <-r.userRestoreCh:
+			// Reject any restores since we are not the leader
+			r.respond(ErrNotLeader)
+
+		case c := <-r.configurationsCh:
+			c.configurations = r.configurations.Clone()
+			c.respond(nil)
+
+		case b := <-r.bootstrapCh:
+			b.respond(r.liveBootstrap(b.configuration))
+
+		case <-heartbeatTimer:
+			// Restart the heartbeat timer
+			heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout)
+
+			// Check if we have had a successful contact
+			lastContact := r.LastContact()
+			if time.Since(lastContact) < r.conf.HeartbeatTimeout {
+				continue
+			}
+
+			// Heartbeat failed! Transition to the candidate state
+			lastLeader := r.Leader()
+			r.setLeader("")
+
+			if r.configurations.latestIndex == 0 {
+				if !didWarn {
+					r.logger.Warn("no known peers, aborting election")
+					didWarn = true
+				}
+			} else if r.configurations.latestIndex == r.configurations.committedIndex &&
+				!hasVote(r.configurations.latest, r.localID) {
+				if !didWarn {
+					r.logger.Warn("not part of stable configuration, aborting election")
+					didWarn = true
+				}
+			} else {
+				r.logger.Warn(fmt.Sprintf("Heartbeat timeout from %q reached, starting election", lastLeader))
+				metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1)
+				r.setState(Candidate)
+				return
+			}
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
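
The randomized heartbeat timer above is what keeps followers from all
turning candidate at the same instant. randomTimeout lives elsewhere
in this package (util.go); the sketch below, assuming "math/rand" and
"time" are imported, is a close approximation of its behavior, firing
between the configured timeout and twice that value:

    // randomTimeout returns a channel that fires after minVal plus up
    // to minVal of jitter, de-synchronizing election timers.
    func randomTimeout(minVal time.Duration) <-chan time.Time {
        if minVal == 0 {
            return nil
        }
        extra := time.Duration(rand.Int63()) % minVal
        return time.After(minVal + extra)
    }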
+
+// liveBootstrap attempts to seed an initial configuration for the cluster. See
+// the Raft object's member BootstrapCluster for more details. This must only be
+// called on the main thread, and only makes sense in the follower state.
+func (r *Raft) liveBootstrap(configuration Configuration) error {
+	// Use the pre-init API to make the static updates.
+	err := BootstrapCluster(&r.conf, r.logs, r.stable, r.snapshots,
+		r.trans, configuration)
+	if err != nil {
+		return err
+	}
+
+	// Make the configuration live.
+	var entry Log
+	if err := r.logs.GetLog(1, &entry); err != nil {
+		panic(err)
+	}
+	r.setCurrentTerm(1)
+	r.setLastLog(entry.Index, entry.Term)
+	r.processConfigurationLogEntry(&entry)
+	return nil
+}
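
Applications normally reach liveBootstrap through the public
BootstrapCluster method on the Raft object. A sketch of a single-node
bootstrap, with a placeholder ID and address and r an existing
*raft.Raft:

    // Sketch: bootstrap a brand-new single-node cluster.
    configuration := raft.Configuration{
        Servers: []raft.Server{
            {
                Suffrage: raft.Voter,
                ID:       raft.ServerID("node1"),
                Address:  raft.ServerAddress("10.0.0.1:8300"),
            },
        },
    }
    if err := r.BootstrapCluster(configuration).Error(); err != nil {
        log.Fatalf("bootstrap failed: %v", err)
    }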
+
+// runCandidate runs the FSM for a candidate.
+func (r *Raft) runCandidate() {
+	r.logger.Info(fmt.Sprintf("%v entering Candidate state in term %v", r, r.getCurrentTerm()+1))
+	metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1)
+
+	// Start vote for us, and set a timeout
+	voteCh := r.electSelf()
+	electionTimer := randomTimeout(r.conf.ElectionTimeout)
+
+	// Tally the votes, need a simple majority
+	grantedVotes := 0
+	votesNeeded := r.quorumSize()
+	r.logger.Debug(fmt.Sprintf("Votes needed: %d", votesNeeded))
+
+	for r.getState() == Candidate {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case vote := <-voteCh:
+			// Check if the term is greater than ours, bail
+			if vote.Term > r.getCurrentTerm() {
+				r.logger.Debug("Newer term discovered, fallback to follower")
+				r.setState(Follower)
+				r.setCurrentTerm(vote.Term)
+				return
+			}
+
+			// Check if the vote is granted
+			if vote.Granted {
+				grantedVotes++
+				r.logger.Debug(fmt.Sprintf("Vote granted from %s in term %v. Tally: %d",
+					vote.voterID, vote.Term, grantedVotes))
+			}
+
+			// Check if we've become the leader
+			if grantedVotes >= votesNeeded {
+				r.logger.Info(fmt.Sprintf("Election won. Tally: %d", grantedVotes))
+				r.setState(Leader)
+				r.setLeader(r.localAddr)
+				return
+			}
+
+		case c := <-r.configurationChangeCh:
+			// Reject any operations since we are not the leader
+			c.respond(ErrNotLeader)
+
+		case a := <-r.applyCh:
+			// Reject any operations since we are not the leader
+			a.respond(ErrNotLeader)
+
+		case v := <-r.verifyCh:
+			// Reject any operations since we are not the leader
+			v.respond(ErrNotLeader)
+
+		case r := <-r.userRestoreCh:
+			// Reject any restores since we are not the leader
+			r.respond(ErrNotLeader)
+
+		case c := <-r.configurationsCh:
+			c.configurations = r.configurations.Clone()
+			c.respond(nil)
+
+		case b := <-r.bootstrapCh:
+			b.respond(ErrCantBootstrap)
+
+		case <-electionTimer:
+			// Election failed! Restart the election. We simply return,
+			// which will kick us back into runCandidate
+			r.logger.Warn("Election timeout reached, restarting election")
+			return
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// runLeader runs the FSM for a leader. Do the setup here and drop into
+// the leaderLoop for the hot loop.
+func (r *Raft) runLeader() {
+	r.logger.Info(fmt.Sprintf("%v entering Leader state", r))
+	metrics.IncrCounter([]string{"raft", "state", "leader"}, 1)
+
+	// Notify that we are the leader
+	asyncNotifyBool(r.leaderCh, true)
+
+	// Push to the notify channel if given
+	if notify := r.conf.NotifyCh; notify != nil {
+		select {
+		case notify <- true:
+		case <-r.shutdownCh:
+		}
+	}
+
+	// Setup leader state
+	r.leaderState.commitCh = make(chan struct{}, 1)
+	r.leaderState.commitment = newCommitment(r.leaderState.commitCh,
+		r.configurations.latest,
+		r.getLastIndex()+1 /* first index that may be committed in this term */)
+	r.leaderState.inflight = list.New()
+	r.leaderState.replState = make(map[ServerID]*followerReplication)
+	r.leaderState.notify = make(map[*verifyFuture]struct{})
+	r.leaderState.stepDown = make(chan struct{}, 1)
+
+	// Cleanup state on step down
+	defer func() {
+		// Since we were the leader previously, we update our
+		// last contact time when we step down, so that we are not
+		// reporting a last contact time from before we were the
+		// leader. Otherwise, to a client it would seem our data
+		// is extremely stale.
+		r.setLastContact()
+
+		// Stop replication
+		for _, p := range r.leaderState.replState {
+			close(p.stopCh)
+		}
+
+		// Respond to all inflight operations
+		for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() {
+			e.Value.(*logFuture).respond(ErrLeadershipLost)
+		}
+
+		// Respond to any pending verify requests
+		for future := range r.leaderState.notify {
+			future.respond(ErrLeadershipLost)
+		}
+
+		// Clear all the state
+		r.leaderState.commitCh = nil
+		r.leaderState.commitment = nil
+		r.leaderState.inflight = nil
+		r.leaderState.replState = nil
+		r.leaderState.notify = nil
+		r.leaderState.stepDown = nil
+
+		// If we are stepping down for some reason, no known leader.
+		// We may have stepped down due to an RPC call, which would
+		// provide the leader, so we cannot always blank this out.
+		r.leaderLock.Lock()
+		if r.leader == r.localAddr {
+			r.leader = ""
+		}
+		r.leaderLock.Unlock()
+
+		// Notify that we are not the leader
+		asyncNotifyBool(r.leaderCh, false)
+
+		// Push to the notify channel if given
+		if notify := r.conf.NotifyCh; notify != nil {
+			select {
+			case notify <- false:
+			case <-r.shutdownCh:
+				// On shutdown, make a best effort but do not block
+				select {
+				case notify <- false:
+				default:
+				}
+			}
+		}
+	}()
+
+	// Start a replication routine for each peer
+	r.startStopReplication()
+
+	// Dispatch a no-op log entry first. This gets this leader up to the latest
+	// possible commit index, even in the absence of client commands. This used
+	// to append a configuration entry instead of a noop. However, that permits
+	// an unbounded number of uncommitted configurations in the log. We now
+	// maintain that there exists at most one uncommitted configuration entry in
+	// any log, so we have to do proper no-ops here.
+	noop := &logFuture{
+		log: Log{
+			Type: LogNoop,
+		},
+	}
+	r.dispatchLogs([]*logFuture{noop})
+
+	// Sit in the leader loop until we step down
+	r.leaderLoop()
+}
+
+// startStopReplication will set up state and start asynchronous replication to
+// new peers, and stop replication to removed peers. Before removing a peer,
+// it'll instruct the replication routines to try to replicate to the current
+// index. This must only be called from the main thread.
+func (r *Raft) startStopReplication() {
+	inConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers))
+	lastIdx := r.getLastIndex()
+
+	// Start replication goroutines that need starting
+	for _, server := range r.configurations.latest.Servers {
+		if server.ID == r.localID {
+			continue
+		}
+		inConfig[server.ID] = true
+		if _, ok := r.leaderState.replState[server.ID]; !ok {
+			r.logger.Info(fmt.Sprintf("Added peer %v, starting replication", server.ID))
+			s := &followerReplication{
+				peer:        server,
+				commitment:  r.leaderState.commitment,
+				stopCh:      make(chan uint64, 1),
+				triggerCh:   make(chan struct{}, 1),
+				currentTerm: r.getCurrentTerm(),
+				nextIndex:   lastIdx + 1,
+				lastContact: time.Now(),
+				notify:      make(map[*verifyFuture]struct{}),
+				notifyCh:    make(chan struct{}, 1),
+				stepDown:    r.leaderState.stepDown,
+			}
+			r.leaderState.replState[server.ID] = s
+			r.goFunc(func() { r.replicate(s) })
+			asyncNotifyCh(s.triggerCh)
+			r.observe(PeerObservation{Peer: server, Removed: false})
+		}
+	}
+
+	// Stop replication goroutines that need stopping
+	for serverID, repl := range r.leaderState.replState {
+		if inConfig[serverID] {
+			continue
+		}
+		// Replicate up to lastIdx and stop
+		r.logger.Info(fmt.Sprintf("Removed peer %v, stopping replication after %v", serverID, lastIdx))
+		repl.stopCh <- lastIdx
+		close(repl.stopCh)
+		delete(r.leaderState.replState, serverID)
+		r.observe(PeerObservation{Peer: repl.peer, Removed: true})
+	}
+}
+
+// configurationChangeChIfStable returns r.configurationChangeCh if it's safe
+// to process requests from it, or nil otherwise. This must only be called
+// from the main thread.
+//
+// Note that if the conditions here were to change outside of leaderLoop to take
+// this from nil to non-nil, we would need leaderLoop to be kicked.
+func (r *Raft) configurationChangeChIfStable() chan *configurationChangeFuture {
+	// Have to wait until:
+	// 1. The latest configuration is committed, and
+	// 2. This leader has committed some entry (the noop) in this term
+	//    https://groups.google.com/forum/#!msg/raft-dev/t4xj6dJTP6E/d2D9LrWRza8J
+	if r.configurations.latestIndex == r.configurations.committedIndex &&
+		r.getCommitIndex() >= r.leaderState.commitment.startIndex {
+		return r.configurationChangeCh
+	}
+	return nil
+}
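
Returning nil here leans on a Go property: receiving from a nil
channel blocks forever, so a nil return silently disables the
corresponding case in leaderLoop's select. A tiny self-contained
illustration of the idiom:

    package main

    import "fmt"

    func main() {
        var disabled chan int // nil channel: its case can never fire
        enabled := make(chan int, 1)
        enabled <- 42

        select {
        case v := <-disabled:
            fmt.Println("never reached", v)
        case v := <-enabled:
            fmt.Println("got", v) // prints: got 42
        }
    }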
+
+// leaderLoop is the hot loop for a leader. It is invoked
+// after all the various leader setup is done.
+func (r *Raft) leaderLoop() {
+	// stepDown is used to track if there is an inflight log that
+	// would cause us to lose leadership (specifically a RemovePeer of
+	// ourselves). If this is the case, we must not allow any logs to
+	// be processed in parallel, otherwise we are basing commit on
+	// only a single peer (ourself) and replicating to an undefined set
+	// of peers.
+	stepDown := false
+
+	lease := time.After(r.conf.LeaderLeaseTimeout)
+	for r.getState() == Leader {
+		select {
+		case rpc := <-r.rpcCh:
+			r.processRPC(rpc)
+
+		case <-r.leaderState.stepDown:
+			r.setState(Follower)
+
+		case <-r.leaderState.commitCh:
+			// Process the newly committed entries
+			oldCommitIndex := r.getCommitIndex()
+			commitIndex := r.leaderState.commitment.getCommitIndex()
+			r.setCommitIndex(commitIndex)
+
+			if r.configurations.latestIndex > oldCommitIndex &&
+				r.configurations.latestIndex <= commitIndex {
+				r.configurations.committed = r.configurations.latest
+				r.configurations.committedIndex = r.configurations.latestIndex
+				if !hasVote(r.configurations.committed, r.localID) {
+					stepDown = true
+				}
+			}
+
+			var numProcessed int
+			start := time.Now()
+
+			for {
+				e := r.leaderState.inflight.Front()
+				if e == nil {
+					break
+				}
+				commitLog := e.Value.(*logFuture)
+				idx := commitLog.log.Index
+				if idx > commitIndex {
+					break
+				}
+				// Measure the commit time
+				metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch)
+
+				r.processLogs(idx, commitLog)
+
+				r.leaderState.inflight.Remove(e)
+				numProcessed++
+			}
+
+			// Measure the time to enqueue batch of logs for FSM to apply
+			metrics.MeasureSince([]string{"raft", "fsm", "enqueue"}, start)
+
+			// Count the number of logs enqueued
+			metrics.SetGauge([]string{"raft", "commitNumLogs"}, float32(numProcessed))
+
+			if stepDown {
+				if r.conf.ShutdownOnRemove {
+					r.logger.Info("Removed ourself, shutting down")
+					r.Shutdown()
+				} else {
+					r.logger.Info("Removed ourself, transitioning to follower")
+					r.setState(Follower)
+				}
+			}
+
+		case v := <-r.verifyCh:
+			if v.quorumSize == 0 {
+				// Just dispatched, start the verification
+				r.verifyLeader(v)
+
+			} else if v.votes < v.quorumSize {
+				// Early return, means there must be a new leader
+				r.logger.Warn("New leader elected, stepping down")
+				r.setState(Follower)
+				delete(r.leaderState.notify, v)
+				for _, repl := range r.leaderState.replState {
+					repl.cleanNotify(v)
+				}
+				v.respond(ErrNotLeader)
+
+			} else {
+				// Quorum of members agree, we are still leader
+				delete(r.leaderState.notify, v)
+				for _, repl := range r.leaderState.replState {
+					repl.cleanNotify(v)
+				}
+				v.respond(nil)
+			}
+
+		case future := <-r.userRestoreCh:
+			err := r.restoreUserSnapshot(future.meta, future.reader)
+			future.respond(err)
+
+		case c := <-r.configurationsCh:
+			c.configurations = r.configurations.Clone()
+			c.respond(nil)
+
+		case future := <-r.configurationChangeChIfStable():
+			r.appendConfigurationEntry(future)
+
+		case b := <-r.bootstrapCh:
+			b.respond(ErrCantBootstrap)
+
+		case newLog := <-r.applyCh:
+			// Group commit, gather all the ready commits
+			ready := []*logFuture{newLog}
+		GROUP_COMMIT_LOOP:
+			for i := 0; i < r.conf.MaxAppendEntries; i++ {
+				select {
+				case newLog := <-r.applyCh:
+					ready = append(ready, newLog)
+				default:
+					// A bare break here would only exit the select,
+					// not the loop, so break via the label instead.
+					break GROUP_COMMIT_LOOP
+				}
+			}
+
+			// Dispatch the logs
+			if stepDown {
+				// we're in the process of stepping down as leader, don't process anything new
+				for i := range ready {
+					ready[i].respond(ErrNotLeader)
+				}
+			} else {
+				r.dispatchLogs(ready)
+			}
+
+		case <-lease:
+			// Check if we've exceeded the lease, potentially stepping down
+			maxDiff := r.checkLeaderLease()
+
+			// Next check interval should adjust for the last node we've
+			// contacted, without going negative
+			checkInterval := r.conf.LeaderLeaseTimeout - maxDiff
+			if checkInterval < minCheckInterval {
+				checkInterval = minCheckInterval
+			}
+
+			// Renew the lease timer
+			lease = time.After(checkInterval)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// verifyLeader must be called from the main thread for safety.
+// Causes the followers to attempt an immediate heartbeat.
+func (r *Raft) verifyLeader(v *verifyFuture) {
+	// Current leader always votes for self
+	v.votes = 1
+
+	// Set the quorum size, hot-path for single node
+	v.quorumSize = r.quorumSize()
+	if v.quorumSize == 1 {
+		v.respond(nil)
+		return
+	}
+
+	// Track this request
+	v.notifyCh = r.verifyCh
+	r.leaderState.notify[v] = struct{}{}
+
+	// Trigger immediate heartbeats
+	for _, repl := range r.leaderState.replState {
+		repl.notifyLock.Lock()
+		repl.notify[v] = struct{}{}
+		repl.notifyLock.Unlock()
+		asyncNotifyCh(repl.notifyCh)
+	}
+}
+
+// checkLeaderLease is used to check if we can contact a quorum of nodes
+// within the last leader lease interval. If not, we need to step down,
+// as we may have lost connectivity. Returns the maximum duration without
+// contact. This must only be called from the main thread.
+func (r *Raft) checkLeaderLease() time.Duration {
+	// Track contacted nodes, we can always contact ourself
+	contacted := 1
+
+	// Check each follower
+	var maxDiff time.Duration
+	now := time.Now()
+	for peer, f := range r.leaderState.replState {
+		diff := now.Sub(f.LastContact())
+		if diff <= r.conf.LeaderLeaseTimeout {
+			contacted++
+			if diff > maxDiff {
+				maxDiff = diff
+			}
+		} else {
+			// Log at least once at high value, then debug. Otherwise it gets very verbose.
+			if diff <= 3*r.conf.LeaderLeaseTimeout {
+				r.logger.Warn(fmt.Sprintf("Failed to contact %v in %v", peer, diff))
+			} else {
+				r.logger.Debug(fmt.Sprintf("Failed to contact %v in %v", peer, diff))
+			}
+		}
+		metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond))
+	}
+
+	// Verify we can contact a quorum
+	quorum := r.quorumSize()
+	if contacted < quorum {
+		r.logger.Warn("Failed to contact quorum of nodes, stepping down")
+		r.setState(Follower)
+		metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1)
+	}
+	return maxDiff
+}
+
+// quorumSize is used to return the quorum size. This must only be called on
+// the main thread.
+// TODO: revisit usage
+func (r *Raft) quorumSize() int {
+	voters := 0
+	for _, server := range r.configurations.latest.Servers {
+		if server.Suffrage == Voter {
+			voters++
+		}
+	}
+	return voters/2 + 1
+}
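
Because of the integer division, an even-sized voter set needs the
same quorum as the next odd size up, which is why a fourth voter adds
no fault tolerance. A quick runnable illustration:

    package main

    import "fmt"

    func quorum(voters int) int { return voters/2 + 1 }

    func main() {
        for _, n := range []int{1, 3, 4, 5} {
            fmt.Printf("%d voters -> quorum %d, tolerates %d failures\n",
                n, quorum(n), n-quorum(n))
        }
    }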
+
+// restoreUserSnapshot is used to manually consume an external snapshot, such
+// as if restoring from a backup. We will use the current Raft configuration,
+// not the one from the snapshot, so that we can restore into a new cluster. We
+// will also take the higher of the snapshot's index and the current index,
+// then add 1, forcing a new state with a hole in the Raft log so that the
+// snapshot will be sent to followers and used for any new joiners.
+// This can only be run on the leader, and returns a future that can be used to
+// block until complete.
+func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error {
+	defer metrics.MeasureSince([]string{"raft", "restoreUserSnapshot"}, time.Now())
+
+	// Sanity check the version.
+	version := meta.Version
+	if version < SnapshotVersionMin || version > SnapshotVersionMax {
+		return fmt.Errorf("unsupported snapshot version %d", version)
+	}
+
+	// We don't support snapshots while there's a config change
+	// outstanding since the snapshot doesn't have a means to
+	// represent this state.
+	committedIndex := r.configurations.committedIndex
+	latestIndex := r.configurations.latestIndex
+	if committedIndex != latestIndex {
+		return fmt.Errorf("cannot restore snapshot now, wait until the configuration entry at %v has been applied (have applied %v)",
+			latestIndex, committedIndex)
+	}
+
+	// Cancel any inflight requests.
+	for {
+		e := r.leaderState.inflight.Front()
+		if e == nil {
+			break
+		}
+		e.Value.(*logFuture).respond(ErrAbortedByRestore)
+		r.leaderState.inflight.Remove(e)
+	}
+
+	// We will overwrite the snapshot metadata with the current term,
+	// an index that's greater than the current index, or the last
+	// index in the snapshot. It's important that we leave a hole in
+	// the index so we know there's nothing in the Raft log there and
+	// replication will fault and send the snapshot.
+	term := r.getCurrentTerm()
+	lastIndex := r.getLastIndex()
+	if meta.Index > lastIndex {
+		lastIndex = meta.Index
+	}
+	lastIndex++
+
+	// Dump the snapshot. Note that we use the latest configuration,
+	// not the one that came with the snapshot.
+	sink, err := r.snapshots.Create(version, lastIndex, term,
+		r.configurations.latest, r.configurations.latestIndex, r.trans)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	n, err := io.Copy(sink, reader)
+	if err != nil {
+		sink.Cancel()
+		return fmt.Errorf("failed to write snapshot: %v", err)
+	}
+	if n != meta.Size {
+		sink.Cancel()
+		return fmt.Errorf("failed to write snapshot, size didn't match (%d != %d)", n, meta.Size)
+	}
+	if err := sink.Close(); err != nil {
+		return fmt.Errorf("failed to close snapshot: %v", err)
+	}
+	r.logger.Info(fmt.Sprintf("Copied %d bytes to local snapshot", n))
+
+	// Restore the snapshot into the FSM. If this fails we are in a
+	// bad state so we panic to take ourselves out.
+	fsm := &restoreFuture{ID: sink.ID()}
+	fsm.init()
+	select {
+	case r.fsmMutateCh <- fsm:
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	}
+	if err := fsm.Error(); err != nil {
+		panic(fmt.Errorf("failed to restore snapshot: %v", err))
+	}
+
+	// We set the last log so it looks like we've stored the empty
+	// index we burned. The last applied is set because we made the
+	// FSM take the snapshot state, and we store the last snapshot
+	// in the stable store since we created a snapshot as part of
+	// this process.
+	r.setLastLog(lastIndex, term)
+	r.setLastApplied(lastIndex)
+	r.setLastSnapshot(lastIndex, term)
+
+	r.logger.Info(fmt.Sprintf("Restored user snapshot (index %d)", lastIndex))
+	return nil
+}
+
+// appendConfigurationEntry changes the configuration and adds a new
+// configuration entry to the log. This must only be called from the
+// main thread.
+func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) {
+	configuration, err := nextConfiguration(r.configurations.latest, r.configurations.latestIndex, future.req)
+	if err != nil {
+		future.respond(err)
+		return
+	}
+
+	r.logger.Info(fmt.Sprintf("Updating configuration with %s (%v, %v) to %+v",
+		future.req.command, future.req.serverID, future.req.serverAddress, configuration.Servers))
+
+	// In pre-ID compatibility mode we translate all configuration changes
+	// in to an old remove peer message, which can handle all supported
+	// cases for peer changes in the pre-ID world (adding and removing
+	// voters). Both add peer and remove peer log entries are handled
+	// similarly on old Raft servers, but remove peer does extra checks to
+	// see if a leader needs to step down. Since they both assert the full
+	// configuration, then we can safely call remove peer for everything.
+	if r.protocolVersion < 2 {
+		future.log = Log{
+			Type: LogRemovePeerDeprecated,
+			Data: encodePeers(configuration, r.trans),
+		}
+	} else {
+		future.log = Log{
+			Type: LogConfiguration,
+			Data: encodeConfiguration(configuration),
+		}
+	}
+
+	r.dispatchLogs([]*logFuture{&future.logFuture})
+	index := future.Index()
+	r.configurations.latest = configuration
+	r.configurations.latestIndex = index
+	r.leaderState.commitment.setConfiguration(configuration)
+	r.startStopReplication()
+}
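
The futures handled here are produced by the public membership methods
(AddVoter, AddNonvoter, RemoveServer and friends). A hedged sketch of
requesting a change from application code, with a placeholder ID and
address and r an existing *raft.Raft:

    // Sketch: ask the leader to add a new voting member.
    future := r.AddVoter(
        raft.ServerID("node4"),
        raft.ServerAddress("10.0.0.4:8300"),
        0,             // prevIndex 0: skip the optimistic concurrency check
        5*time.Second, // give up if not enqueued within this window
    )
    if err := future.Error(); err != nil {
        log.Printf("membership change failed: %v", err)
    }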
+
+// dispatchLogs is called on the leader to push logs to disk, mark them
+// as inflight and begin replication of them.
+func (r *Raft) dispatchLogs(applyLogs []*logFuture) {
+	now := time.Now()
+	defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now)
+
+	term := r.getCurrentTerm()
+	lastIndex := r.getLastIndex()
+
+	n := len(applyLogs)
+	logs := make([]*Log, n)
+	metrics.SetGauge([]string{"raft", "leader", "dispatchNumLogs"}, float32(n))
+
+	for idx, applyLog := range applyLogs {
+		applyLog.dispatch = now
+		lastIndex++
+		applyLog.log.Index = lastIndex
+		applyLog.log.Term = term
+		logs[idx] = &applyLog.log
+		r.leaderState.inflight.PushBack(applyLog)
+	}
+
+	// Write the log entry locally
+	if err := r.logs.StoreLogs(logs); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to commit logs: %v", err))
+		for _, applyLog := range applyLogs {
+			applyLog.respond(err)
+		}
+		r.setState(Follower)
+		return
+	}
+	r.leaderState.commitment.match(r.localID, lastIndex)
+
+	// Update the last log since it's on disk now
+	r.setLastLog(lastIndex, term)
+
+	// Notify the replicators of the new log
+	for _, f := range r.leaderState.replState {
+		asyncNotifyCh(f.triggerCh)
+	}
+}
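
The logFutures dispatched here originate from the public Apply method
on the leader; the returned future resolves once the FSM has applied
the entry. A usage sketch with an application-defined (here invented)
command encoding:

    // Sketch: submit a command and wait for the FSM to apply it.
    cmd := []byte(`{"op":"set","key":"k","value":"v"}`) // hypothetical
    future := r.Apply(cmd, 5*time.Second)
    if err := future.Error(); err != nil {
        log.Printf("apply failed: %v", err)
    }
    resp := future.Response() // whatever the FSM's Apply returned
    _ = resp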
+
+// processLogs is used to apply all the committed entries that haven't been
+// applied up to the given index limit.
+// This can be called from both leaders and followers.
+// Followers call this from AppendEntries, for n entries at a time, and always
+// pass future=nil.
+// Leaders call this once per inflight when entries are committed. They pass
+// the future from inflights.
+func (r *Raft) processLogs(index uint64, future *logFuture) {
+	// Reject logs we've applied already
+	lastApplied := r.getLastApplied()
+	if index <= lastApplied {
+		r.logger.Warn(fmt.Sprintf("Skipping application of old log: %d", index))
+		return
+	}
+
+	// Apply all the preceding logs
+	for idx := r.getLastApplied() + 1; idx <= index; idx++ {
+		// Get the log, either from the future or from our log store
+		if future != nil && future.log.Index == idx {
+			r.processLog(&future.log, future)
+		} else {
+			l := new(Log)
+			if err := r.logs.GetLog(idx, l); err != nil {
+				r.logger.Error(fmt.Sprintf("Failed to get log at %d: %v", idx, err))
+				panic(err)
+			}
+			r.processLog(l, nil)
+		}
+
+		// Update the lastApplied index and term
+		r.setLastApplied(idx)
+	}
+}
+
+// processLog is invoked to process the application of a single committed log entry.
+func (r *Raft) processLog(l *Log, future *logFuture) {
+	switch l.Type {
+	case LogBarrier:
+		// Barrier is handled by the FSM
+		fallthrough
+
+	case LogCommand:
+		// Forward to the fsm handler
+		select {
+		case r.fsmMutateCh <- &commitTuple{l, future}:
+		case <-r.shutdownCh:
+			if future != nil {
+				future.respond(ErrRaftShutdown)
+			}
+		}
+
+		// Return so that the future is only responded to
+		// by the FSM handler when the application is done
+		return
+
+	case LogConfiguration:
+	case LogAddPeerDeprecated:
+	case LogRemovePeerDeprecated:
+	case LogNoop:
+		// Ignore the no-op
+
+	default:
+		panic(fmt.Errorf("unrecognized log type: %#v", l))
+	}
+
+	// Invoke the future if given
+	if future != nil {
+		future.respond(nil)
+	}
+}
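
The commitTuple pushed onto fsmMutateCh above is eventually applied
through the package's FSM interface, which the embedding application
implements. A minimal example; the counting behavior is invented and
the snapshot path is stubbed out:

    // Sketch: a trivial FSM that just counts applied entries.
    type counterFSM struct{ n uint64 }

    func (f *counterFSM) Apply(l *raft.Log) interface{} {
        f.n++ // a real FSM would decode l.Data and mutate state
        return f.n
    }

    func (f *counterFSM) Snapshot() (raft.FSMSnapshot, error) {
        return nil, fmt.Errorf("snapshots not implemented in this sketch")
    }

    func (f *counterFSM) Restore(rc io.ReadCloser) error {
        defer rc.Close()
        return nil // a real FSM would rebuild its state from the stream
    }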
+
+// processRPC is called to handle an incoming RPC request. This must only be
+// called from the main thread.
+func (r *Raft) processRPC(rpc RPC) {
+	if err := r.checkRPCHeader(rpc); err != nil {
+		rpc.Respond(nil, err)
+		return
+	}
+
+	switch cmd := rpc.Command.(type) {
+	case *AppendEntriesRequest:
+		r.appendEntries(rpc, cmd)
+	case *RequestVoteRequest:
+		r.requestVote(rpc, cmd)
+	case *InstallSnapshotRequest:
+		r.installSnapshot(rpc, cmd)
+	default:
+		r.logger.Error(fmt.Sprintf("Got unexpected command: %#v", rpc.Command))
+		rpc.Respond(nil, fmt.Errorf("unexpected command"))
+	}
+}
+
+// processHeartbeat is a special handler used just for heartbeat requests
+// so that they can be fast-pathed if a transport supports it. This must only
+// be called from the main thread.
+func (r *Raft) processHeartbeat(rpc RPC) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now())
+
+	// Check if we are shutdown, just ignore the RPC
+	select {
+	case <-r.shutdownCh:
+		return
+	default:
+	}
+
+	// Ensure we are only handling a heartbeat
+	switch cmd := rpc.Command.(type) {
+	case *AppendEntriesRequest:
+		r.appendEntries(rpc, cmd)
+	default:
+		r.logger.Error(fmt.Sprintf("Expected heartbeat, got command: %#v", rpc.Command))
+		rpc.Respond(nil, fmt.Errorf("unexpected command"))
+	}
+}
+
+// appendEntries is invoked when we get an append entries RPC call. This must
+// only be called from the main thread.
+func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now())
+	// Setup a response
+	resp := &AppendEntriesResponse{
+		RPCHeader:      r.getRPCHeader(),
+		Term:           r.getCurrentTerm(),
+		LastLog:        r.getLastIndex(),
+		Success:        false,
+		NoRetryBackoff: false,
+	}
+	var rpcErr error
+	defer func() {
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Ignore an older term
+	if a.Term < r.getCurrentTerm() {
+		return
+	}
+
+	// Increase the term if we see a newer one, also transition to follower
+	// if we ever get an appendEntries call
+	if a.Term > r.getCurrentTerm() || r.getState() != Follower {
+		// Ensure transition to follower
+		r.setState(Follower)
+		r.setCurrentTerm(a.Term)
+		resp.Term = a.Term
+	}
+
+	// Save the current leader
+	r.setLeader(ServerAddress(r.trans.DecodePeer(a.Leader)))
+
+	// Verify the last log entry
+	if a.PrevLogEntry > 0 {
+		lastIdx, lastTerm := r.getLastEntry()
+
+		var prevLogTerm uint64
+		if a.PrevLogEntry == lastIdx {
+			prevLogTerm = lastTerm
+
+		} else {
+			var prevLog Log
+			if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil {
+				r.logger.Warn(fmt.Sprintf("Failed to get previous log: %d %v (last: %d)",
+					a.PrevLogEntry, err, lastIdx))
+				resp.NoRetryBackoff = true
+				return
+			}
+			prevLogTerm = prevLog.Term
+		}
+
+		if a.PrevLogTerm != prevLogTerm {
+			r.logger.Warn(fmt.Sprintf("Previous log term mis-match: ours: %d remote: %d",
+				prevLogTerm, a.PrevLogTerm))
+			resp.NoRetryBackoff = true
+			return
+		}
+	}
+
+	// Process any new entries
+	if len(a.Entries) > 0 {
+		start := time.Now()
+
+		// Delete any conflicting entries, skip any duplicates
+		lastLogIdx, _ := r.getLastLog()
+		var newEntries []*Log
+		for i, entry := range a.Entries {
+			if entry.Index > lastLogIdx {
+				newEntries = a.Entries[i:]
+				break
+			}
+			var storeEntry Log
+			if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil {
+				r.logger.Warn(fmt.Sprintf("Failed to get log entry %d: %v",
+					entry.Index, err))
+				return
+			}
+			if entry.Term != storeEntry.Term {
+				r.logger.Warn(fmt.Sprintf("Clearing log suffix from %d to %d", entry.Index, lastLogIdx))
+				if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil {
+					r.logger.Error(fmt.Sprintf("Failed to clear log suffix: %v", err))
+					return
+				}
+				if entry.Index <= r.configurations.latestIndex {
+					r.configurations.latest = r.configurations.committed
+					r.configurations.latestIndex = r.configurations.committedIndex
+				}
+				newEntries = a.Entries[i:]
+				break
+			}
+		}
+
+		if n := len(newEntries); n > 0 {
+			// Append the new entries
+			if err := r.logs.StoreLogs(newEntries); err != nil {
+				r.logger.Error(fmt.Sprintf("Failed to append to logs: %v", err))
+				// TODO: leaving r.getLastLog() in the wrong
+				// state if there was a truncation above
+				return
+			}
+
+			// Handle any new configuration changes
+			for _, newEntry := range newEntries {
+				r.processConfigurationLogEntry(newEntry)
+			}
+
+			// Update the lastLog
+			last := newEntries[n-1]
+			r.setLastLog(last.Index, last.Term)
+		}
+
+		metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start)
+	}
+
+	// Update the commit index
+	if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() {
+		start := time.Now()
+		idx := min(a.LeaderCommitIndex, r.getLastIndex())
+		r.setCommitIndex(idx)
+		if r.configurations.latestIndex <= idx {
+			r.configurations.committed = r.configurations.latest
+			r.configurations.committedIndex = r.configurations.latestIndex
+		}
+		r.processLogs(idx, nil)
+		metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start)
+	}
+
+	// Everything went well, set success
+	resp.Success = true
+	r.setLastContact()
+	return
+}
+
+// processConfigurationLogEntry takes a log entry and updates the latest
+// configuration if the entry results in a new configuration. This must only be
+// called from the main thread, or from NewRaft() before any threads have begun.
+func (r *Raft) processConfigurationLogEntry(entry *Log) {
+	if entry.Type == LogConfiguration {
+		r.configurations.committed = r.configurations.latest
+		r.configurations.committedIndex = r.configurations.latestIndex
+		r.configurations.latest = decodeConfiguration(entry.Data)
+		r.configurations.latestIndex = entry.Index
+	} else if entry.Type == LogAddPeerDeprecated || entry.Type == LogRemovePeerDeprecated {
+		r.configurations.committed = r.configurations.latest
+		r.configurations.committedIndex = r.configurations.latestIndex
+		r.configurations.latest = decodePeers(entry.Data, r.trans)
+		r.configurations.latestIndex = entry.Index
+	}
+}
+
+// requestVote is invoked when we get a RequestVote RPC call.
+func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now())
+	r.observe(*req)
+
+	// Setup a response
+	resp := &RequestVoteResponse{
+		RPCHeader: r.getRPCHeader(),
+		Term:      r.getCurrentTerm(),
+		Granted:   false,
+	}
+	var rpcErr error
+	defer func() {
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Version 0 servers will panic unless the peers list is present. It's only
+	// used on them to produce a warning message.
+	if r.protocolVersion < 2 {
+		resp.Peers = encodePeers(r.configurations.latest, r.trans)
+	}
+
+	// Check if we have an existing leader [who's not the candidate]
+	candidate := r.trans.DecodePeer(req.Candidate)
+	if leader := r.Leader(); leader != "" && leader != candidate {
+		r.logger.Warn(fmt.Sprintf("Rejecting vote request from %v since we have a leader: %v",
+			candidate, leader))
+		return
+	}
+
+	// Ignore an older term
+	if req.Term < r.getCurrentTerm() {
+		return
+	}
+
+	// Increase the term if we see a newer one
+	if req.Term > r.getCurrentTerm() {
+		// Ensure transition to follower
+		r.setState(Follower)
+		r.setCurrentTerm(req.Term)
+		resp.Term = req.Term
+	}
+
+	// Check if we have voted yet
+	lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm)
+	if err != nil && err.Error() != "not found" {
+		r.logger.Error(fmt.Sprintf("Failed to get last vote term: %v", err))
+		return
+	}
+	lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand)
+	if err != nil && err.Error() != "not found" {
+		r.logger.Error(fmt.Sprintf("Failed to get last vote candidate: %v", err))
+		return
+	}
+
+	// Check if we've voted in this election before
+	if lastVoteTerm == req.Term && lastVoteCandBytes != nil {
+		r.logger.Info(fmt.Sprintf("Duplicate RequestVote for same term: %d", req.Term))
+		if bytes.Equal(lastVoteCandBytes, req.Candidate) {
+			r.logger.Warn(fmt.Sprintf("Duplicate RequestVote from candidate: %s", req.Candidate))
+			resp.Granted = true
+		}
+		return
+	}
+
+	// Reject if their term is older
+	lastIdx, lastTerm := r.getLastEntry()
+	if lastTerm > req.LastLogTerm {
+		r.logger.Warn(fmt.Sprintf("Rejecting vote request from %v since our last term is greater (%d, %d)",
+			candidate, lastTerm, req.LastLogTerm))
+		return
+	}
+
+	if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex {
+		r.logger.Warn(fmt.Sprintf("Rejecting vote request from %v since our last index is greater (%d, %d)",
+			candidate, lastIdx, req.LastLogIndex))
+		return
+	}
+
+	// Persist a vote for safety
+	if err := r.persistVote(req.Term, req.Candidate); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to persist vote: %v", err))
+		return
+	}
+
+	resp.Granted = true
+	r.setLastContact()
+	return
+}
+
+// installSnapshot is invoked when we get an InstallSnapshot RPC call.
+// We must be in the follower state for this, since it means we are
+// too far behind a leader for log replay. This must only be called
+// from the main thread.
+func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now())
+	// Setup a response
+	resp := &InstallSnapshotResponse{
+		Term:    r.getCurrentTerm(),
+		Success: false,
+	}
+	var rpcErr error
+	defer func() {
+		io.Copy(ioutil.Discard, rpc.Reader) // ensure we always consume all the snapshot data from the stream [see issue #212]
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Sanity check the version
+	if req.SnapshotVersion < SnapshotVersionMin ||
+		req.SnapshotVersion > SnapshotVersionMax {
+		rpcErr = fmt.Errorf("unsupported snapshot version %d", req.SnapshotVersion)
+		return
+	}
+
+	// Ignore an older term
+	if req.Term < r.getCurrentTerm() {
+		r.logger.Info(fmt.Sprintf("Ignoring installSnapshot request with older term of %d vs currentTerm %d",
+			req.Term, r.getCurrentTerm()))
+		return
+	}
+
+	// Increase the term if we see a newer one
+	if req.Term > r.getCurrentTerm() {
+		// Ensure transition to follower
+		r.setState(Follower)
+		r.setCurrentTerm(req.Term)
+		resp.Term = req.Term
+	}
+
+	// Save the current leader
+	r.setLeader(ServerAddress(r.trans.DecodePeer(req.Leader)))
+
+	// Create a new snapshot
+	var reqConfiguration Configuration
+	var reqConfigurationIndex uint64
+	if req.SnapshotVersion > 0 {
+		reqConfiguration = decodeConfiguration(req.Configuration)
+		reqConfigurationIndex = req.ConfigurationIndex
+	} else {
+		reqConfiguration = decodePeers(req.Peers, r.trans)
+		reqConfigurationIndex = req.LastLogIndex
+	}
+	version := getSnapshotVersion(r.protocolVersion)
+	sink, err := r.snapshots.Create(version, req.LastLogIndex, req.LastLogTerm,
+		reqConfiguration, reqConfigurationIndex, r.trans)
+	if err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to create snapshot to install: %v", err))
+		rpcErr = fmt.Errorf("failed to create snapshot: %v", err)
+		return
+	}
+
+	// Spill the remote snapshot to disk
+	n, err := io.Copy(sink, rpc.Reader)
+	if err != nil {
+		sink.Cancel()
+		r.logger.Error(fmt.Sprintf("Failed to copy snapshot: %v", err))
+		rpcErr = err
+		return
+	}
+
+	// Check that we received it all
+	if n != req.Size {
+		sink.Cancel()
+		r.logger.Error(fmt.Sprintf("Failed to receive whole snapshot: %d / %d", n, req.Size))
+		rpcErr = fmt.Errorf("short read")
+		return
+	}
+
+	// Finalize the snapshot
+	if err := sink.Close(); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to finalize snapshot: %v", err))
+		rpcErr = err
+		return
+	}
+	r.logger.Info(fmt.Sprintf("Copied %d bytes to local snapshot", n))
+
+	// Restore snapshot
+	future := &restoreFuture{ID: sink.ID()}
+	future.init()
+	select {
+	case r.fsmMutateCh <- future:
+	case <-r.shutdownCh:
+		future.respond(ErrRaftShutdown)
+		return
+	}
+
+	// Wait for the restore to happen
+	if err := future.Error(); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to restore snapshot: %v", err))
+		rpcErr = err
+		return
+	}
+
+	// Update the lastApplied so we don't replay old logs
+	r.setLastApplied(req.LastLogIndex)
+
+	// Update the last stable snapshot info
+	r.setLastSnapshot(req.LastLogIndex, req.LastLogTerm)
+
+	// Restore the peer set
+	r.configurations.latest = reqConfiguration
+	r.configurations.latestIndex = reqConfigurationIndex
+	r.configurations.committed = reqConfiguration
+	r.configurations.committedIndex = reqConfigurationIndex
+
+	// Compact logs, continue even if this fails
+	if err := r.compactLogs(req.LastLogIndex); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to compact logs: %v", err))
+	}
+
+	r.logger.Info("Installed remote snapshot")
+	resp.Success = true
+	r.setLastContact()
+	return
+}
+
+// setLastContact is used to set the last contact time to now
+func (r *Raft) setLastContact() {
+	r.lastContactLock.Lock()
+	r.lastContact = time.Now()
+	r.lastContactLock.Unlock()
+}
+
+type voteResult struct {
+	RequestVoteResponse
+	voterID ServerID
+}
+
+// electSelf is used to send a RequestVote RPC to all peers, and vote for
+// ourself. This has the side effect of incrementing the current term. The
+// response channel returned is used to wait for all the responses (including a
+// vote for ourself). This must only be called from the main thread.
+func (r *Raft) electSelf() <-chan *voteResult {
+	// Create a response channel
+	respCh := make(chan *voteResult, len(r.configurations.latest.Servers))
+
+	// Increment the term
+	r.setCurrentTerm(r.getCurrentTerm() + 1)
+
+	// Construct the request
+	lastIdx, lastTerm := r.getLastEntry()
+	req := &RequestVoteRequest{
+		RPCHeader:    r.getRPCHeader(),
+		Term:         r.getCurrentTerm(),
+		Candidate:    r.trans.EncodePeer(r.localID, r.localAddr),
+		LastLogIndex: lastIdx,
+		LastLogTerm:  lastTerm,
+	}
+
+	// Construct a function to ask for a vote
+	askPeer := func(peer Server) {
+		r.goFunc(func() {
+			defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now())
+			resp := &voteResult{voterID: peer.ID}
+			err := r.trans.RequestVote(peer.ID, peer.Address, req, &resp.RequestVoteResponse)
+			if err != nil {
+				r.logger.Error(fmt.Sprintf("Failed to make RequestVote RPC to %v: %v", peer, err))
+				resp.Term = req.Term
+				resp.Granted = false
+			}
+			respCh <- resp
+		})
+	}
+
+	// For each peer, request a vote
+	for _, server := range r.configurations.latest.Servers {
+		if server.Suffrage == Voter {
+			if server.ID == r.localID {
+				// Persist a vote for ourselves
+				if err := r.persistVote(req.Term, req.Candidate); err != nil {
+					r.logger.Error(fmt.Sprintf("Failed to persist vote : %v", err))
+					return nil
+				}
+				// Include our own vote
+				respCh <- &voteResult{
+					RequestVoteResponse: RequestVoteResponse{
+						RPCHeader: r.getRPCHeader(),
+						Term:      req.Term,
+						Granted:   true,
+					},
+					voterID: r.localID,
+				}
+			} else {
+				askPeer(server)
+			}
+		}
+	}
+
+	return respCh
+}
+
+// persistVote is used to persist our vote for safety.
+func (r *Raft) persistVote(term uint64, candidate []byte) error {
+	if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil {
+		return err
+	}
+	if err := r.stable.Set(keyLastVoteCand, candidate); err != nil {
+		return err
+	}
+	return nil
+}
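
persistVote above and setCurrentTerm just below write through the
StableStore interface, so anything satisfying it can back these keys.
A minimal in-memory sketch (assuming "sync", "errors" and "strconv"
are imported), suitable only for tests since it is not durable; note
it returns a "not found" error string, which is exactly what the
requestVote handler above tolerates:

    // Sketch: an in-memory StableStore (not durable; tests only).
    type memStable struct {
        mu sync.Mutex
        kv map[string][]byte
    }

    func (m *memStable) Set(k, v []byte) error {
        m.mu.Lock()
        defer m.mu.Unlock()
        if m.kv == nil {
            m.kv = map[string][]byte{}
        }
        m.kv[string(k)] = v
        return nil
    }

    func (m *memStable) Get(k []byte) ([]byte, error) {
        m.mu.Lock()
        defer m.mu.Unlock()
        if v, ok := m.kv[string(k)]; ok {
            return v, nil
        }
        return nil, errors.New("not found")
    }

    func (m *memStable) SetUint64(k []byte, v uint64) error {
        return m.Set(k, []byte(strconv.FormatUint(v, 10)))
    }

    func (m *memStable) GetUint64(k []byte) (uint64, error) {
        b, err := m.Get(k)
        if err != nil {
            return 0, err
        }
        return strconv.ParseUint(string(b), 10, 64)
    }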
+
+// setCurrentTerm is used to set the current term in a durable manner.
+func (r *Raft) setCurrentTerm(t uint64) {
+	// Persist to disk first
+	if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil {
+		panic(fmt.Errorf("failed to save current term: %v", err))
+	}
+	r.raftState.setCurrentTerm(t)
+}
+
+// setState is used to update the current state. Any state
+// transition causes the known leader to be cleared. This means
+// that leader should be set only after updating the state.
+func (r *Raft) setState(state RaftState) {
+	r.setLeader("")
+	oldState := r.raftState.getState()
+	r.raftState.setState(state)
+	if oldState != state {
+		r.observe(state)
+	}
+}
diff --git a/vendor/github.com/hashicorp/raft/replication.go b/vendor/github.com/hashicorp/raft/replication.go
new file mode 100644
index 0000000000..1f5f1007f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/replication.go
@@ -0,0 +1,572 @@
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+const (
+	maxFailureScale = 12
+	failureWait     = 10 * time.Millisecond
+)
+
+var (
+	// ErrLogNotFound indicates a given log entry is not available.
+	ErrLogNotFound = errors.New("log not found")
+
+	// ErrPipelineReplicationNotSupported can be returned by the transport to
+	// signal that pipeline replication is not supported in general, and that
+	// no error message should be produced.
+	ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported")
+)
+
+// followerReplication is in charge of sending snapshots and log entries from
+// this leader during this particular term to a remote follower.
+type followerReplication struct {
+	// peer contains the network address and ID of the remote follower.
+	peer Server
+
+	// commitment tracks the entries acknowledged by followers so that the
+	// leader's commit index can advance. It is updated on successful
+	// AppendEntries responses.
+	commitment *commitment
+
+	// stopCh is notified/closed when this leader steps down or the follower is
+	// removed from the cluster. In the follower removed case, it carries a log
+	// index; replication should be attempted with a best effort up through that
+	// index, before exiting.
+	stopCh chan uint64
+	// triggerCh is notified every time new entries are appended to the log.
+	triggerCh chan struct{}
+
+	// currentTerm is the term of this leader, to be included in AppendEntries
+	// requests.
+	currentTerm uint64
+	// nextIndex is the index of the next log entry to send to the follower,
+	// which may fall past the end of the log.
+	nextIndex uint64
+
+	// lastContact is updated to the current time whenever any response is
+	// received from the follower (successful or not). This is used to check
+	// whether the leader should step down (Raft.checkLeaderLease()).
+	lastContact time.Time
+	// lastContactLock protects 'lastContact'.
+	lastContactLock sync.RWMutex
+
+	// failures counts the number of failed RPCs since the last success, which is
+	// used to apply backoff.
+	failures uint64
+
+	// notifyCh is notified to send out a heartbeat, which is used to check that
+	// this server is still leader.
+	notifyCh chan struct{}
+	// notify is a map of futures to be resolved upon receipt of an
+	// acknowledgement, then cleared from this map.
+	notify map[*verifyFuture]struct{}
+	// notifyLock protects 'notify'.
+	notifyLock sync.Mutex
+
+	// stepDown is used to indicate to the leader that we
+	// should step down based on information from a follower.
+	stepDown chan struct{}
+
+	// allowPipeline is used to determine when to pipeline the AppendEntries RPCs.
+	// It is private to this replication goroutine.
+	allowPipeline bool
+}
+
+// notifyAll is used to notify all the waiting verify futures
+// if the follower believes we are still the leader.
+func (s *followerReplication) notifyAll(leader bool) {
+	// Clear the waiting notifies minimizing lock time
+	s.notifyLock.Lock()
+	n := s.notify
+	s.notify = make(map[*verifyFuture]struct{})
+	s.notifyLock.Unlock()
+
+	// Submit our votes
+	for v := range n {
+		v.vote(leader)
+	}
+}
+
+// cleanNotify is used to remove a pending verify future from the notify map.
+func (s *followerReplication) cleanNotify(v *verifyFuture) {
+	s.notifyLock.Lock()
+	delete(s.notify, v)
+	s.notifyLock.Unlock()
+}
+
+// LastContact returns the time of last contact.
+func (s *followerReplication) LastContact() time.Time {
+	s.lastContactLock.RLock()
+	last := s.lastContact
+	s.lastContactLock.RUnlock()
+	return last
+}
+
+// setLastContact sets the last contact to the current time.
+func (s *followerReplication) setLastContact() {
+	s.lastContactLock.Lock()
+	s.lastContact = time.Now()
+	s.lastContactLock.Unlock()
+}
+
+// replicate is a long running routine that replicates log entries to a single
+// follower.
+func (r *Raft) replicate(s *followerReplication) {
+	// Start an async heartbeating routine
+	stopHeartbeat := make(chan struct{})
+	defer close(stopHeartbeat)
+	r.goFunc(func() { r.heartbeat(s, stopHeartbeat) })
+
+RPC:
+	shouldStop := false
+	for !shouldStop {
+		select {
+		case maxIndex := <-s.stopCh:
+			// Make a best effort to replicate up to this index
+			if maxIndex > 0 {
+				r.replicateTo(s, maxIndex)
+			}
+			return
+		case <-s.triggerCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		// This is _not_ our heartbeat mechanism but is to ensure
+		// followers quickly learn the leader's commit index when
+		// raft commits stop flowing naturally. The actual heartbeats
+		// can't do this to keep them unblocked by disk IO on the
+		// follower. See https://github.com/hashicorp/raft/issues/282.
+		case <-randomTimeout(r.conf.CommitTimeout):
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		}
+
+		// If things looks healthy, switch to pipeline mode
+		if !shouldStop && s.allowPipeline {
+			goto PIPELINE
+		}
+	}
+	return
+
+PIPELINE:
+	// Disable until re-enabled
+	s.allowPipeline = false
+
+	// Replicates using a pipeline for high performance. This method
+	// is not able to gracefully recover from errors, and so we fall back
+	// to standard mode on failure.
+	if err := r.pipelineReplicate(s); err != nil {
+		if err != ErrPipelineReplicationNotSupported {
+			r.logger.Error(fmt.Sprintf("Failed to start pipeline replication to %s: %s", s.peer, err))
+		}
+	}
+	goto RPC
+}
+
+// replicateTo is a helper to replicate(), used to replicate the logs up to a
+// given last index.
+// If the follower's log is behind, we take care to bring it up to date.
+func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) {
+	// Create the base request
+	var req AppendEntriesRequest
+	var resp AppendEntriesResponse
+	var start time.Time
+START:
+	// Prevent an excessive retry rate on errors
+	if s.failures > 0 {
+		select {
+		case <-time.After(backoff(failureWait, s.failures, maxFailureScale)):
+		case <-r.shutdownCh:
+		}
+	}
+
+	// Setup the request
+	if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound {
+		goto SEND_SNAP
+	} else if err != nil {
+		return
+	}
+
+	// Make the RPC call
+	start = time.Now()
+	if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to AppendEntries to %v: %v", s.peer, err))
+		s.failures++
+		return
+	}
+	appendStats(string(s.peer.ID), start, float32(len(req.Entries)))
+
+	// Check for a newer term, stop running
+	if resp.Term > req.Term {
+		r.handleStaleTerm(s)
+		return true
+	}
+
+	// Update the last contact
+	s.setLastContact()
+
+	// Update s based on success
+	if resp.Success {
+		// Update our replication state
+		updateLastAppended(s, &req)
+
+		// Clear any failures, allow pipelining
+		s.failures = 0
+		s.allowPipeline = true
+	} else {
+		s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1)
+		if resp.NoRetryBackoff {
+			s.failures = 0
+		} else {
+			s.failures++
+		}
+		r.logger.Warn(fmt.Sprintf("AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex))
+	}
+
+CHECK_MORE:
+	// Poll the stop channel here in case we are looping and have been asked
+	// to stop, or have stepped down as leader. Even for the best effort case
+	// where we are asked to replicate to a given index and then shutdown,
+	// it's better to not loop in here to send lots of entries to a straggler
+	// that's leaving the cluster anyways.
+	select {
+	case <-s.stopCh:
+		return true
+	default:
+	}
+
+	// Check if there are more logs to replicate
+	if s.nextIndex <= lastIndex {
+		goto START
+	}
+	return
+
+	// SEND_SNAP is used when we fail to get a log, usually because the follower
+	// is too far behind, and we must ship a snapshot down instead
+SEND_SNAP:
+	if stop, err := r.sendLatestSnapshot(s); stop {
+		return true
+	} else if err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to send snapshot to %v: %v", s.peer, err))
+		return
+	}
+
+	// Check if there is more to replicate
+	goto CHECK_MORE
+}
+
+// sendLatestSnapshot is used to send the latest snapshot we have
+// down to our follower.
+func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) {
+	// Get the snapshots
+	snapshots, err := r.snapshots.List()
+	if err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to list snapshots: %v", err))
+		return false, err
+	}
+
+	// Check we have at least a single snapshot
+	if len(snapshots) == 0 {
+		return false, fmt.Errorf("no snapshots found")
+	}
+
+	// Open the most recent snapshot
+	snapID := snapshots[0].ID
+	meta, snapshot, err := r.snapshots.Open(snapID)
+	if err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapID, err))
+		return false, err
+	}
+	defer snapshot.Close()
+
+	// Setup the request
+	req := InstallSnapshotRequest{
+		RPCHeader:          r.getRPCHeader(),
+		SnapshotVersion:    meta.Version,
+		Term:               s.currentTerm,
+		Leader:             r.trans.EncodePeer(r.localID, r.localAddr),
+		LastLogIndex:       meta.Index,
+		LastLogTerm:        meta.Term,
+		Peers:              meta.Peers,
+		Size:               meta.Size,
+		Configuration:      encodeConfiguration(meta.Configuration),
+		ConfigurationIndex: meta.ConfigurationIndex,
+	}
+
+	// Make the call
+	start := time.Now()
+	var resp InstallSnapshotResponse
+	if err := r.trans.InstallSnapshot(s.peer.ID, s.peer.Address, &req, &resp, snapshot); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to install snapshot %v: %v", snapID, err))
+		s.failures++
+		return false, err
+	}
+	metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", string(s.peer.ID)}, start)
+
+	// Check for a newer term, stop running
+	if resp.Term > req.Term {
+		r.handleStaleTerm(s)
+		return true, nil
+	}
+
+	// Update the last contact
+	s.setLastContact()
+
+	// Check for success
+	if resp.Success {
+		// Update the indexes
+		s.nextIndex = meta.Index + 1
+		s.commitment.match(s.peer.ID, meta.Index)
+
+		// Clear any failures
+		s.failures = 0
+
+		// Notify we are still leader
+		s.notifyAll(true)
+	} else {
+		s.failures++
+		r.logger.Warn(fmt.Sprintf("InstallSnapshot to %v rejected", s.peer))
+	}
+	return false, nil
+}
+
+// heartbeat is used to periodically invoke AppendEntries on a peer
+// to ensure it doesn't time out. This is done asynchronously from replicate(),
+// since that routine could potentially be blocked on disk IO.
+func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) {
+	var failures uint64
+	req := AppendEntriesRequest{
+		RPCHeader: r.getRPCHeader(),
+		Term:      s.currentTerm,
+		Leader:    r.trans.EncodePeer(r.localID, r.localAddr),
+	}
+	var resp AppendEntriesResponse
+	for {
+		// Wait for the next heartbeat interval or forced notify
+		select {
+		case <-s.notifyCh:
+		case <-randomTimeout(r.conf.HeartbeatTimeout / 10):
+		case <-stopCh:
+			return
+		}
+
+		start := time.Now()
+		if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil {
+			r.logger.Error(fmt.Sprintf("Failed to heartbeat to %v: %v", s.peer.Address, err))
+			failures++
+			select {
+			case <-time.After(backoff(failureWait, failures, maxFailureScale)):
+			case <-stopCh:
+			}
+		} else {
+			s.setLastContact()
+			failures = 0
+			metrics.MeasureSince([]string{"raft", "replication", "heartbeat", string(s.peer.ID)}, start)
+			s.notifyAll(resp.Success)
+		}
+	}
+}
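+
+// Worked example (editor's note): with the library's default HeartbeatTimeout
+// of 1s, randomTimeout(1s/10) fires every 100-200ms, so a healthy leader
+// sends roughly 5-10 heartbeats to each follower per election-timeout window.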
+
+// pipelineReplicate is used when we have synchronized our state with the follower,
+// and want to switch to a higher performance pipeline mode of replication.
+// We only pipeline AppendEntries commands, and if we ever hit an error, we fall
+// back to the standard replication which can handle more complex situations.
+func (r *Raft) pipelineReplicate(s *followerReplication) error {
+	// Create a new pipeline
+	pipeline, err := r.trans.AppendEntriesPipeline(s.peer.ID, s.peer.Address)
+	if err != nil {
+		return err
+	}
+	defer pipeline.Close()
+
+	// Log start and stop of pipeline
+	r.logger.Info(fmt.Sprintf("pipelining replication to peer %v", s.peer))
+	defer r.logger.Info(fmt.Sprintf("aborting pipeline replication to peer %v", s.peer))
+
+	// Create a shutdown and finish channel
+	stopCh := make(chan struct{})
+	finishCh := make(chan struct{})
+
+	// Start a dedicated decoder
+	r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) })
+
+	// Start pipeline sends at the last good nextIndex
+	nextIndex := s.nextIndex
+
+	shouldStop := false
+SEND:
+	for !shouldStop {
+		select {
+		case <-finishCh:
+			break SEND
+		case maxIndex := <-s.stopCh:
+			// Make a best effort to replicate up to this index
+			if maxIndex > 0 {
+				r.pipelineSend(s, pipeline, &nextIndex, maxIndex)
+			}
+			break SEND
+		case <-s.triggerCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx)
+		case <-randomTimeout(r.conf.CommitTimeout):
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx)
+		}
+	}
+
+	// Stop our decoder, and wait for it to finish
+	close(stopCh)
+	select {
+	case <-finishCh:
+	case <-r.shutdownCh:
+	}
+	return nil
+}
+
+// pipelineSend is used to send data over a pipeline. It is a helper to
+// pipelineReplicate.
+func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) {
+	// Create a new append request
+	req := new(AppendEntriesRequest)
+	if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil {
+		return true
+	}
+
+	// Pipeline the append entries
+	if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to pipeline AppendEntries to %v: %v", s.peer, err))
+		return true
+	}
+
+	// Increase the next send log to avoid re-sending old logs
+	if n := len(req.Entries); n > 0 {
+		last := req.Entries[n-1]
+		*nextIdx = last.Index + 1
+	}
+	return false
+}
+
+// pipelineDecode is used to decode the responses of pipelined requests.
+func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) {
+	defer close(finishCh)
+	respCh := p.Consumer()
+	for {
+		select {
+		case ready := <-respCh:
+			req, resp := ready.Request(), ready.Response()
+			appendStats(string(s.peer.ID), ready.Start(), float32(len(req.Entries)))
+
+			// Check for a newer term, stop running
+			if resp.Term > req.Term {
+				r.handleStaleTerm(s)
+				return
+			}
+
+			// Update the last contact
+			s.setLastContact()
+
+			// Abort pipeline if not successful
+			if !resp.Success {
+				return
+			}
+
+			// Update our replication state
+			updateLastAppended(s, req)
+		case <-stopCh:
+			return
+		}
+	}
+}
+
+// setupAppendEntries is used to setup an append entries request.
+func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
+	req.RPCHeader = r.getRPCHeader()
+	req.Term = s.currentTerm
+	req.Leader = r.trans.EncodePeer(r.localID, r.localAddr)
+	req.LeaderCommitIndex = r.getCommitIndex()
+	if err := r.setPreviousLog(req, nextIndex); err != nil {
+		return err
+	}
+	if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {
+		return err
+	}
+	return nil
+}
+
+// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an
+// AppendEntriesRequest given the next index to replicate.
+func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error {
+	// Guard for the first index, since there is no 0 log entry
+	// Guard against the previous index being a snapshot as well
+	lastSnapIdx, lastSnapTerm := r.getLastSnapshot()
+	if nextIndex == 1 {
+		req.PrevLogEntry = 0
+		req.PrevLogTerm = 0
+
+	} else if (nextIndex - 1) == lastSnapIdx {
+		req.PrevLogEntry = lastSnapIdx
+		req.PrevLogTerm = lastSnapTerm
+
+	} else {
+		var l Log
+		if err := r.logs.GetLog(nextIndex-1, &l); err != nil {
+			r.logger.Error(fmt.Sprintf("Failed to get log at index %d: %v", nextIndex-1, err))
+			return err
+		}
+
+		// Set the previous index and term (0 if nextIndex is 1)
+		req.PrevLogEntry = l.Index
+		req.PrevLogTerm = l.Term
+	}
+	return nil
+}
+
+// setNewLogs is used to setup the logs which should be appended for a request.
+func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
+	// Append up to MaxAppendEntries or up to the lastIndex
+	req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries)
+	maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex)
+	for i := nextIndex; i <= maxIndex; i++ {
+		oldLog := new(Log)
+		if err := r.logs.GetLog(i, oldLog); err != nil {
+			r.logger.Error(fmt.Sprintf("Failed to get log at index %d: %v", i, err))
+			return err
+		}
+		req.Entries = append(req.Entries, oldLog)
+	}
+	return nil
+}
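+
+// Worked example (editor's note): assuming the default MaxAppendEntries of
+// 64, with nextIndex=100 and lastIndex=1000, maxIndex = min(100+64-1, 1000)
+// = 163, so entries 100..163 (64 logs) are batched into this request.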
+
+// appendStats is used to emit stats about an AppendEntries invocation.
+func appendStats(peer string, start time.Time, logs float32) {
+	metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start)
+	metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs)
+}
+
+// handleStaleTerm is used when a follower indicates that we have a stale term.
+func (r *Raft) handleStaleTerm(s *followerReplication) {
+	r.logger.Error(fmt.Sprintf("peer %v has newer term, stopping replication", s.peer))
+	s.notifyAll(false) // No longer leader
+	asyncNotifyCh(s.stepDown)
+}
+
+// updateLastAppended is used to update follower replication state after a
+// successful AppendEntries RPC.
+// TODO: This isn't used during InstallSnapshot, but the code there is similar.
+func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) {
+	// Mark any inflight logs as committed
+	if logs := req.Entries; len(logs) > 0 {
+		last := logs[len(logs)-1]
+		s.nextIndex = last.Index + 1
+		s.commitment.match(s.peer.ID, last.Index)
+	}
+
+	// Notify still leader
+	s.notifyAll(true)
+}
diff --git a/vendor/github.com/hashicorp/raft/snapshot.go b/vendor/github.com/hashicorp/raft/snapshot.go
new file mode 100644
index 0000000000..2e0f77a5dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/snapshot.go
@@ -0,0 +1,239 @@
+package raft
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+// SnapshotMeta contains metadata about a snapshot.
+type SnapshotMeta struct {
+	// Version is the version number of the snapshot metadata. This does not cover
+	// the application's data in the snapshot, which should be versioned
+	// separately.
+	Version SnapshotVersion
+
+	// ID is opaque to the store, and is used for opening.
+	ID string
+
+	// Index and Term store when the snapshot was taken.
+	Index uint64
+	Term  uint64
+
+	// Peers is deprecated and used to support version 0 snapshots, but will
+	// be populated in version 1 snapshots as well to help with upgrades.
+	Peers []byte
+
+	// Configuration and ConfigurationIndex are present in version 1
+	// snapshots and later.
+	Configuration      Configuration
+	ConfigurationIndex uint64
+
+	// Size is the size of the snapshot in bytes.
+	Size int64
+}
+
+// SnapshotStore interface is used to allow for flexible implementations
+// of snapshot storage and retrieval. For example, a client could implement
+// a shared state store such as S3, allowing new nodes to restore snapshots
+// without streaming from the leader.
+type SnapshotStore interface {
+	// Create is used to begin a snapshot at a given index and term, and with
+	// the given committed configuration. The version parameter controls
+	// which snapshot version to create.
+	Create(version SnapshotVersion, index, term uint64, configuration Configuration,
+		configurationIndex uint64, trans Transport) (SnapshotSink, error)
+
+	// List is used to list the available snapshots in the store.
+	// It should return them in descending order, with the highest index first.
+	List() ([]*SnapshotMeta, error)
+
+	// Open takes a snapshot ID and provides a ReadCloser. Once close is
+	// called it is assumed the snapshot is no longer needed.
+	Open(id string) (*SnapshotMeta, io.ReadCloser, error)
+}
+
+// SnapshotSink is returned by StartSnapshot. The FSM will Write state
+// to the sink and call Close on completion. On error, Cancel will be invoked.
+type SnapshotSink interface {
+	io.WriteCloser
+	ID() string
+	Cancel() error
+}
+
+// runSnapshots is a long running goroutine used to manage taking
+// new snapshots of the FSM. It runs in parallel to the FSM and
+// main goroutines, so that snapshots do not block normal operation.
+func (r *Raft) runSnapshots() {
+	for {
+		select {
+		case <-randomTimeout(r.conf.SnapshotInterval):
+			// Check if we should snapshot
+			if !r.shouldSnapshot() {
+				continue
+			}
+
+			// Trigger a snapshot
+			if _, err := r.takeSnapshot(); err != nil {
+				r.logger.Error(fmt.Sprintf("Failed to take snapshot: %v", err))
+			}
+
+		case future := <-r.userSnapshotCh:
+			// User-triggered, run immediately
+			id, err := r.takeSnapshot()
+			if err != nil {
+				r.logger.Error(fmt.Sprintf("Failed to take snapshot: %v", err))
+			} else {
+				future.opener = func() (*SnapshotMeta, io.ReadCloser, error) {
+					return r.snapshots.Open(id)
+				}
+			}
+			future.respond(err)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// shouldSnapshot checks if we meet the conditions to take
+// a new snapshot.
+func (r *Raft) shouldSnapshot() bool {
+	// Check the last snapshot index
+	lastSnap, _ := r.getLastSnapshot()
+
+	// Check the last log index
+	lastIdx, err := r.logs.LastIndex()
+	if err != nil {
+		r.logger.Error(fmt.Sprintf("Failed to get last log index: %v", err))
+		return false
+	}
+
+	// Compare the delta to the threshold
+	delta := lastIdx - lastSnap
+	return delta >= r.conf.SnapshotThreshold
+}
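+
+// Worked example (editor's note): assuming the default SnapshotThreshold of
+// 8192, a node with lastSnap=1000 and lastIdx=9500 has delta=8500 >= 8192 and
+// snapshots; at lastIdx=9000 (delta=8000) it skips this round.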
+
+// takeSnapshot is used to take a new snapshot. This must only be called from
+// the snapshot thread, never the main thread. This returns the ID of the new
+// snapshot, along with an error.
+func (r *Raft) takeSnapshot() (string, error) {
+	defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now())
+
+	// Create a request for the FSM to perform a snapshot.
+	snapReq := &reqSnapshotFuture{}
+	snapReq.init()
+
+	// Wait for dispatch or shutdown.
+	select {
+	case r.fsmSnapshotCh <- snapReq:
+	case <-r.shutdownCh:
+		return "", ErrRaftShutdown
+	}
+
+	// Wait until we get a response
+	if err := snapReq.Error(); err != nil {
+		if err != ErrNothingNewToSnapshot {
+			err = fmt.Errorf("failed to start snapshot: %v", err)
+		}
+		return "", err
+	}
+	defer snapReq.snapshot.Release()
+
+	// Make a request for the configurations and extract the committed info.
+	// We have to use the future here to safely get this information since
+	// it is owned by the main thread.
+	configReq := &configurationsFuture{}
+	configReq.init()
+	select {
+	case r.configurationsCh <- configReq:
+	case <-r.shutdownCh:
+		return "", ErrRaftShutdown
+	}
+	if err := configReq.Error(); err != nil {
+		return "", err
+	}
+	committed := configReq.configurations.committed
+	committedIndex := configReq.configurations.committedIndex
+
+	// We don't support snapshots while there's a config change outstanding
+	// since the snapshot doesn't have a means to represent this state. This
+	// is a little weird because we need the FSM to apply an index that's
+	// past the configuration change, even though the FSM itself doesn't see
+	// the configuration changes. It should be ok in practice with normal
+	// application traffic flowing through the FSM. If there's none of that
+	// then it's not crucial that we snapshot, since there's not much going
+	// on Raft-wise.
+	if snapReq.index < committedIndex {
+		return "", fmt.Errorf("cannot take snapshot now, wait until the configuration entry at %v has been applied (have applied %v)",
+			committedIndex, snapReq.index)
+	}
+
+	// Create a new snapshot.
+	r.logger.Info(fmt.Sprintf("Starting snapshot up to %d", snapReq.index))
+	start := time.Now()
+	version := getSnapshotVersion(r.protocolVersion)
+	sink, err := r.snapshots.Create(version, snapReq.index, snapReq.term, committed, committedIndex, r.trans)
+	if err != nil {
+		return "", fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start)
+
+	// Try to persist the snapshot.
+	start = time.Now()
+	if err := snapReq.snapshot.Persist(sink); err != nil {
+		sink.Cancel()
+		return "", fmt.Errorf("failed to persist snapshot: %v", err)
+	}
+	metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start)
+
+	// Close and check for error.
+	if err := sink.Close(); err != nil {
+		return "", fmt.Errorf("failed to close snapshot: %v", err)
+	}
+
+	// Update the last stable snapshot info.
+	r.setLastSnapshot(snapReq.index, snapReq.term)
+
+	// Compact the logs.
+	if err := r.compactLogs(snapReq.index); err != nil {
+		return "", err
+	}
+
+	r.logger.Info(fmt.Sprintf("Snapshot to %d complete", snapReq.index))
+	return sink.ID(), nil
+}
+
+// compactLogs takes the last inclusive index of a snapshot
+// and trims the logs that are no longer needed.
+func (r *Raft) compactLogs(snapIdx uint64) error {
+	defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now())
+	// Determine log ranges to compact
+	minLog, err := r.logs.FirstIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get first log index: %v", err)
+	}
+
+	// Check if we have enough logs to truncate
+	lastLogIdx, _ := r.getLastLog()
+	if lastLogIdx <= r.conf.TrailingLogs {
+		return nil
+	}
+
+	// Truncate up to the end of the snapshot, or `TrailingLogs`
+	// back from the head, whichever is further back. This ensures
+	// at least `TrailingLogs` entries, but does not allow logs
+	// after the snapshot to be removed.
+	maxLog := min(snapIdx, lastLogIdx-r.conf.TrailingLogs)
+
+	// Log this
+	r.logger.Info(fmt.Sprintf("Compacting logs from %d to %d", minLog, maxLog))
+
+	// Compact the logs
+	if err := r.logs.DeleteRange(minLog, maxLog); err != nil {
+		return fmt.Errorf("log compaction failed: %v", err)
+	}
+	return nil
+}
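+
+// Worked example (editor's note): assuming the default TrailingLogs of 10240,
+// with snapIdx=20000 and logs spanning [1, 25000]: maxLog = min(20000,
+// 25000-10240) = 14760, so DeleteRange removes [1, 14760] and exactly 10240
+// trailing entries remain, even though the snapshot covers up to 20000.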
diff --git a/vendor/github.com/hashicorp/raft/stable.go b/vendor/github.com/hashicorp/raft/stable.go
new file mode 100644
index 0000000000..ff59a8c570
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/stable.go
@@ -0,0 +1,15 @@
+package raft
+
+// StableStore is used to provide stable storage
+// of key configurations to ensure safety.
+type StableStore interface {
+	Set(key []byte, val []byte) error
+
+	// Get returns the value for key, or an empty byte slice if key was not found.
+	Get(key []byte) ([]byte, error)
+
+	SetUint64(key []byte, val uint64) error
+
+	// GetUint64 returns the uint64 value for key, or 0 if key was not found.
+	GetUint64(key []byte) (uint64, error)
+}
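+
+// A minimal in-memory sketch of this interface (editor's illustration only;
+// production deployments want a durable implementation such as raft-boltdb,
+// which is vendored elsewhere in this patch):
+//
+//	type inmemStable struct {
+//		mu sync.Mutex
+//		kv map[string][]byte
+//		ku map[string]uint64
+//	}
+//
+//	func newInmemStable() *inmemStable {
+//		return &inmemStable{kv: map[string][]byte{}, ku: map[string]uint64{}}
+//	}
+//
+//	func (s *inmemStable) Set(k, v []byte) error {
+//		s.mu.Lock(); defer s.mu.Unlock()
+//		s.kv[string(k)] = v
+//		return nil
+//	}
+//
+//	func (s *inmemStable) Get(k []byte) ([]byte, error) {
+//		s.mu.Lock(); defer s.mu.Unlock()
+//		return s.kv[string(k)], nil // nil (empty) when missing
+//	}
+//
+//	func (s *inmemStable) SetUint64(k []byte, v uint64) error {
+//		s.mu.Lock(); defer s.mu.Unlock()
+//		s.ku[string(k)] = v
+//		return nil
+//	}
+//
+//	func (s *inmemStable) GetUint64(k []byte) (uint64, error) {
+//		s.mu.Lock(); defer s.mu.Unlock()
+//		return s.ku[string(k)], nil // 0 when missing
+//	}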
diff --git a/vendor/github.com/hashicorp/raft/state.go b/vendor/github.com/hashicorp/raft/state.go
new file mode 100644
index 0000000000..a58cd0d19e
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/state.go
@@ -0,0 +1,171 @@
+package raft
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// RaftState captures the state of a Raft node: Follower, Candidate, Leader,
+// or Shutdown.
+type RaftState uint32
+
+const (
+	// Follower is the initial state of a Raft node.
+	Follower RaftState = iota
+
+	// Candidate is one of the valid states of a Raft node.
+	Candidate
+
+	// Leader is one of the valid states of a Raft node.
+	Leader
+
+	// Shutdown is the terminal state of a Raft node.
+	Shutdown
+)
+
+func (s RaftState) String() string {
+	switch s {
+	case Follower:
+		return "Follower"
+	case Candidate:
+		return "Candidate"
+	case Leader:
+		return "Leader"
+	case Shutdown:
+		return "Shutdown"
+	default:
+		return "Unknown"
+	}
+}
+
+// raftState is used to maintain various state variables
+// and provides an interface to set/get the variables in a
+// thread-safe manner.
+type raftState struct {
+	// currentTerm, commitIndex and lastApplied must be kept at the top of
+	// the struct so they're 64 bit aligned which is a requirement for
+	// atomic ops on 32 bit platforms.
+
+	// The current term, cache of StableStore
+	currentTerm uint64
+
+	// Highest committed log entry
+	commitIndex uint64
+
+	// Last applied log to the FSM
+	lastApplied uint64
+
+	// lastLock protects the next four fields
+	lastLock sync.Mutex
+
+	// Cache the latest snapshot index/term
+	lastSnapshotIndex uint64
+	lastSnapshotTerm  uint64
+
+	// Cache the latest log from LogStore
+	lastLogIndex uint64
+	lastLogTerm  uint64
+
+	// Tracks running goroutines
+	routinesGroup sync.WaitGroup
+
+	// The current state
+	state RaftState
+}
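+
+// Editor's note: the alignment requirement above is real; on 32-bit
+// platforms, sync/atomic 64-bit operations such as
+// atomic.LoadUint64(&r.currentTerm) panic if the field is not 64-bit
+// aligned, and placing the uint64 fields first in the struct guarantees it.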
+
+func (r *raftState) getState() RaftState {
+	stateAddr := (*uint32)(&r.state)
+	return RaftState(atomic.LoadUint32(stateAddr))
+}
+
+func (r *raftState) setState(s RaftState) {
+	stateAddr := (*uint32)(&r.state)
+	atomic.StoreUint32(stateAddr, uint32(s))
+}
+
+func (r *raftState) getCurrentTerm() uint64 {
+	return atomic.LoadUint64(&r.currentTerm)
+}
+
+func (r *raftState) setCurrentTerm(term uint64) {
+	atomic.StoreUint64(&r.currentTerm, term)
+}
+
+func (r *raftState) getLastLog() (index, term uint64) {
+	r.lastLock.Lock()
+	index = r.lastLogIndex
+	term = r.lastLogTerm
+	r.lastLock.Unlock()
+	return
+}
+
+func (r *raftState) setLastLog(index, term uint64) {
+	r.lastLock.Lock()
+	r.lastLogIndex = index
+	r.lastLogTerm = term
+	r.lastLock.Unlock()
+}
+
+func (r *raftState) getLastSnapshot() (index, term uint64) {
+	r.lastLock.Lock()
+	index = r.lastSnapshotIndex
+	term = r.lastSnapshotTerm
+	r.lastLock.Unlock()
+	return
+}
+
+func (r *raftState) setLastSnapshot(index, term uint64) {
+	r.lastLock.Lock()
+	r.lastSnapshotIndex = index
+	r.lastSnapshotTerm = term
+	r.lastLock.Unlock()
+}
+
+func (r *raftState) getCommitIndex() uint64 {
+	return atomic.LoadUint64(&r.commitIndex)
+}
+
+func (r *raftState) setCommitIndex(index uint64) {
+	atomic.StoreUint64(&r.commitIndex, index)
+}
+
+func (r *raftState) getLastApplied() uint64 {
+	return atomic.LoadUint64(&r.lastApplied)
+}
+
+func (r *raftState) setLastApplied(index uint64) {
+	atomic.StoreUint64(&r.lastApplied, index)
+}
+
+// goFunc starts a goroutine and properly handles the race between the
+// routine starting (incrementing the WaitGroup) and exiting (decrementing it).
+func (r *raftState) goFunc(f func()) {
+	r.routinesGroup.Add(1)
+	go func() {
+		defer r.routinesGroup.Done()
+		f()
+	}()
+}
+
+func (r *raftState) waitShutdown() {
+	r.routinesGroup.Wait()
+}
+
+// getLastIndex returns the last index in stable storage.
+// Either from the last log or from the last snapshot.
+func (r *raftState) getLastIndex() uint64 {
+	r.lastLock.Lock()
+	defer r.lastLock.Unlock()
+	return max(r.lastLogIndex, r.lastSnapshotIndex)
+}
+
+// getLastEntry returns the last index and term in stable storage.
+// Either from the last log or from the last snapshot.
+func (r *raftState) getLastEntry() (uint64, uint64) {
+	r.lastLock.Lock()
+	defer r.lastLock.Unlock()
+	if r.lastLogIndex >= r.lastSnapshotIndex {
+		return r.lastLogIndex, r.lastLogTerm
+	}
+	return r.lastSnapshotIndex, r.lastSnapshotTerm
+}
diff --git a/vendor/github.com/hashicorp/raft/tag.sh b/vendor/github.com/hashicorp/raft/tag.sh
new file mode 100755
index 0000000000..cd16623a70
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/tag.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -e
+
+# The version must be supplied from the environment. Do not include the
+# leading "v".
+if [ -z "$VERSION" ]; then
+    echo "Please specify a version."
+    exit 1
+fi
+
+# Generate the tag.
+echo "==> Tagging version $VERSION..."
+git commit --allow-empty -a --gpg-sign=348FFC4C -m "Release v$VERSION"
+git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" master
+
+exit 0
diff --git a/vendor/github.com/hashicorp/raft/tcp_transport.go b/vendor/github.com/hashicorp/raft/tcp_transport.go
new file mode 100644
index 0000000000..69c928ed92
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/tcp_transport.go
@@ -0,0 +1,116 @@
+package raft
+
+import (
+	"errors"
+	"io"
+	"log"
+	"net"
+	"time"
+)
+
+var (
+	errNotAdvertisable = errors.New("local bind address is not advertisable")
+	errNotTCP          = errors.New("local address is not a TCP address")
+)
+
+// TCPStreamLayer implements StreamLayer interface for plain TCP.
+type TCPStreamLayer struct {
+	advertise net.Addr
+	listener  *net.TCPListener
+}
+
+// NewTCPTransport returns a NetworkTransport that is built on top of
+// a TCP streaming transport layer.
+func NewTCPTransport(
+	bindAddr string,
+	advertise net.Addr,
+	maxPool int,
+	timeout time.Duration,
+	logOutput io.Writer,
+) (*NetworkTransport, error) {
+	return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport {
+		return NewNetworkTransport(stream, maxPool, timeout, logOutput)
+	})
+}
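+
+// Usage sketch (editor's illustration; address, pool size and timeout are
+// arbitrary). Bind to a concrete IP: an unspecified address like "0.0.0.0"
+// with a nil advertise address fails with errNotAdvertisable (see
+// newTCPTransport below).
+//
+//	trans, err := NewTCPTransport("127.0.0.1:8300", nil, 3, 10*time.Second, os.Stderr)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer trans.Close()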
+
+// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of
+// a TCP streaming transport layer, with log output going to the supplied Logger
+func NewTCPTransportWithLogger(
+	bindAddr string,
+	advertise net.Addr,
+	maxPool int,
+	timeout time.Duration,
+	logger *log.Logger,
+) (*NetworkTransport, error) {
+	return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport {
+		return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger)
+	})
+}
+
+// NewTCPTransportWithConfig returns a NetworkTransport that is built on top of
+// a TCP streaming transport layer, using the given config struct.
+func NewTCPTransportWithConfig(
+	bindAddr string,
+	advertise net.Addr,
+	config *NetworkTransportConfig,
+) (*NetworkTransport, error) {
+	return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport {
+		config.Stream = stream
+		return NewNetworkTransportWithConfig(config)
+	})
+}
+
+func newTCPTransport(bindAddr string,
+	advertise net.Addr,
+	transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) {
+	// Try to bind
+	list, err := net.Listen("tcp", bindAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create stream
+	stream := &TCPStreamLayer{
+		advertise: advertise,
+		listener:  list.(*net.TCPListener),
+	}
+
+	// Verify that we have a usable advertise address
+	addr, ok := stream.Addr().(*net.TCPAddr)
+	if !ok {
+		list.Close()
+		return nil, errNotTCP
+	}
+	if addr.IP.IsUnspecified() {
+		list.Close()
+		return nil, errNotAdvertisable
+	}
+
+	// Create the network transport
+	trans := transportCreator(stream)
+	return trans, nil
+}
+
+// Dial implements the StreamLayer interface.
+func (t *TCPStreamLayer) Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) {
+	return net.DialTimeout("tcp", string(address), timeout)
+}
+
+// Accept implements the net.Listener interface.
+func (t *TCPStreamLayer) Accept() (c net.Conn, err error) {
+	return t.listener.Accept()
+}
+
+// Close implements the net.Listener interface.
+func (t *TCPStreamLayer) Close() (err error) {
+	return t.listener.Close()
+}
+
+// Addr implements the net.Listener interface.
+func (t *TCPStreamLayer) Addr() net.Addr {
+	// Use an advertise addr if provided
+	if t.advertise != nil {
+		return t.advertise
+	}
+	return t.listener.Addr()
+}
diff --git a/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/raft/transport.go
new file mode 100644
index 0000000000..85459b221d
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/transport.go
@@ -0,0 +1,124 @@
+package raft
+
+import (
+	"io"
+	"time"
+)
+
+// RPCResponse captures both a response and a potential error.
+type RPCResponse struct {
+	Response interface{}
+	Error    error
+}
+
+// RPC has a command, and provides a response mechanism.
+type RPC struct {
+	Command  interface{}
+	Reader   io.Reader // Set only for InstallSnapshot
+	RespChan chan<- RPCResponse
+}
+
+// Respond is used to respond with a response, error or both
+func (r *RPC) Respond(resp interface{}, err error) {
+	r.RespChan <- RPCResponse{resp, err}
+}
+
+// Transport provides an interface for network transports
+// to allow Raft to communicate with other nodes.
+type Transport interface {
+	// Consumer returns a channel that can be used to
+	// consume and respond to RPC requests.
+	Consumer() <-chan RPC
+
+	// LocalAddr is used to return our local address to distinguish from our peers.
+	LocalAddr() ServerAddress
+
+	// AppendEntriesPipeline returns an interface that can be used to pipeline
+	// AppendEntries requests.
+	AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error)
+
+	// AppendEntries sends the appropriate RPC to the target node.
+	AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error
+
+	// RequestVote sends the appropriate RPC to the target node.
+	RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error
+
+	// InstallSnapshot is used to push a snapshot down to a follower. The data is read from
+	// the ReadCloser and streamed to the client.
+	InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error
+
+	// EncodePeer is used to serialize a peer's address.
+	EncodePeer(id ServerID, addr ServerAddress) []byte
+
+	// DecodePeer is used to deserialize a peer's address.
+	DecodePeer([]byte) ServerAddress
+
+	// SetHeartbeatHandler is used to setup a heartbeat handler
+	// as a fast-pass. This is to avoid head-of-line blocking from
+	// disk IO. If a Transport does not support this, it can simply
+	// ignore the call, and push the heartbeat onto the Consumer channel.
+	SetHeartbeatHandler(cb func(rpc RPC))
+}
+
+// WithClose is an interface that a transport may provide which
+// allows a transport to be shut down cleanly when a Raft instance
+// shuts down.
+//
+// It is defined separately from Transport as unfortunately it wasn't in the
+// original interface specification.
+type WithClose interface {
+	// Close permanently closes a transport, stopping
+	// any associated goroutines and freeing other resources.
+	Close() error
+}
+
+// LoopbackTransport is an interface that provides a loopback transport
+// suitable for testing, e.g. InmemTransport. It's there so we don't have to
+// rewrite tests.
+type LoopbackTransport interface {
+	Transport // Embedded transport reference
+	WithPeers // Embedded peer management
+	WithClose // with a close routine
+}
+
+// WithPeers is an interface that a transport may provide which allows for connection and
+// disconnection. Unless the transport is a loopback transport, the transport specified to
+// "Connect" is likely to be nil.
+type WithPeers interface {
+	Connect(peer ServerAddress, t Transport) // Connect a peer
+	Disconnect(peer ServerAddress)           // Disconnect a given peer
+	DisconnectAll()                          // Disconnect all peers, possibly to reconnect them later
+}
+
+// AppendPipeline is used for pipelining AppendEntries requests. It is used
+// to increase the replication throughput by masking latency and better
+// utilizing bandwidth.
+type AppendPipeline interface {
+	// AppendEntries is used to add another request to the pipeline.
+	// The send may block which is an effective form of back-pressure.
+	AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error)
+
+	// Consumer returns a channel that can be used to consume
+	// response futures when they are ready.
+	Consumer() <-chan AppendFuture
+
+	// Close closes the pipeline and cancels all inflight RPCs
+	Close() error
+}
+
+// AppendFuture is used to return information about a pipelined AppendEntries request.
+type AppendFuture interface {
+	Future
+
+	// Start returns the time that the append request was started.
+	// It is always OK to call this method.
+	Start() time.Time
+
+	// Request holds the parameters of the AppendEntries call.
+	// It is always OK to call this method.
+	Request() *AppendEntriesRequest
+
+	// Response holds the results of the AppendEntries call.
+	// This method must only be called after the Error
+	// method returns, and will only be valid on success.
+	Response() *AppendEntriesResponse
+}
diff --git a/vendor/github.com/hashicorp/raft/util.go b/vendor/github.com/hashicorp/raft/util.go
new file mode 100644
index 0000000000..90428d7437
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/util.go
@@ -0,0 +1,133 @@
+package raft
+
+import (
+	"bytes"
+	crand "crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"math/rand"
+	"time"
+
+	"github.com/hashicorp/go-msgpack/codec"
+)
+
+func init() {
+	// Ensure we use a high-entropy seed for the pseudo-random generator
+	rand.Seed(newSeed())
+}
+
+// newSeed returns an int64 from a crypto random source;
+// it can be used to seed a source for math/rand.
+func newSeed() int64 {
+	r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+	if err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+	return r.Int64()
+}
+
+// randomTimeout returns a channel that fires after a random duration
+// between minVal and 2x minVal.
+func randomTimeout(minVal time.Duration) <-chan time.Time {
+	if minVal == 0 {
+		return nil
+	}
+	extra := (time.Duration(rand.Int63()) % minVal)
+	return time.After(minVal + extra)
+}
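+
+// For example (editor's note), randomTimeout(500*time.Millisecond) returns a
+// channel firing after a random 500-1000ms; the jitter keeps peers' election
+// and commit timers from repeatedly colliding.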
+
+// min returns the minimum.
+func min(a, b uint64) uint64 {
+	if a <= b {
+		return a
+	}
+	return b
+}
+
+// max returns the maximum.
+func max(a, b uint64) uint64 {
+	if a >= b {
+		return a
+	}
+	return b
+}
+
+// generateUUID is used to generate a random UUID.
+func generateUUID() string {
+	buf := make([]byte, 16)
+	if _, err := crand.Read(buf); err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16])
+}
+
+// asyncNotifyCh is used to do an async channel send
+// to a single channel without blocking.
+func asyncNotifyCh(ch chan struct{}) {
+	select {
+	case ch <- struct{}{}:
+	default:
+	}
+}
+
+// drainNotifyCh empties out a single-item notification channel without
+// blocking, and returns whether it received anything.
+func drainNotifyCh(ch chan struct{}) bool {
+	select {
+	case <-ch:
+		return true
+	default:
+		return false
+	}
+}
+
+// asyncNotifyBool is used to do an async notification
+// on a bool channel.
+func asyncNotifyBool(ch chan bool, v bool) {
+	select {
+	case ch <- v:
+	default:
+	}
+}
+
+// decodeMsgPack reverses the encode operation on a byte slice input.
+func decodeMsgPack(buf []byte, out interface{}) error {
+	r := bytes.NewBuffer(buf)
+	hd := codec.MsgpackHandle{}
+	dec := codec.NewDecoder(r, &hd)
+	return dec.Decode(out)
+}
+
+// encodeMsgPack writes an encoded object to a new bytes buffer.
+func encodeMsgPack(in interface{}) (*bytes.Buffer, error) {
+	buf := bytes.NewBuffer(nil)
+	hd := codec.MsgpackHandle{}
+	enc := codec.NewEncoder(buf, &hd)
+	err := enc.Encode(in)
+	return buf, err
+}
+
+// backoff is used to compute an exponential backoff
+// duration. Base time is scaled by the current round,
+// up to some maximum scale factor.
+func backoff(base time.Duration, round, limit uint64) time.Duration {
+	power := min(round, limit)
+	for power > 2 {
+		base *= 2
+		power--
+	}
+	return base
+}
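+
+// Worked example (editor's note): replicateTo calls backoff(failureWait,
+// s.failures, maxFailureScale); assuming the package's defaults of 10ms and
+// 12, consecutive failures wait 10ms, 10ms, 20ms, 40ms, ... capped at
+// 10ms * 2^10 (~10.2s) from the twelfth failure onward.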
+
+// uint64Slice implements sort.Interface for []uint64, used to determine commitment
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int           { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
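+
+// Example of the intended use (editor's sketch; the commitment logic
+// elsewhere in the package does the equivalent): sort the voters' match
+// indexes and take the middle element, which a quorum is guaranteed to have
+// replicated.
+//
+//	matched := uint64Slice{9, 5, 7}
+//	sort.Sort(matched)                    // -> {5, 7, 9}
+//	quorum := matched[(len(matched)-1)/2] // 7: on 2 of 3 voters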
diff --git a/vendor/vendor.json b/vendor/vendor.json
new file mode 100644
index 0000000000..42f68b2267
--- /dev/null
+++ b/vendor/vendor.json
@@ -0,0 +1,45 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "HF3V9ieTLnqjlDcqyGmHxYojZXE=",
+			"path": "github.com/CanonicalLtd/go-dqlite",
+			"revision": "3eab944668d7af5d0fc69ddb387ffda76300541c",
+			"revisionTime": "2019-03-22T09:57:25Z",
+			"tree": true
+		},
+		{
+			"checksumSHA1": "5UAXxv+O1Oxx8kQAUvR94zCVy+Q=",
+			"path": "github.com/CanonicalLtd/raft-http",
+			"revision": "4c2dd679d3b46c11b250d63ae43467d4c4ab0962",
+			"revisionTime": "2018-04-14T15:56:53Z"
+		},
+		{
+			"checksumSHA1": "nflIYP3tDRTgp2g4I1qoK8fDgmc=",
+			"path": "github.com/CanonicalLtd/raft-membership",
+			"revision": "3846634b0164affd0b3dfba1fdd7f9da6387e501",
+			"revisionTime": "2018-04-13T13:33:40Z"
+		},
+		{
+			"checksumSHA1": "nbblYWwQstB9B+OhB1zoDFLhYWQ=",
+			"path": "github.com/CanonicalLtd/raft-test",
+			"revision": "586f073e84d2c7bbf01340756979db76179c7a7a",
+			"revisionTime": "2019-04-30T22:51:17Z",
+			"tree": true
+		},
+		{
+			"checksumSHA1": "RMI9XuADcv+6w3jS5FpqzjDKuhI=",
+			"path": "github.com/hashicorp/raft",
+			"revision": "2c551690b5c0eb05ef7f4ad72ed01f7f6ce3fcb6",
+			"revisionTime": "2019-05-11T03:54:14Z"
+		},
+		{
+			"checksumSHA1": "Y2PM65le0fGtiD12RaKknBscFys=",
+			"path": "github.com/hashicorp/raft-boltdb",
+			"revision": "6e5ba93211eaf8d9a2ad7e41ffad8c6f160f9fe3",
+			"revisionTime": "2017-10-10T15:18:10Z"
+		}
+	],
+	"rootPath": "github.com/lxc/lxd"
+}

